Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/Kconfig  1
-rw-r--r--  drivers/accel/Makefile  1
-rw-r--r--  drivers/accel/amdxdna/Kconfig  18
-rw-r--r--  drivers/accel/amdxdna/Makefile  23
-rw-r--r--  drivers/accel/amdxdna/TODO  3
-rw-r--r--  drivers/accel/amdxdna/aie2_ctx.c  910
-rw-r--r--  drivers/accel/amdxdna/aie2_error.c  360
-rw-r--r--  drivers/accel/amdxdna/aie2_message.c  776
-rw-r--r--  drivers/accel/amdxdna/aie2_msg_priv.h  370
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c  928
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.h  297
-rw-r--r--  drivers/accel/amdxdna/aie2_pm.c  108
-rw-r--r--  drivers/accel/amdxdna/aie2_psp.c  146
-rw-r--r--  drivers/accel/amdxdna/aie2_smu.c  134
-rw-r--r--  drivers/accel/amdxdna/aie2_solver.c  380
-rw-r--r--  drivers/accel/amdxdna/aie2_solver.h  155
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.c  550
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.h  162
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.c  622
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.h  65
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.c  561
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.h  124
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.c  61
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.h  42
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.c  429
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.h  147
-rw-r--r--  drivers/accel/amdxdna/amdxdna_sysfs.c  67
-rw-r--r--  drivers/accel/amdxdna/npu1_regs.c  114
-rw-r--r--  drivers/accel/amdxdna/npu2_regs.c  113
-rw-r--r--  drivers/accel/amdxdna/npu4_regs.c  134
-rw-r--r--  drivers/accel/amdxdna/npu5_regs.c  113
-rw-r--r--  drivers/accel/amdxdna/npu6_regs.c  114
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_drv.c  1
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c  8
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c  2
-rw-r--r--  drivers/accel/qaic/qaic_drv.c  1
-rw-r--r--  drivers/accel/qaic/sahara.c  3
-rw-r--r--  drivers/acpi/acpi_pad.c  5
-rw-r--r--  drivers/acpi/acpi_video.c  49
-rw-r--r--  drivers/acpi/apei/ghes.c  10
-rw-r--r--  drivers/acpi/battery.c  14
-rw-r--r--  drivers/acpi/bgrt.c  6
-rw-r--r--  drivers/acpi/dptf/dptf_pch_fivr.c  1
-rw-r--r--  drivers/acpi/dptf/dptf_power.c  2
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c  6
-rw-r--r--  drivers/acpi/fan.h  1
-rw-r--r--  drivers/acpi/fan_core.c  10
-rw-r--r--  drivers/acpi/mipi-disco-img.c  3
-rw-r--r--  drivers/acpi/osl.c  22
-rw-r--r--  drivers/acpi/platform_profile.c  647
-rw-r--r--  drivers/acpi/property.c  3
-rw-r--r--  drivers/acpi/scan.c  5
-rw-r--r--  drivers/acpi/sysfs.c  8
-rw-r--r--  drivers/acpi/tables.c  12
-rw-r--r--  drivers/acpi/utils.c  7
-rw-r--r--  drivers/android/binder.c  25
-rw-r--r--  drivers/ata/ahci.h  2
-rw-r--r--  drivers/ata/pata_macio.c  2
-rw-r--r--  drivers/ata/sata_mv.c  2
-rw-r--r--  drivers/ata/sata_nv.c  4
-rw-r--r--  drivers/ata/sata_sil24.c  1
-rw-r--r--  drivers/auxdisplay/img-ascii-lcd.c  8
-rw-r--r--  drivers/base/physical_location.c  4
-rw-r--r--  drivers/base/power/main.c  26
-rw-r--r--  drivers/base/power/sysfs.c  1
-rw-r--r--  drivers/base/power/wakeirq.c  26
-rw-r--r--  drivers/base/property.c  38
-rw-r--r--  drivers/base/regmap/regcache-maple.c  7
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  10
-rw-r--r--  drivers/base/regmap/regcache.c  2
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c  219
-rw-r--r--  drivers/base/regmap/regmap.c  13
-rw-r--r--  drivers/base/swnode.c  1
-rw-r--r--  drivers/block/amiflop.c  1
-rw-r--r--  drivers/block/aoe/aoeblk.c  1
-rw-r--r--  drivers/block/ataflop.c  1
-rw-r--r--  drivers/block/floppy.c  1
-rw-r--r--  drivers/block/loop.c  178
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  1
-rw-r--r--  drivers/block/nbd.c  116
-rw-r--r--  drivers/block/null_blk/main.c  31
-rw-r--r--  drivers/block/null_blk/null_blk.h  1
-rw-r--r--  drivers/block/ps3disk.c  7
-rw-r--r--  drivers/block/rbd.c  1
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c  3
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c  2
-rw-r--r--  drivers/block/rnull.rs  30
-rw-r--r--  drivers/block/sunvdc.c  2
-rw-r--r--  drivers/block/swim.c  2
-rw-r--r--  drivers/block/swim3.c  3
-rw-r--r--  drivers/block/ublk_drv.c  1
-rw-r--r--  drivers/block/virtio_blk.c  9
-rw-r--r--  drivers/block/xen-blkfront.c  1
-rw-r--r--  drivers/block/z2ram.c  1
-rw-r--r--  drivers/block/zram/zram_drv.c  1
-rw-r--r--  drivers/bluetooth/btbcm.c  3
-rw-r--r--  drivers/bluetooth/btintel.c  17
-rw-r--r--  drivers/bluetooth/btmrvl_main.c  3
-rw-r--r--  drivers/bluetooth/btmtk.c  4
-rw-r--r--  drivers/bluetooth/btmtksdio.c  4
-rw-r--r--  drivers/bluetooth/btqca.c  200
-rw-r--r--  drivers/bluetooth/btqca.h  5
-rw-r--r--  drivers/bluetooth/btrtl.c  4
-rw-r--r--  drivers/bluetooth/btusb.c  73
-rw-r--r--  drivers/bluetooth/hci_qca.c  33
-rw-r--r--  drivers/cdrom/gdrom.c  2
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c  5
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c  5
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  4
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c  6
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c  2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c  5
-rw-r--r--  drivers/char/ipmi/ssif_bmc.c  5
-rw-r--r--  drivers/char/tpm/eventlog/acpi.c  15
-rw-r--r--  drivers/clk/analogbits/wrpll-cln28hpc.c  2
-rw-r--r--  drivers/clk/at91/Makefile  1
-rw-r--r--  drivers/clk/at91/clk-master.c  2
-rw-r--r--  drivers/clk/at91/clk-sam9x60-pll.c  2
-rw-r--r--  drivers/clk/at91/pmc.c  1
-rw-r--r--  drivers/clk/at91/sama7d65.c  1375
-rw-r--r--  drivers/clk/at91/sckc.c  24
-rw-r--r--  drivers/clk/bcm/clk-kona.c  3
-rw-r--r--  drivers/clk/bcm/clk-raspberrypi.c  33
-rw-r--r--  drivers/clk/clk-en7523.c  24
-rw-r--r--  drivers/clk/clk-ep93xx.c  6
-rw-r--r--  drivers/clk/clk-lmk04832.c  4
-rw-r--r--  drivers/clk/clk-loongson2.c  9
-rw-r--r--  drivers/clk/clk-nomadik.c  5
-rw-r--r--  drivers/clk/clk-stm32f4.c  155
-rw-r--r--  drivers/clk/clk-versaclock3.c  67
-rw-r--r--  drivers/clk/clk-xgene.c  4
-rw-r--r--  drivers/clk/clk.c  4
-rw-r--r--  drivers/clk/davinci/pll.c  32
-rw-r--r--  drivers/clk/imx/clk-imx8mp.c  5
-rw-r--r--  drivers/clk/imx/clk-imx93.c  32
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c  2
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-aud.c  10
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-bdp.c  1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-img.c  1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-mm.c  1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-vdec.c  1
-rw-r--r--  drivers/clk/mmp/clk-pxa1908-apbc.c  4
-rw-r--r--  drivers/clk/mmp/clk-pxa1908-apbcp.c  4
-rw-r--r--  drivers/clk/mmp/clk-pxa1908-mpmu.c  4
-rw-r--r--  drivers/clk/mmp/pwr-island.c  2
-rw-r--r--  drivers/clk/qcom/Kconfig  65
-rw-r--r--  drivers/clk/qcom/Makefile  7
-rw-r--r--  drivers/clk/qcom/apss-ipq-pll.c  3
-rw-r--r--  drivers/clk/qcom/camcc-x1e80100.c  7
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c  181
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h  13
-rw-r--r--  drivers/clk/qcom/clk-rcg.c  1
-rw-r--r--  drivers/clk/qcom/clk-rcg.h  1
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c  198
-rw-r--r--  drivers/clk/qcom/clk-rpm.c  27
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c  50
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c  81
-rw-r--r--  drivers/clk/qcom/clk-spmi-pmic-div.c  13
-rw-r--r--  drivers/clk/qcom/dispcc-qcm2290.c  2
-rw-r--r--  drivers/clk/qcom/dispcc-sm6115.c  2
-rw-r--r--  drivers/clk/qcom/dispcc-sm6350.c  7
-rw-r--r--  drivers/clk/qcom/dispcc-sm8750.c  1963
-rw-r--r--  drivers/clk/qcom/gcc-ipq5424.c  57
-rw-r--r--  drivers/clk/qcom/gcc-ipq6018.c  4
-rw-r--r--  drivers/clk/qcom/gcc-mdm9607.c  2
-rw-r--r--  drivers/clk/qcom/gcc-qcs615.c  3034
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c  43
-rw-r--r--  drivers/clk/qcom/gcc-sm6350.c  22
-rw-r--r--  drivers/clk/qcom/gcc-sm8550.c  8
-rw-r--r--  drivers/clk/qcom/gcc-sm8650.c  8
-rw-r--r--  drivers/clk/qcom/gcc-sm8750.c  3274
-rw-r--r--  drivers/clk/qcom/gcc-x1e80100.c  2
-rw-r--r--  drivers/clk/qcom/gpucc-x1p42100.c  587
-rw-r--r--  drivers/clk/qcom/ipq-cmn-pll.c  435
-rw-r--r--  drivers/clk/qcom/lpasscc-sm6115.c  85
-rw-r--r--  drivers/clk/qcom/mmcc-msm8960.c  61
-rw-r--r--  drivers/clk/qcom/tcsrcc-sm8750.c  141
-rw-r--r--  drivers/clk/ralink/clk-mtmips.c  1
-rw-r--r--  drivers/clk/renesas/Kconfig  7
-rw-r--r--  drivers/clk/renesas/Makefile  1
-rw-r--r--  drivers/clk/renesas/r8a779g0-cpg-mssr.c  4
-rw-r--r--  drivers/clk/renesas/r8a779h0-cpg-mssr.c  4
-rw-r--r--  drivers/clk/renesas/r9a06g032-clocks.c  29
-rw-r--r--  drivers/clk/renesas/r9a08g045-cpg.c  47
-rw-r--r--  drivers/clk/renesas/r9a09g047-cpg.c  150
-rw-r--r--  drivers/clk/renesas/r9a09g057-cpg.c  181
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c  2
-rw-r--r--  drivers/clk/renesas/rzv2h-cpg.c  196
-rw-r--r--  drivers/clk/renesas/rzv2h-cpg.h  39
-rw-r--r--  drivers/clk/rockchip/Makefile  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3588.c  120
-rw-r--r--  drivers/clk/rockchip/clk.c  102
-rw-r--r--  drivers/clk/rockchip/clk.h  40
-rw-r--r--  drivers/clk/rockchip/gate-link.c  85
-rw-r--r--  drivers/clk/samsung/Makefile  1
-rw-r--r--  drivers/clk/samsung/clk-exynos990.c  1343
-rw-r--r--  drivers/clk/samsung/clk-pll.c  14
-rw-r--r--  drivers/clk/samsung/clk-pll.h  3
-rw-r--r--  drivers/clk/socfpga/clk-pll-a10.c  2
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7100-audio.c  14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-aon.c  14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-isp.c  14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-stg.c  14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-sys.c  14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-vout.c  14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh71x0.c  12
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh71x0.h  4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a100.c  6
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.c  13
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.h  2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h616.c  28
-rw-r--r--  drivers/clk/thead/clk-th1520-ap.c  13
-rw-r--r--  drivers/clk/ti/mux.c  2
-rw-r--r--  drivers/clk/xilinx/clk-xlnx-clock-wizard.c  121
-rw-r--r--  drivers/cpufreq/Kconfig  6
-rw-r--r--  drivers/cpufreq/Kconfig.arm  8
-rw-r--r--  drivers/cpufreq/Makefile  1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c  36
-rw-r--r--  drivers/cpufreq/airoha-cpufreq.c  152
-rw-r--r--  drivers/cpufreq/amd-pstate-trace.h  52
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c  12
-rw-r--r--  drivers/cpufreq/amd-pstate.c  483
-rw-r--r--  drivers/cpufreq/amd-pstate.h  3
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c  56
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c  2
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c  4
-rw-r--r--  drivers/cpufreq/cpufreq.c  9
-rw-r--r--  drivers/cpufreq/intel_pstate.c  60
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c  3
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c  34
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c  45
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c  2
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c  2
-rw-r--r--  drivers/cpuidle/cpuidle-psci-domain.c  1
-rw-r--r--  drivers/cpuidle/governors/teo.c  91
-rw-r--r--  drivers/crypto/Kconfig  17
-rw-r--r--  drivers/crypto/Makefile  2
-rw-r--r--  drivers/crypto/bcm/spu.c  7
-rw-r--r--  drivers/crypto/caam/blob_gen.c  3
-rw-r--r--  drivers/crypto/ccp/dbc.c  53
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  2
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c  13
-rw-r--r--  drivers/crypto/hisilicon/qm.c  291
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h  3
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c  161
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h  11
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c  13
-rw-r--r--  drivers/crypto/hisilicon/zip/Makefile  2
-rw-r--r--  drivers/crypto/hisilicon/zip/dae_main.c  262
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h  8
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c  52
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c  2
-rw-r--r--  drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c  3
-rw-r--r--  drivers/crypto/n2_asm.S  96
-rw-r--r--  drivers/crypto/n2_core.c  2168
-rw-r--r--  drivers/crypto/n2_core.h  232
-rw-r--r--  drivers/crypto/omap-aes.c  34
-rw-r--r--  drivers/crypto/omap-aes.h  6
-rw-r--r--  drivers/crypto/omap-des.c  40
-rw-r--r--  drivers/crypto/qce/aead.c  2
-rw-r--r--  drivers/crypto/qce/core.c  129
-rw-r--r--  drivers/crypto/qce/core.h  9
-rw-r--r--  drivers/crypto/qce/dma.c  22
-rw-r--r--  drivers/crypto/qce/dma.h  3
-rw-r--r--  drivers/crypto/qce/sha.c  2
-rw-r--r--  drivers/crypto/qce/skcipher.c  2
-rw-r--r--  drivers/crypto/tegra/tegra-se-aes.c  7
-rw-r--r--  drivers/crypto/tegra/tegra-se-hash.c  7
-rw-r--r--  drivers/devfreq/devfreq-event.c  8
-rw-r--r--  drivers/devfreq/exynos-bus.c  5
-rw-r--r--  drivers/dma/ioat/dca.c  8
-rw-r--r--  drivers/edac/Kconfig  17
-rw-r--r--  drivers/edac/Makefile  3
-rw-r--r--  drivers/edac/amd64_edac.c  1
-rw-r--r--  drivers/edac/cell_edac.c  281
-rw-r--r--  drivers/edac/edac_mc.c  2
-rw-r--r--  drivers/edac/edac_mc_sysfs.c  6
-rw-r--r--  drivers/edac/i10nm_base.c  33
-rw-r--r--  drivers/edac/i5000_edac.c  8
-rw-r--r--  drivers/edac/loongson_edac.c  157
-rw-r--r--  drivers/edac/skx_base.c  11
-rw-r--r--  drivers/edac/skx_common.c  47
-rw-r--r--  drivers/edac/skx_common.h  3
-rw-r--r--  drivers/firmware/arm_scmi/common.h  4
-rw-r--r--  drivers/firmware/arm_scmi/driver.c  74
-rw-r--r--  drivers/firmware/arm_scmi/transports/mailbox.c  1
-rw-r--r--  drivers/firmware/arm_scmi/transports/smc.c  1
-rw-r--r--  drivers/firmware/arm_scmi/transports/virtio.c  1
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c  5
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c  5
-rw-r--r--  drivers/firmware/cirrus/Kconfig  20
-rw-r--r--  drivers/firmware/cirrus/Makefile  2
-rw-r--r--  drivers/firmware/cirrus/test/Makefile  23
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_bin.c  199
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c  752
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c  367
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_utils.c  13
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c  473
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin.c  2556
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c  600
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c  688
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c  3282
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c  1851
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c  2669
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c  2211
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c  1347
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_tests.c  14
-rw-r--r--  drivers/firmware/efi/efi.c  3
-rw-r--r--  drivers/firmware/efi/libstub/Makefile  2
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c  9
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub.c  49
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h  20
-rw-r--r--  drivers/firmware/efi/libstub/gop.c  323
-rw-r--r--  drivers/firmware/efi/libstub/kaslr.c  4
-rw-r--r--  drivers/firmware/efi/libstub/mem.c  20
-rw-r--r--  drivers/firmware/efi/libstub/pci.c  34
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c  4
-rw-r--r--  drivers/firmware/efi/libstub/relocate.c  10
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c  164
-rw-r--r--  drivers/firmware/efi/sysfb_efi.c  2
-rw-r--r--  drivers/firmware/google/cbmem.c  10
-rw-r--r--  drivers/firmware/google/gsmi.c  6
-rw-r--r--  drivers/firmware/google/memconsole.c  4
-rw-r--r--  drivers/firmware/google/vpd.c  8
-rw-r--r--  drivers/firmware/qcom/qcom_scm-smc.c  6
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c  271
-rw-r--r--  drivers/firmware/qcom/qcom_scm.h  4
-rw-r--r--  drivers/gpio/Kconfig  4
-rw-r--r--  drivers/gpio/gpio-altera.c  9
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c  1
-rw-r--r--  drivers/gpio/gpio-pca953x.c  3
-rw-r--r--  drivers/gpio/gpio-regmap.c  2
-rw-r--r--  drivers/gpio/gpio-tps65219.c  12
-rw-r--r--  drivers/gpio/gpio-tqmx86.c  206
-rw-r--r--  drivers/gpio/gpio-twl6040.c  6
-rw-r--r--  drivers/gpio/gpio-xilinx.c  32
-rw-r--r--  drivers/gpu/drm/Kconfig  81
-rw-r--r--  drivers/gpu/drm/Makefile  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  122
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  212
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c  60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c  345
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c  77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c  81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  209
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h  56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm  124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c  47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c  57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v11_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v12_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c  104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c  708
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c  136
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c  81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c  116
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c  299
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc24.c  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c  267
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_14.c  160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_14.h  51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c  33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c  40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c  121
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c  79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c  1118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h  491
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm  202
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm  1126
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm  58
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c  135
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c  64
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  186
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c  564
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h  55
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c  136
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  59
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c  69
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c  35
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c  140
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c  243
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h  41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c  311
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c  37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  121
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c  53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h  44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h  179
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h  34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c  44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c  201
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c  124
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c  34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c  80
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c  132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/Makefile  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c  41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h  109
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c  118
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c  209
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h  401
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h  52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h  77
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c  42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c  1412
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h  135
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c  225
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c  59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c  428
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c  50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c  307
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c  1178
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h)  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c  549
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h  46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h  58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c  54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c  49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c  45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c  131
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h  64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c  257
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c  53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c  836
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h  22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h  59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/optc.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c  61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c  47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c  179
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c  36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c  120
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c  101
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c  60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c  61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl.c  177
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h  219
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h  3
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  9
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h)  4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h)  4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h  29
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h  37
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h  17
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h  47
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c  108
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c  8
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c  7
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c  8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c  5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c  86
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h  8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h  14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c  19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  64
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c  38
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_drv.c  2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c  1
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c  3
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c  3
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c  3
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c  3
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c  3
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h  1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c  3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_audio.c  3
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c  2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c  80
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c  28
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h  3
-rw-r--r--  drivers/gpu/drm/bridge/chipone-icn6211.c  2
-rw-r--r--  drivers/gpu/drm/bridge/chrontel-ch7033.c  2
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6263.c  41
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c  337
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c  6
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c  4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9211.c  2
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611.c  187
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c  7
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c  8
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c  4
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c  11
-rw-r--r--  drivers/gpu/drm/bridge/sii9234.c  4
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c  4
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig  6
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Makefile  1
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c  3
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c  29
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h  2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c  1030
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c  2
-rw-r--r--  drivers/gpu/drm/bridge/tc358768.c  4
-rw-r--r--  drivers/gpu/drm/bridge/ti-dlpc3433.c  2
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi83.c  149
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c  6
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c  2
-rw-r--r--  drivers/gpu/drm/clients/Kconfig  123
-rw-r--r--  drivers/gpu/drm/clients/Makefile  8
-rw-r--r--  drivers/gpu/drm/clients/drm_client_internal.h  25
-rw-r--r--  drivers/gpu/drm/clients/drm_client_setup.c (renamed from drivers/gpu/drm/drm_client_setup.c)  34
-rw-r--r--  drivers/gpu/drm/clients/drm_fbdev_client.c (renamed from drivers/gpu/drm/drm_fbdev_client.c)  4
-rw-r--r--  drivers/gpu/drm/clients/drm_log.c  420
-rw-r--r--  drivers/gpu/drm/display/Kconfig  8
-rw-r--r--  drivers/gpu/drm/display/Makefile  2
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c  170
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c  125
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c  100
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_audio_helper.c  190
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_state_helper.c  91
-rw-r--r--  drivers/gpu/drm/drm_bridge.c  4
-rw-r--r--  drivers/gpu/drm/drm_connector.c  177
-rw-r--r--  drivers/gpu/drm/drm_draw.c  233
-rw-r--r--  drivers/gpu/drm/drm_draw_internal.h  56
-rw-r--r--  drivers/gpu/drm/drm_drv.c  32
-rw-r--r--  drivers/gpu/drm/drm_edid.c  6
-rw-r--r--  drivers/gpu/drm/drm_file.c  23
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c  9
-rw-r--r--  drivers/gpu/drm/drm_modes.c  3
-rw-r--r--  drivers/gpu/drm/drm_of.c  4
-rw-r--r--  drivers/gpu/drm/drm_panel.c  3
-rw-r--r--  drivers/gpu/drm/drm_panel_backlight_quirks.c  94
-rw-r--r--  drivers/gpu/drm/drm_panic.c  269
-rw-r--r--  drivers/gpu/drm/drm_panic_qr.rs  2
-rw-r--r--  drivers/gpu/drm/drm_print.c  23
-rw-r--r--  drivers/gpu/drm/drm_vblank_work.c  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c  3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c  13
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c  28
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c  41
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c  26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c  3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c  1
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c  3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h  1
-rw-r--r--  drivers/gpu/drm/gud/gud_drv.c  3
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Kconfig  2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Makefile  3
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c  164
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h  63
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h  19
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c  220
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h  28
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c  332
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h  76
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c  118
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c  17
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h  19
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c  41
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c  20
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c  1
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c  2
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c  2
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c  2
-rw-r--r--  drivers/gpu/drm/i2c/tda9950.c  4
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c  4
-rw-r--r--  drivers/gpu/drm/i915/Makefile  5
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ns2501.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c  26
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.h  6
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c  36
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.h  5
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c  20
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.h  6
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_display_sr.c  97
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_display_sr.h  14
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm_regs.h  257
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c  28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c  57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c  15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h  8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c  177
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c  163
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt_regs.h  50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c  14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c  43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c  666
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h  12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c  23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h  18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c  117
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.c  14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.h  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h  17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c  11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs_params.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c  274
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h  155
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c  301
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.h  38
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c  52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.c  13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c  952
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h  31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c  62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c  567
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.h  35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reset.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_snapshot.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h  51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.c  317
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.h  14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c  1263
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h  39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c  40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c  928
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_test.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_tunnel.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c  8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt_common.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c  58
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c  20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.c  42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.h  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c  36
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c  19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hti.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_load_detect.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c  49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c  19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c  184
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.h  19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c  27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pfit.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c  56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pmdemand.c  195
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pmdemand.h  51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c  174
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c  88
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c  12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c  41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h  8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c  80
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc_regs.h  12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c  287
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.h  11
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c  51
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane_regs.h  15
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c  863
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.h  5
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c  28
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.h  7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c  23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.h  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h  4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h  3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c  9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c  19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c  9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c  24
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.h  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c  30
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_migrate.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c  17
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_slpc.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  12
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c  13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.h  1
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  17
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c  81
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c  21
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  45
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c  18
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c  133
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  12
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c  50
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h  4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  378
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  141
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.h  14
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h  28
-rw-r--r--  drivers/gpu/drm/i915/intel_cpu_info.c  44
-rw-r--r--  drivers/gpu/drm/i915/intel_cpu_info.h  13
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c  9
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h  5
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_sbi.c  22
-rw-r--r--  drivers/gpu/drm/i915/intel_sbi.h  4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h  2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore_trace.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore_trace.h  49
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c  8
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c  12
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c  2
-rw-r--r--  drivers/gpu/drm/i915/vlv_sideband.c  28
-rw-r--r--  drivers/gpu/drm/i915/vlv_sideband.h  3
-rw-r--r--  drivers/gpu/drm/i915/vlv_suspend.c  1
-rw-r--r--  drivers/gpu/drm/imagination/pvr_drv.c  1
-rw-r--r--  drivers/gpu/drm/imagination/pvr_drv.h  1
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.c  3
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-drm-core.c  3
-rw-r--r--  drivers/gpu/drm/imx/lcdc/imx-lcdc.c  3
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c  3
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.c  3
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.h  1
-rw-r--r--  drivers/gpu/drm/lima/lima_drv.c  1
-rw-r--r--  drivers/gpu/drm/logicvc/logicvc_drm.c  3
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_drv.c  10
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c  3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c  35
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp_reg.h  1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c  2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c  3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c  3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h  1
-rw-r--r--  drivers/gpu/drm/msm/Makefile  1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c  6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c  15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_catalog.c  22
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  203
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h  26
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c  12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h  1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c  54
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.h  5
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  24
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h  7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h  31
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h  54
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h  254
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h  4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h  4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h  6
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h  10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c  63
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c  1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h  14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c  75
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h  70
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h  13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c  4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c  50
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h  4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c  396
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h  13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c  175
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h  18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c  3
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot.c  2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_audio.c  277
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_audio.h  3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_catalog.c  159
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_catalog.h  35
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c  2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c  11
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c  26
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.h  11
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_utils.c  10
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_utils.h  2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c  2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h  1
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c  2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c  2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h  1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c  21
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c  2
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c  2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  5
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h  7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  67
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  2
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.c  3
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c  79
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.h  4
-rw-r--r--  drivers/gpu/drm/msm/msm_submitqueue.c  2
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml  5
-rw-r--r--  drivers/gpu/drm/msm/registers/display/mdss.xml  11
-rw-r--r--  drivers/gpu/drm/mxsfb/lcdif_drv.c  3
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/log.h  51
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h  21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c  16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.h  16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c  45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c  508
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c  8
-rw-r--r--  drivers/gpu/drm/panel/panel-orisetech-otm8009a.c  1
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c136
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c9
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c4
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c12
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c90
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h37
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c6
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c142
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c23
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c26
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c22
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c68
-rw-r--r--drivers/gpu/drm/radeon/Kconfig1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c45
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c10
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c21
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c24
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c4
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c18
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c6
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig10
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c4
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c13
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.h2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c487
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c147
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c598
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h28
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c219
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h2
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c4
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c4
-rw-r--r--drivers/gpu/drm/stm/drv.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c32
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c523
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c17
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c461
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h102
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c3
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c28
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c5
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h5
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c34
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/tiny/Makefile2
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c6
-rw-r--r--drivers/gpu/drm/tiny/bochs.c3
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c (renamed from drivers/gpu/drm/tiny/cirrus.c)10
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c4
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c3
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c3
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c4
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c4
-rw-r--r--drivers/gpu/drm/tiny/repaper.c3
-rw-r--r--drivers/gpu/drm/tiny/sharp-memory.c3
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c4
-rw-r--r--drivers/gpu/drm/tiny/st7586.c3
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c18
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c4
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c52
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c54
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c23
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h8
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c57
-rw-r--r--drivers/gpu/drm/v3d/v3d_performance_counters.h12
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h29
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c26
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c19
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c3
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c8
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c106
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c96
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h54
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c216
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h6
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c640
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h217
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c737
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c105
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c871
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h297
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c91
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c125
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c178
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c35
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c312
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c6
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h63
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c409
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c49
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c21
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c5
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug4
-rw-r--r--drivers/gpu/drm/xe/Makefile6
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h20
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h38
-rw-r--r--drivers/gpu/drm/xe/abi/guc_capture_abi.h2
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h20
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h16
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h)0
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_irq.c13
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c25
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c116
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h2
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c12
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c8
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h19
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c30
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c5
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h8
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c190
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h33
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c14
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c119
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h7
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_device.c17
-rw-r--r--drivers/gpu/drm/xe/xe_device.h3
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h65
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c83
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h1
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c2
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c24
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c10
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c37
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c47
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h31
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c80
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c350
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c26
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c35
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c36
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c79
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c153
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c8
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c22
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.h4
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c350
-rw-r--r--drivers/gpu/drm/xe/xe_irq.h8
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c53
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h6
-rw-r--r--drivers/gpu/drm/xe/xe_macros.h12
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c26
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c2
-rw-r--r--drivers/gpu/drm/xe/xe_module.c2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c174
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c4
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c13
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c5
-rw-r--r--drivers/gpu/drm/xe/xe_query.c5
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c53
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c37
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c5
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h15
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c4
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h17
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c263
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h14
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h11
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h17
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.c9
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h52
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c61
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c37
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h1
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h22
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c233
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h11
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c6
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules3
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c6
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig9
-rw-r--r--drivers/gpu/drm/xlnx/Makefile1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c48
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp_regs.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c56
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c447
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c39
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h15
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c3
-rw-r--r--drivers/hid/Kconfig4
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h1
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c22
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c38
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h24
-rw-r--r--drivers/hid/hid-asus.c26
-rw-r--r--drivers/hid/hid-core.c29
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c37
-rw-r--r--drivers/hid/hid-lenovo.c109
-rw-r--r--drivers/hid/hid-magicmouse.c8
-rw-r--r--drivers/hid/hid-multitouch.c11
-rw-r--r--drivers/hid/hid-nintendo.c16
-rw-r--r--drivers/hid/hid-roccat-arvo.c20
-rw-r--r--drivers/hid/hid-roccat-common.h22
-rw-r--r--drivers/hid/hid-roccat-isku.c22
-rw-r--r--drivers/hid/hid-roccat-kone.c22
-rw-r--r--drivers/hid/hid-roccat-koneplus.c42
-rw-r--r--drivers/hid/hid-roccat-konepure.c4
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c38
-rw-r--r--drivers/hid/hid-roccat-lua.c10
-rw-r--r--drivers/hid/hid-roccat-pyra.c50
-rw-r--r--drivers/hid/hid-roccat-ryos.c4
-rw-r--r--drivers/hid/hid-roccat-savu.c4
-rw-r--r--drivers/hid/hid-sensor-hub.c21
-rw-r--r--drivers/hid/hid-steam.c1
-rw-r--r--drivers/hid/hid-steelseries.c120
-rw-r--r--drivers/hid/hid-thrustmaster.c8
-rw-r--r--drivers/hid/hid-uclogic-params.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c12
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c21
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c19
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c30
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h1
-rw-r--r--drivers/hid/intel-thc-hid/Kconfig43
-rw-r--r--drivers/hid/intel-thc-hid/Makefile22
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c969
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h186
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c166
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c224
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h20
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c987
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h172
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c165
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c414
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h25
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c1578
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h116
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c969
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h146
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h881
-rw-r--r--drivers/hid/wacom.h8
-rw-r--r--drivers/hid/wacom_sys.c43
-rw-r--r--drivers/hid/wacom_wac.c5
-rw-r--r--drivers/hwmon/Kconfig20
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c41
-rw-r--r--drivers/hwmon/asus-ec-sensors.c13
-rw-r--r--drivers/hwmon/asus_atk0110.c15
-rw-r--r--drivers/hwmon/chipcap2.c63
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c8
-rw-r--r--drivers/hwmon/drivetemp.c2
-rw-r--r--drivers/hwmon/hwmon.c27
-rw-r--r--drivers/hwmon/isl28022.c11
-rw-r--r--drivers/hwmon/k10temp.c7
-rw-r--r--drivers/hwmon/lm75.c339
-rw-r--r--drivers/hwmon/ltc2991.c2
-rw-r--r--drivers/hwmon/nct6683.c6
-rw-r--r--drivers/hwmon/nct6775-core.c6
-rw-r--r--drivers/hwmon/occ/p9_sbe.c4
-rw-r--r--drivers/hwmon/pmbus/Kconfig30
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c10
-rw-r--r--drivers/hwmon/pmbus/crps.c74
-rw-r--r--drivers/hwmon/pmbus/dps920ab.c7
-rw-r--r--drivers/hwmon/pmbus/max15301.c1
-rw-r--r--drivers/hwmon/pmbus/pmbus.h4
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c94
-rw-r--r--drivers/hwmon/pmbus/tps25990.c436
-rw-r--r--drivers/hwmon/pwm-fan.c26
-rw-r--r--drivers/hwmon/qnap-mcu-hwmon.c364
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c22
-rw-r--r--drivers/hwmon/spd5118.c8
-rw-r--r--drivers/hwmon/tmp108.c13
-rw-r--r--drivers/hwmon/tmp513.c7
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-amd756.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c112
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c5
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c35
-rw-r--r--drivers/i2c/busses/i2c-i801.c131
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c811
-rw-r--r--drivers/i2c/busses/i2c-imx.c99
-rw-r--r--drivers/i2c/busses/i2c-isch.c6
-rw-r--r--drivers/i2c/busses/i2c-keba.c8
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c427
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c56
-rw-r--r--drivers/i2c/busses/i2c-rcar.c20
-rw-r--r--drivers/i2c/busses/i2c-riic.c134
-rw-r--r--drivers/i2c/busses/i2c-xiic.c281
-rw-r--r--drivers/i2c/i2c-atr.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c22
-rw-r--r--drivers/i2c/i2c-core-base.c131
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c8
-rw-r--r--drivers/i2c/i2c-slave-testunit.c19
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c6
-rw-r--r--drivers/i3c/master.c14
-rw-r--r--drivers/i3c/master/Kconfig11
-rw-r--r--drivers/i3c/master/dw-i3c-master.c15
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c3
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c17
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c148
-rw-r--r--drivers/idle/intel_idle.c7
-rw-r--r--drivers/infiniband/core/cache.c35
-rw-r--r--drivers/infiniband/core/device.c116
-rw-r--r--drivers/infiniband/core/ud_header.c83
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c42
-rw-r--r--drivers/infiniband/hw/Makefile2
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c47
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c339
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c117
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h6
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c8
-rw-r--r--drivers/infiniband/hw/efa/efa.h8
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c28
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig2
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h14
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c71
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c26
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c65
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h135
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c62
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c301
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c568
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h166
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h14
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c31
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c14
-rw-r--r--drivers/infiniband/hw/hns/Kconfig20
-rw-r--r--drivers/infiniband/hw/hns/Makefile9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c13
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h4
-rw-r--r--drivers/infiniband/hw/irdma/protos.h4
-rw-r--r--drivers/infiniband/hw/irdma/utils.c71
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c60
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c286
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c12
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c37
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c17
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c70
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c73
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c66
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c5
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/input/Kconfig14
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/evbug.c100
-rw-r--r--drivers/input/ff-core.c91
-rw-r--r--drivers/input/ff-memless.c18
-rw-r--r--drivers/input/input-mt.c34
-rw-r--r--drivers/input/input-poller.c4
-rw-r--r--drivers/input/input.c339
-rw-r--r--drivers/input/joystick/sidewinder.c3
-rw-r--r--drivers/input/joystick/xpad.c9
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c3
-rw-r--r--drivers/input/keyboard/lm8323.c3
-rw-r--r--drivers/input/misc/Kconfig12
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ideapad_slidebar.c4
-rw-r--r--drivers/input/misc/max77693-haptic.c3
-rw-r--r--drivers/input/misc/mma8450.c16
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c8
-rw-r--r--drivers/input/misc/qnap-mcu-input.c153
-rw-r--r--drivers/input/misc/regulator-haptic.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c3
-rw-r--r--drivers/input/mouse/synaptics.c56
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/input/serio/i8042.c17
-rw-r--r--drivers/input/touchscreen/egalax_ts.c3
-rw-r--r--drivers/iommu/Kconfig12
-rw-r--r--drivers/iommu/amd/amd_iommu.h9
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h41
-rw-r--r--drivers/iommu/amd/init.c255
-rw-r--r--drivers/iommu/amd/iommu.c533
-rw-r--r--drivers/iommu/amd/pasid.c3
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c8
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c15
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c298
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h31
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c8
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c5
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c121
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c43
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h2
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/cache.c11
-rw-r--r--drivers/iommu/intel/cap_audit.c217
-rw-r--r--drivers/iommu/intel/cap_audit.h131
-rw-r--r--drivers/iommu/intel/iommu.c50
-rw-r--r--drivers/iommu/intel/irq_remapping.c9
-rw-r--r--drivers/iommu/intel/pasid.c22
-rw-r--r--drivers/iommu/intel/pasid.h6
-rw-r--r--drivers/iommu/io-pgtable-arm.c227
-rw-r--r--drivers/iommu/iommu.c37
-rw-r--r--drivers/iommu/iommufd/fault.c44
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c10
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h29
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c2
-rw-r--r--drivers/iommu/iommufd/main.c32
-rw-r--r--drivers/iommu/iommufd/selftest.c45
-rw-r--r--drivers/iommu/msm_iommu.c51
-rw-r--r--drivers/iommu/mtk_iommu.c9
-rw-r--r--drivers/iommu/mtk_iommu_v1.c3
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/riscv/iommu-pci.c8
-rw-r--r--drivers/iommu/riscv/iommu-platform.c108
-rw-r--r--drivers/iommu/riscv/iommu.c14
-rw-r--r--drivers/iommu/riscv/iommu.h1
-rw-r--r--drivers/iommu/rockchip-iommu.c3
-rw-r--r--drivers/irqchip/Kconfig6
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c28
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-keystone.c11
-rw-r--r--drivers/irqchip/irq-loongarch-avec.c16
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c3
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c1
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c1
-rw-r--r--drivers/irqchip/irq-ts4800.c2
-rw-r--r--drivers/irqchip/irqchip.c4
-rw-r--r--drivers/isdn/mISDN/core.c14
-rw-r--r--drivers/isdn/mISDN/core.h1
-rw-r--r--drivers/leds/Kconfig44
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/leds/led-triggers.c4
-rw-r--r--drivers/leds/leds-cht-wcove.c6
-rw-r--r--drivers/leds/leds-lp8860.c2
-rw-r--r--drivers/leds/leds-lp8864.c296
-rw-r--r--drivers/leds/leds-netxbig.c1
-rw-r--r--drivers/leds/leds-qnap-mcu.c227
-rw-r--r--drivers/leds/leds-st1202.c416
-rw-r--r--drivers/leds/leds-turris-omnia.c336
-rw-r--r--drivers/leds/leds-upboard.c126
-rw-r--r--drivers/leds/leds.h4
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c8
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c2
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c2
-rw-r--r--drivers/macintosh/mac_hid.c2
-rw-r--r--drivers/md/Kconfig13
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bcache/movinggc.c2
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-verity-fec.c6
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/md-autodetect.c8
-rw-r--r--drivers/md/md-bitmap.c116
-rw-r--r--drivers/md/md-bitmap.h7
-rw-r--r--drivers/md/md-linear.c354
-rw-r--r--drivers/md/md.c31
-rw-r--r--drivers/md/md.h5
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c36
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c28
-rw-r--r--drivers/md/raid10.h1
-rw-r--r--drivers/md/raid5-cache.c20
-rw-r--r--drivers/md/raid5.c111
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c29
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c2
-rw-r--r--drivers/memory/omap-gpmc.c33
-rw-r--r--drivers/memory/tegra/tegra20-emc.c8
-rw-r--r--drivers/memory/ti-aemif.c192
-rw-r--r--drivers/memstick/core/ms_block.c3
-rw-r--r--drivers/memstick/core/mspro_block.c3
-rw-r--r--drivers/mfd/Kconfig25
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/axp20x.c7
-rw-r--r--drivers/mfd/cs42l43-i2c.c8
-rw-r--r--drivers/mfd/cs42l43-sdw.c10
-rw-r--r--drivers/mfd/cs42l43.c37
-rw-r--r--drivers/mfd/cs42l43.h1
-rw-r--r--drivers/mfd/da9052-core.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c2
-rw-r--r--drivers/mfd/lpc_ich.c3
-rw-r--r--drivers/mfd/qnap-mcu.c338
-rw-r--r--drivers/mfd/stpmic1.c6
-rw-r--r--drivers/mfd/syscon.c95
-rw-r--r--drivers/mfd/tps65219.c15
-rw-r--r--drivers/mfd/upboard-fpga.c325
-rw-r--r--drivers/mfd/vexpress-sysreg.c1
-rw-r--r--drivers/misc/cxl/Kconfig6
-rw-r--r--drivers/misc/cxl/of.c2
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/mmc/core/core.c7
-rw-r--r--drivers/mmc/core/host.c1
-rw-r--r--drivers/mmc/core/queue.c2
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/host/bcm2835.c20
-rw-r--r--drivers/mmc/host/cqhci-crypto.c38
-rw-r--r--drivers/mmc/host/cqhci.h8
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798mv200.c8
-rw-r--r--drivers/mmc/host/mtk-sd.c21
-rw-r--r--drivers/mmc/host/mxcmmc.c8
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c3
-rw-r--r--drivers/mmc/host/sdhci-acpi.c20
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c3
-rw-r--r--drivers/mmc/host/sdhci-msm.c147
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c1
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/nand/spi/core.c2
-rw-r--r--drivers/mtd/spi-nor/core.c2
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/net/bareudp.c16
-rw-r--r--drivers/net/can/dev/dev.c2
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/kvaser_pciefd.c81
-rw-r--r--drivers/net/can/m_can/m_can.c22
-rw-r--r--drivers/net/can/m_can/m_can.h1
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c30
-rw-r--r--drivers/net/can/m_can/tcan4x5x.h2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c15
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c3
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c133
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c38
-rw-r--r--drivers/net/dsa/b53/b53_common.c14
-rw-r--r--drivers/net/dsa/b53/b53_priv.h2
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c118
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h3
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c4
-rw-r--r--drivers/net/dsa/mt7530.c16
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c60
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6185.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-639x.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c3
-rw-r--r--drivers/net/dsa/ocelot/felix.c9
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c10
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c7
-rw-r--r--drivers/net/dsa/qca/qca8k.h3
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c7
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c8
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c19
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c16
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h3
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c39
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c135
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c114
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c64
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c28
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c330
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h29
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h23
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h31
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c13
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c7
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c35
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c35
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c604
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h22
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c74
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile3
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h29
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c160
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c134
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c181
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c199
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c109
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h28
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c120
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c458
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c231
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c79
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c20
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c550
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.h71
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.c)2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h100
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c495
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c176
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c155
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c427
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c32
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h58
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c270
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c562
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c118
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c50
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c2658
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h81
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c459
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h72
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h1074
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c25
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c141
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c113
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c41
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c68
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c23
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c29
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c25
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c1056
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c114
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c10
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c1
-rw-r--r--drivers/net/ethernet/mediatek/airoha_eth.c567
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c292
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c365
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c159
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1377
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c450
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c218
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c207
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h147
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c221
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c211
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h20
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c543
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c160
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c53
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c72
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c12
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c5
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c238
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h16
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c21
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c46
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h17
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c68
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c48
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h145
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c57
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h5
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c14
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c53
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c40
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h22
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c9
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c69
-rw-r--r--drivers/net/ethernet/realtek/r8169.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c159
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c38
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h1
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c14
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c119
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h48
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c5
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c328
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/niu.c22
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c452
-rw-r--r--drivers/net/ethernet/ti/cpsw.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c175
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c43
-rw-r--r--drivers/net/geneve.c12
-rw-r--r--drivers/net/gtp.c30
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c6
-rw-r--r--drivers/net/loopback.c5
-rw-r--r--drivers/net/mctp/mctp-i2c.c3
-rw-r--r--drivers/net/mdio/mdio-octeon.c25
-rw-r--r--drivers/net/mii.c3
-rw-r--r--drivers/net/netconsole.c62
-rw-r--r--drivers/net/netdevsim/ethtool.c12
-rw-r--r--drivers/net/netdevsim/netdev.c268
-rw-r--r--drivers/net/netdevsim/netdevsim.h8
-rw-r--r--drivers/net/netkit.c66
-rw-r--r--drivers/net/pcs/pcs-lynx.c39
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c25
-rw-r--r--drivers/net/pcs/pcs-xpcs.c44
-rw-r--r--drivers/net/pfcp.c15
-rw-r--r--drivers/net/phy/Kconfig14
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin.c2
-rw-r--r--drivers/net/phy/adin1100.c2
-rw-r--r--drivers/net/phy/air_en8811h.c2
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c2
-rw-r--r--drivers/net/phy/ax88796b.c2
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm54140.c2
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/bcm84881.c12
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/cortina.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/dp83822.c385
-rw-r--r--drivers/net/phy/dp83848.c2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/dp83869.c2
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/dp83td510.c114
-rw-r--r--drivers/net/phy/dp83tg720.c163
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/intel-xway.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c2
-rw-r--r--drivers/net/phy/marvell-88x2222.c2
-rw-r--r--drivers/net/phy/marvell.c54
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c2
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c2
-rw-r--r--drivers/net/phy/meson-gxl.c2
-rw-r--r--drivers/net/phy/micrel.c4
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c1309
-rw-r--r--drivers/net/phy/microchip_rds_ptp.h247
-rw-r--r--drivers/net/phy/microchip_t1.c53
-rw-r--r--drivers/net/phy/microchip_t1s.c2
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/mxl-gpy.c2
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/ncn26000.c2
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c2
-rw-r--r--drivers/net/phy/nxp-cbtx.c2
-rw-r--r--drivers/net/phy/nxp-tja11xx.c2
-rw-r--r--drivers/net/phy/phy-c45.c14
-rw-r--r--drivers/net/phy/phy.c172
-rw-r--r--drivers/net/phy/phy_device.c83
-rw-r--r--drivers/net/phy/phylink.c588
-rw-r--r--drivers/net/phy/qcom/at803x.c2
-rw-r--r--drivers/net/phy/qcom/qca807x.c2
-rw-r--r--drivers/net/phy/qcom/qca808x.c2
-rw-r--r--drivers/net/phy/qcom/qca83xx.c2
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek/Kconfig11
-rw-r--r--drivers/net/phy/realtek/Makefile4
-rw-r--r--drivers/net/phy/realtek/realtek.h10
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c79
-rw-r--r--drivers/net/phy/realtek/realtek_main.c (renamed from drivers/net/phy/realtek.c)58
-rw-r--r--drivers/net/phy/rockchip.c2
-rw-r--r--drivers/net/phy/smsc.c2
-rw-r--r--drivers/net/phy/spi_ks8995.c8
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/teranetics.c2
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c2
-rw-r--r--drivers/net/pse-pd/pd692x0.c224
-rw-r--r--drivers/net/pse-pd/pse_core.c183
-rw-r--r--drivers/net/pse-pd/pse_regulator.c23
-rw-r--r--drivers/net/pse-pd/tps23881.c449
-rw-r--r--drivers/net/tap.c6
-rw-r--r--drivers/net/team/team_core.c7
-rw-r--r--drivers/net/tun.c20
-rw-r--r--drivers/net/usb/lan78xx.c938
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/vrf.c49
-rw-r--r--drivers/net/vxlan/vxlan_core.c187
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c2
-rw-r--r--drivers/net/wan/framer/framer-core.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c132
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c747
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h179
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c1183
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h373
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c84
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h33
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c156
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c327
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c12
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h14
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c2713
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h27
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c225
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h26
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c489
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h21
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c430
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h171
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c54
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c38
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c31
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c223
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c291
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c406
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c240
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c138
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c130
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c236
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c216
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c403
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c57
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c903
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c504
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h177
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c168
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c9
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c61
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig5
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c73
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c21
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c68
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c28
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c52
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c267
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig6
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c47
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h9
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c31
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h9
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c193
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h163
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c332
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h85
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c173
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h43
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c301
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c323
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h37
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c42
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c57
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c50
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c54
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c57
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c39
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c56
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c23
-rw-r--r--drivers/nfc/nfcmrvl/uart.c9
-rw-r--r--drivers/nfc/st21nfca/dep.c18
-rw-r--r--drivers/nfc/st21nfca/i2c.c1
-rw-r--r--drivers/nvme/host/apple.c2
-rw-r--r--drivers/nvme/host/core.c67
-rw-r--r--drivers/nvme/host/fc.c1
-rw-r--r--drivers/nvme/host/nvme.h39
-rw-r--r--drivers/nvme/host/pci.c17
-rw-r--r--drivers/nvme/host/tcp.c70
-rw-r--r--drivers/nvme/target/Kconfig11
-rw-r--r--drivers/nvme/target/Makefile2
-rw-r--r--drivers/nvme/target/admin-cmd.c388
-rw-r--r--drivers/nvme/target/configfs.c49
-rw-r--r--drivers/nvme/target/core.c266
-rw-r--r--drivers/nvme/target/discovery.c17
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd.c101
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c3
-rw-r--r--drivers/nvme/target/nvmet.h110
-rw-r--r--drivers/nvme/target/passthru.c18
-rw-r--r--drivers/nvme/target/pci-epf.c2591
-rw-r--r--drivers/nvme/target/zns.c3
-rw-r--r--drivers/of/address.c51
-rw-r--r--drivers/of/base.c25
-rw-r--r--drivers/of/fdt.c33
-rw-r--r--drivers/of/fdt_address.c21
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/kobj.c4
-rw-r--r--drivers/of/of_private.h20
-rw-r--r--drivers/of/of_reserved_mem.c15
-rw-r--r--drivers/of/pdt.c2
-rw-r--r--drivers/of/platform.c23
-rw-r--r--drivers/of/property.c35
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi13
-rw-r--r--drivers/of/unittest.c23
-rw-r--r--drivers/opp/core.c99
-rw-r--r--drivers/opp/of.c4
-rw-r--r--drivers/opp/opp.h1
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c1
-rw-r--r--drivers/pci/pci-driver.c14
-rw-r--r--drivers/pci/pcie/bwctrl.c25
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c2
-rw-r--r--drivers/perf/arm-cmn.c4
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c75
-rw-r--r--drivers/perf/arm_spe_pmu.c22
-rw-r--r--drivers/perf/dwc_pcie_pmu.c72
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c33
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c42
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c61
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c48
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c44
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c53
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c160
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h49
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c43
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c45
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c530
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c66
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c3
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c3
-rw-r--r--drivers/pinctrl/core.c50
-rw-r--r--drivers/pinctrl/mediatek/Kconfig7
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7988.c1556
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c57
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c5
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c42
-rw-r--r--drivers/pinctrl/pinctrl-amd.c30
-rw-r--r--drivers/pinctrl/pinctrl-amd.h7
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c11
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c20
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c200
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h3
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c2
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm6
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c34
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c1620
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c10
-rw-r--r--drivers/pinctrl/renesas/Kconfig1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c190
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c4
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c81
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c12
-rw-r--r--drivers/platform/chrome/cros_ec.c5
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c3
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c2
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c203
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c69
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c2
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c4
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c10
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c2
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c10
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c79
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c5
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c3
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h130
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c20
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c113
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c2
-rw-r--r--drivers/platform/surface/surface_platform_profile.c44
-rw-r--r--drivers/platform/x86/acer-wmi.c550
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c12
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c47
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c12
-rw-r--r--drivers/platform/x86/amd/pmc/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile2
-rw-r--r--drivers/platform/x86/amd/pmc/mp1_stb.c332
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c394
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h24
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile2
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c30
-rw-r--r--drivers/platform/x86/amd/pmf/core.c22
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c66
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h39
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c75
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c49
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c55
-rw-r--r--drivers/platform/x86/asus-wmi.h3
-rw-r--r--drivers/platform/x86/dell/Kconfig1
-rw-r--r--drivers/platform/x86/dell/Makefile1
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c522
-rw-r--r--drivers/platform/x86/dell/dcdbas.c10
-rw-r--r--drivers/platform/x86/dell/dcdbas.h8
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c6
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c256
-rw-r--r--drivers/platform/x86/dell/dell-pc.c69
-rw-r--r--drivers/platform/x86/dell/dell-smo8800-ids.h27
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c16
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c7
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c17
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c20
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c42
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h5
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c8
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c14
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c449
-rw-r--r--drivers/platform/x86/hp/hp_accel.c4
-rw-r--r--drivers/platform/x86/ideapad-laptop.c43
-rw-r--r--drivers/platform/x86/inspur_platform_profile.c43
-rw-r--r--drivers/platform/x86/intel/Kconfig1
-rw-r--r--drivers/platform/x86/intel/bytcrc_pwrsrc.c79
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c14
-rw-r--r--drivers/platform/x86/intel/int3472/common.c2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c24
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c3
-rw-r--r--drivers/platform/x86/intel/plr_tpmi.c2
-rw-r--r--drivers/platform/x86/intel/pmc/core.c7
-rw-r--r--drivers/platform/x86/intel/pmt/class.c4
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c33
-rw-r--r--drivers/platform/x86/intel/sdsi.c34
-rw-r--r--drivers/platform/x86/lenovo-wmi-camera.c69
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c5
-rw-r--r--drivers/platform/x86/msi-laptop.c6
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/quickstart.c1
-rw-r--r--drivers/platform/x86/serdev_helpers.h60
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c12
-rw-r--r--drivers/platform/x86/think-lmi.c13
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c46
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/wmi-bmof.c75
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile2
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c31
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c8
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c16
-rw-r--r--drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c261
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h13
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c8
-rw-r--r--drivers/pmdomain/core.c15
-rw-r--r--drivers/pmdomain/imx/gpcv2.c2
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c1
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c3
-rw-r--r--drivers/pmdomain/mediatek/Kconfig12
-rw-r--r--drivers/pmdomain/mediatek/Makefile8
-rw-r--r--drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c144
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c81
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c29
-rw-r--r--drivers/powercap/intel_rapl_common.c1
-rw-r--r--drivers/ptp/ptp_clock.c2
-rw-r--r--drivers/ptp/ptp_ocp.c16
-rw-r--r--drivers/pwm/pwm-dwc.c14
-rw-r--r--drivers/pwm/pwm-lpss-pci.c9
-rw-r--r--drivers/pwm/pwm-stm32-lp.c8
-rw-r--r--drivers/pwm/pwm-stm32.c7
-rw-r--r--drivers/ras/amd/atl/Kconfig1
-rw-r--r--drivers/ras/amd/atl/internal.h1
-rw-r--r--drivers/regulator/bd96801-regulator.c130
-rw-r--r--drivers/regulator/core.c153
-rw-r--r--drivers/regulator/of_regulator.c17
-rw-r--r--drivers/regulator/pca9450-regulator.c111
-rw-r--r--drivers/regulator/tps65219-regulator.c39
-rw-r--r--drivers/reset/amlogic/reset-meson-aux.c97
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c1
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/char/sclp.h18
-rw-r--r--drivers/s390/char/sclp_config.c4
-rw-r--r--drivers/s390/char/sclp_early.c3
-rw-r--r--drivers/s390/char/sclp_pci.c19
-rw-r--r--drivers/s390/char/sclp_sd.c4
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/qdio.h9
-rw-r--r--drivers/s390/cio/qdio_setup.c21
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c128
-rw-r--r--drivers/scsi/cxlflash/Kconfig6
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c3
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/scsi_lib.c8
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c7
-rw-r--r--drivers/scsi/sr.c5
-rw-r--r--drivers/scsi/virtio_scsi.c3
-rw-r--r--drivers/soc/atmel/soc.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c6
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/soc-imx9.c128
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c23
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c19
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c4
-rw-r--r--drivers/soc/qcom/Kconfig2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c58
-rw-r--r--drivers/soc/qcom/pmic_glink.c70
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c11
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c2
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c2
-rw-r--r--drivers/soc/qcom/smem_state.c3
-rw-r--r--drivers/soc/qcom/socinfo.c3
-rw-r--r--drivers/soc/renesas/Kconfig5
-rw-r--r--drivers/soc/samsung/exynos-pmu.c2
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c20
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c17
-rw-r--r--drivers/spi/Kconfig12
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/atmel-quadspi.c987
-rw-r--r--drivers/spi/spi-amd.c26
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c7
-rw-r--r--drivers/spi/spi-cadence-quadspi.c57
-rw-r--r--drivers/spi/spi-dw-core.c10
-rw-r--r--drivers/spi/spi-fsl-qspi.c12
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-kspi2.c431
-rw-r--r--drivers/spi/spi-mem.c64
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c26
-rw-r--r--drivers/spi/spi-mt65xx.c7
-rw-r--r--drivers/spi/spi-mxic.c3
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-nxp-fspi.c12
-rw-r--r--drivers/spi/spi-pxa2xx.c88
-rw-r--r--drivers/spi/spi-rockchip-sfc.c233
-rw-r--r--drivers/spi/spi-sc18is602.c34
-rw-r--r--drivers/spi/spi-sn-f-ospi.c8
-rw-r--r--drivers/spi/spi-ti-qspi.c19
-rw-r--r--drivers/spi/spi-zynq-qspi.c26
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c13
-rw-r--r--drivers/spi/spi.c43
-rw-r--r--drivers/spi/spidev.c30
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c3
-rw-r--r--drivers/target/iscsi/Kconfig4
-rw-r--r--drivers/target/iscsi/iscsi_target.c153
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c50
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c21
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--drivers/tee/optee/smc_abi.c5
-rw-r--r--drivers/thermal/gov_bang_bang.c57
-rw-r--r--drivers/thermal/gov_user_space.c12
-rw-r--r--drivers/thermal/intel/Kconfig4
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c4
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/ufs/core/ufshcd.c1
-rw-r--r--drivers/usb/core/usb-acpi.c3
-rw-r--r--drivers/usb/storage/scsiglue.c5
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c2
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c4
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c8
-rw-r--r--drivers/video/fbdev/efifb.c4
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c17
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h1
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c6
-rw-r--r--drivers/video/fbdev/sm501fb.c5
-rw-r--r--drivers/video/fbdev/udlfb.c8
-rw-r--r--drivers/video/fbdev/vga16fb.c7
-rw-r--r--drivers/video/hdmi.c28
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c8
-rw-r--r--drivers/virt/coco/sev-guest/Kconfig1
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c485
-rw-r--r--drivers/virt/vboxguest/Kconfig2
-rw-r--r--drivers/virtio/virtio.c19
-rw-r--r--drivers/watchdog/watchdog_dev.c2
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--drivers/zorro/zorro-sysfs.c10
2926 files changed, 154948 insertions, 40919 deletions
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 64065fb8922b..5b9490367a39 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -24,6 +24,7 @@ menuconfig DRM_ACCEL
different device files, called accel/accel* (in /dev, sysfs
and debugfs).
+source "drivers/accel/amdxdna/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index ab3df932937f..a301fb6089d4 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
diff --git a/drivers/accel/amdxdna/Kconfig b/drivers/accel/amdxdna/Kconfig
new file mode 100644
index 000000000000..f39d7a87296c
--- /dev/null
+++ b/drivers/accel/amdxdna/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_AMDXDNA
+ tristate "AMD AI Engine"
+ depends on AMD_IOMMU
+ depends on DRM_ACCEL
+ depends on PCI && HAS_IOMEM
+ depends on X86_64
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ select FW_LOADER
+ select HMM_MIRROR
+ help
+	  Choose this option to enable support for the NPU integrated into
+	  AMD client CPUs such as the AMD Ryzen AI 300 series. The AMD NPU
+	  can be used to accelerate machine learning applications.
+
+ If "M" is selected, the driver module will be amdxdna.
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
new file mode 100644
index 000000000000..0e9adf6890a0
--- /dev/null
+++ b/drivers/accel/amdxdna/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+amdxdna-y := \
+ aie2_ctx.o \
+ aie2_error.o \
+ aie2_message.o \
+ aie2_pci.o \
+ aie2_pm.o \
+ aie2_psp.o \
+ aie2_smu.o \
+ aie2_solver.o \
+ amdxdna_ctx.o \
+ amdxdna_gem.o \
+ amdxdna_mailbox.o \
+ amdxdna_mailbox_helper.o \
+ amdxdna_pci_drv.o \
+ amdxdna_sysfs.o \
+ npu1_regs.o \
+ npu2_regs.o \
+ npu4_regs.o \
+ npu5_regs.o \
+ npu6_regs.o
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) = amdxdna.o
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
new file mode 100644
index 000000000000..5119bccd1917
--- /dev/null
+++ b/drivers/accel/amdxdna/TODO
@@ -0,0 +1,3 @@
+- Add import and export BO support
+- Add debugfs support
+- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
new file mode 100644
index 000000000000..5f43db02b240
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_syncobj.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static bool force_cmdlist;
+module_param(force_cmdlist, bool, 0600);
+MODULE_PARM_DESC(force_cmdlist, "Force the use of command lists (default false)");
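+/*
+ * Note: with permissions 0600, the parameter is also exposed writable to
+ * root at /sys/module/amdxdna/parameters/force_cmdlist, so it can be
+ * toggled at runtime as well as set at module load time.
+ */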
+
+#define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */
+
+static void aie2_job_release(struct kref *ref)
+{
+ struct amdxdna_sched_job *job;
+
+ job = container_of(ref, struct amdxdna_sched_job, refcnt);
+ amdxdna_sched_job_cleanup(job);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+ kfree(job);
+}
+
+static void aie2_job_put(struct amdxdna_sched_job *job)
+{
+ kref_put(&job->refcnt, aie2_job_release);
+}
+
+/* bad_job is only set by aie2_sched_job_timedout(); all other callers pass NULL */
+static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
+ struct drm_sched_job *bad_job)
+{
+ drm_sched_stop(&hwctx->priv->sched, bad_job);
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+}
+
+static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_gem_obj *heap = hwctx->priv->heap;
+ int ret;
+
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
+ goto out;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
+ goto out;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
+ goto out;
+ }
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
+ goto out;
+ }
+
+out:
+ drm_sched_start(&hwctx->priv->sched, 0);
+ XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
+ return ret;
+}
+
+void aie2_restart_ctx(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ if (hwctx->status != HWCTX_STAT_STOP)
+ continue;
+
+ hwctx->status = hwctx->old_status;
+ XDNA_DBG(xdna, "Resetting %s", hwctx->name);
+ aie2_hwctx_restart(xdna, hwctx);
+ }
+ mutex_unlock(&client->hwctx_lock);
+}
+
+static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *fence, *out_fence = NULL;
+ int ret;
+
+ fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
+ if (!fence)
+ return NULL;
+
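+	/*
+	 * The context syncobj carries a dma_fence_chain indexed by submission
+	 * sequence number; walk it to the node for @seq and return the fence
+	 * contained there.
+	 */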
+ ret = dma_fence_chain_find_seqno(&fence, seq);
+ if (ret)
+ goto out;
+
+ out_fence = dma_fence_get(dma_fence_chain_contained(fence));
+
+out:
+ dma_fence_put(fence);
+ return out_fence;
+}
+
+static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
+{
+ struct dma_fence *fence;
+
+ fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
+ if (!fence)
+ return;
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+}
+
+void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+	/*
+	 * A command timeout is unlikely, but even if it happens it does not
+	 * break the system: aie2_hwctx_stop() destroys the mailbox channel
+	 * and aborts all outstanding commands.
+	 */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+	/*
+	 * The resume path cannot guarantee that the mailbox channel is
+	 * regenerated. If regeneration fails, submitting a message to the
+	 * channel returns an error.
+	 */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ hwctx->status = hwctx->old_status;
+ aie2_hwctx_restart(xdna, hwctx);
+}
+
+static void
+aie2_sched_notify(struct amdxdna_sched_job *job)
+{
+ struct dma_fence *fence = job->fence;
+
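+	/*
+	 * Completion drops what the submission side holds for an in-flight
+	 * command: a fence reference, the job reference and mm reference
+	 * taken in aie2_sched_job_run(), and the job_sem slot that bounds
+	 * outstanding commands.
+	 */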
+ trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+ job->hwctx->priv->completed++;
+ dma_fence_signal(fence);
+
+ up(&job->hwctx->priv->job_sem);
+ job->job_done = true;
+ dma_fence_put(fence);
+ mmput_async(job->mm);
+ aie2_job_put(job);
+}
+
+static int
+aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ u32 ret = 0;
+ u32 status;
+
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = *data;
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ if (status == AIE2_STATUS_SUCCESS)
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ else
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ u32 ret = 0;
+ u32 status;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = *data;
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ struct cmd_chain_resp *resp;
+ struct amdxdna_dev *xdna;
+ u32 fail_cmd_status;
+ u32 fail_cmd_idx;
+ u32 ret = 0;
+
+ cmd_abo = job->cmd_bo;
+ if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
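+	/*
+	 * The size check above implies the response carries three u32 words.
+	 * As consumed here the layout amounts to (the authoritative
+	 * definition lives in aie2_msg_priv.h; member order is an
+	 * assumption):
+	 *
+	 *	struct cmd_chain_resp {
+	 *		u32 status;
+	 *		u32 fail_cmd_idx;
+	 *		u32 fail_cmd_status;
+	 *	};
+	 */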
+ resp = (struct cmd_chain_resp *)data;
+ xdna = job->hwctx->client->xdna;
+ XDNA_DBG(xdna, "Status 0x%x", resp->status);
+ if (resp->status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ goto out;
+ }
+
+ /* Slow path to handle error, read from ringbuf on BAR */
+ fail_cmd_idx = resp->fail_cmd_idx;
+ fail_cmd_status = resp->fail_cmd_status;
+ XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
+ fail_cmd_idx, fail_cmd_status);
+
+ if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+ amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
+ struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
+
+ cc->error_index = fail_cmd_idx;
+ if (cc->error_index >= cc->command_count)
+ cc->error_index = 0;
+ }
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static struct dma_fence *
+aie2_sched_job_run(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct dma_fence *fence;
+ int ret;
+
+ if (!mmget_not_zero(job->mm))
+ return ERR_PTR(-ESRCH);
+
+ kref_get(&job->refcnt);
+ fence = dma_fence_get(job->fence);
+
+ if (unlikely(!cmd_abo)) {
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
+ goto out;
+ }
+
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
+ ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else if (force_cmdlist)
+ ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else
+ ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);
+
+out:
+ if (ret) {
+ dma_fence_put(job->fence);
+ aie2_job_put(job);
+ mmput(job->mm);
+ fence = ERR_PTR(ret);
+ }
+ trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);
+
+ return fence;
+}
+
+static void aie2_sched_job_free(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+
+ trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
+ if (!job->job_done)
+ up(&hwctx->priv->job_sem);
+
+ drm_sched_job_cleanup(sched_job);
+ aie2_job_put(job);
+}
+
+static enum drm_gpu_sched_stat
+aie2_sched_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct amdxdna_dev *xdna;
+
+ xdna = hwctx->client->xdna;
+ trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
+ mutex_lock(&xdna->dev_lock);
+ aie2_hwctx_stop(xdna, hwctx, sched_job);
+
+ aie2_hwctx_restart(xdna, hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
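+	/*
+	 * Returning NOMINAL tells the DRM scheduler that recovery succeeded
+	 * and the restarted context can keep executing jobs.
+	 */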
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static const struct drm_sched_backend_ops sched_ops = {
+ .run_job = aie2_sched_job_run,
+ .free_job = aie2_sched_job_free,
+ .timedout_job = aie2_sched_job_timedout,
+};
+
+static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int start, end, first, last;
+ u32 width = 1, entries = 0;
+ int i;
+
+ if (!hwctx->num_tiles) {
+ XDNA_ERR(xdna, "Number of tiles is zero");
+ return -EINVAL;
+ }
+
+ ndev = xdna->dev_handle;
+ if (unlikely(!ndev->metadata.core.row_count)) {
+ XDNA_WARN(xdna, "Core tile row count is zero");
+ return -EINVAL;
+ }
+
+ hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
+ if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
+ XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
+ return -EINVAL;
+ }
+
+ if (ndev->priv->col_align == COL_ALIGN_NATURE)
+ width = hwctx->num_col;
+
+ /*
+	 * In range [start, end], find the columns that are multiples of 'width'.
+ * 'first' is the first column,
+ * 'last' is the last column,
+ * 'entries' is the total number of columns.
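+	 *
+	 * Worked example (illustrative numbers): start = 1, end = 9 and
+	 * width = 4 give first = 1 + (4 - 1 % 4) % 4 = 4 and
+	 * last = 9 - 9 % 4 = 8, so entries = (8 - 4) / 4 + 1 = 2 and
+	 * col_list becomes {4, 8}.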
+ */
+ start = xdna->dev_info->first_col;
+ end = ndev->total_col - hwctx->num_col;
+ if (start > 0 && end == 0) {
+ XDNA_DBG(xdna, "Force start from col 0");
+ start = 0;
+ }
+ first = start + (width - start % width) % width;
+ last = end - end % width;
+ if (last >= first)
+ entries = (last - first) / width + 1;
+ XDNA_DBG(xdna, "start %d end %d first %d last %d",
+ start, end, first, last);
+
+ if (unlikely(!entries)) {
+ XDNA_ERR(xdna, "Start %d end %d width %d",
+ start, end, width);
+ return -EINVAL;
+ }
+
+ hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
+ if (!hwctx->col_list)
+ return -ENOMEM;
+
+ hwctx->col_list_len = entries;
+ hwctx->col_list[0] = first;
+ for (i = 1; i < entries; i++)
+ hwctx->col_list[i] = hwctx->col_list[i - 1] + width;
+
+ print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
+ entries * sizeof(*hwctx->col_list), false);
+ return 0;
+}
+
+static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct alloc_requests *xrs_req;
+ int ret;
+
+ xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
+ if (!xrs_req)
+ return -ENOMEM;
+
+ xrs_req->cdo.start_cols = hwctx->col_list;
+ xrs_req->cdo.cols_len = hwctx->col_list_len;
+ xrs_req->cdo.ncols = hwctx->num_col;
+ xrs_req->cdo.qos_cap.opc = hwctx->max_opc;
+
+ xrs_req->rqos.gops = hwctx->qos.gops;
+ xrs_req->rqos.fps = hwctx->qos.fps;
+ xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
+ xrs_req->rqos.latency = hwctx->qos.latency;
+ xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
+ xrs_req->rqos.priority = hwctx->qos.priority;
+
+ xrs_req->rid = (uintptr_t)hwctx;
+
+ ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);
+
+ kfree(xrs_req);
+ return ret;
+}
+
+static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ int ret;
+
+ ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
+}
+
+static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct drm_file *filp = hwctx->client->filp;
+ struct drm_syncobj *syncobj;
+ u32 hdl;
+ int ret;
+
+ hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;
+
+ ret = drm_syncobj_create(&syncobj, 0, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
+ return ret;
+ }
+ ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
+ if (ret) {
+ drm_syncobj_put(syncobj);
+ XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
+ return ret;
+ }
+ hwctx->priv->syncobj = syncobj;
+ hwctx->syncobj_hdl = hdl;
+
+ return 0;
+}
+
+static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
+{
+ /*
+ * The syncobj_hdl is owned by user space and will be cleaned up
+ * separately.
+ */
+ drm_syncobj_put(hwctx->priv->syncobj);
+}
+
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct drm_gpu_scheduler *sched;
+ struct amdxdna_hwctx_priv *priv;
+ struct amdxdna_gem_obj *heap;
+ struct amdxdna_dev_hdl *ndev;
+ int i, ret;
+
+ priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ hwctx->priv = priv;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+		XDNA_ERR(xdna, "The client dev heap object does not exist");
+ mutex_unlock(&client->mm_lock);
+ ret = -ENOENT;
+ goto free_priv;
+ }
+ drm_gem_object_get(to_gobj(heap));
+ mutex_unlock(&client->mm_lock);
+ priv->heap = heap;
+ sema_init(&priv->job_sem, HWCTX_MAX_CMDS);
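+	/*
+	 * job_sem bounds in-flight commands per context at HWCTX_MAX_CMDS:
+	 * completion paths up() it (see aie2_sched_notify()), and the
+	 * submit path is presumably the one that down()s it before queueing.
+	 */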
+
+ ret = amdxdna_gem_pin(heap);
+ if (ret) {
+ XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
+ goto put_heap;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ struct amdxdna_gem_obj *abo;
+ struct amdxdna_drm_create_bo args = {
+ .flags = 0,
+ .type = AMDXDNA_BO_DEV,
+ .vaddr = 0,
+ .size = MAX_CHAIN_CMDBUF_SIZE,
+ };
+
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto free_cmd_bufs;
+ }
+
+ XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
+ i, abo->mem.dev_addr, abo->mem.size);
+ priv->cmd_buf[i] = abo;
+ }
+
+ sched = &priv->sched;
+ mutex_init(&priv->io_lock);
+
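+	/*
+	 * Prime lockdep with a reclaim -> io_lock dependency so that any
+	 * GFP_KERNEL allocation made while holding io_lock is immediately
+	 * flagged as a potential deadlock.
+	 */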
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&priv->io_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
+ HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+ NULL, NULL, hwctx->name, xdna->ddev.dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
+ goto free_cmd_bufs;
+ }
+
+ ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (ret) {
+		XDNA_ERR(xdna, "Failed to init sched entity, ret %d", ret);
+ goto free_sched;
+ }
+
+ ret = aie2_hwctx_col_list(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
+ goto free_entity;
+ }
+
+ ret = aie2_alloc_resource(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
+ goto free_col_list;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ ret = aie2_ctx_syncobj_create(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ hwctx->status = HWCTX_STAT_INIT;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num++;
+
+ XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
+
+ return 0;
+
+release_resource:
+ aie2_release_resource(hwctx);
+free_col_list:
+ kfree(hwctx->col_list);
+free_entity:
+ drm_sched_entity_destroy(&priv->entity);
+free_sched:
+ drm_sched_fini(&priv->sched);
+free_cmd_bufs:
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ if (!priv->cmd_buf[i])
+ continue;
+ drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
+ }
+ amdxdna_gem_unpin(heap);
+put_heap:
+ drm_gem_object_put(to_gobj(heap));
+free_priv:
+ kfree(priv);
+ return ret;
+}
+
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev_hdl *ndev;
+ struct amdxdna_dev *xdna;
+ int idx;
+
+ xdna = hwctx->client->xdna;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num--;
+ drm_sched_wqueue_stop(&hwctx->priv->sched);
+
+ /* Now the scheduler will not send commands to the device. */
+ aie2_release_resource(hwctx);
+
+ /*
+ * All submitted commands are aborted.
+ * Restart the scheduler queues to clean up jobs; if
+ * amdxdna_sched_job_run() is called now, it returns -ENODEV.
+ */
+ drm_sched_wqueue_start(&hwctx->priv->sched);
+
+ aie2_hwctx_wait_for_idle(hwctx);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+ drm_sched_fini(&hwctx->priv->sched);
+ aie2_ctx_syncobj_destroy(hwctx);
+
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+
+ for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
+ drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
+ amdxdna_gem_unpin(hwctx->priv->heap);
+ drm_gem_object_put(to_gobj(hwctx->priv->heap));
+
+ mutex_destroy(&hwctx->priv->io_lock);
+ kfree(hwctx->col_list);
+ kfree(hwctx->priv);
+ kfree(hwctx->cus);
+}
+
+static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
+{
+ struct amdxdna_hwctx_param_config_cu *config = buf;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 total_size;
+ int ret;
+
+ XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
+ if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
+ return -EINVAL;
+
+ if (hwctx->status != HWCTX_STAT_INIT) {
+ XDNA_ERR(xdna, "Not support re-config CU");
+ return -EINVAL;
+ }
+
+ if (!config->num_cus) {
+ XDNA_ERR(xdna, "Number of CU is zero");
+ return -EINVAL;
+ }
+
+ total_size = struct_size(config, cu_configs, config->num_cus);
+ if (total_size > size) {
+ XDNA_ERR(xdna, "CU config larger than size");
+ return -EINVAL;
+ }
+
+ hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
+ if (!hwctx->cus)
+ return -ENOMEM;
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
+ goto free_cus;
+ }
+
+ wmb(); /* To avoid locking in command submit when checking status */
+ hwctx->status = HWCTX_STAT_READY;
+
+ return 0;
+
+free_cus:
+ kfree(hwctx->cus);
+ hwctx->cus = NULL;
+ return ret;
+}
+
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ switch (type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ return aie2_hwctx_cu_config(hwctx, buf, size);
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ return -EOPNOTSUPP;
+ default:
+ XDNA_DBG(xdna, "Not supported type %d", type);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aie2_populate_range(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct mm_struct *mm = abo->mem.notifier.mm;
+ struct hmm_range range = { 0 };
+ unsigned long timeout;
+ int ret;
+
+ XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
+ abo->mem.userptr, abo->mem.size);
+ range.notifier = &abo->mem.notifier;
+ range.start = abo->mem.userptr;
+ range.end = abo->mem.userptr + abo->mem.size;
+ range.hmm_pfns = abo->mem.pfns;
+ range.default_flags = HMM_PFN_REQ_FAULT;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
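+ /*
+ * Fault in the pages and retry on -EBUSY, or when the notifier
+ * sequence has changed, until the timeout expires.
+ */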
+again:
+ range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(&range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto put_mm;
+ }
+
+ if (ret == -EBUSY)
+ goto again;
+
+ goto put_mm;
+ }
+
+ down_read(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
+ up_read(&xdna->notifier_lock);
+ goto again;
+ }
+ abo->mem.map_invalid = false;
+ up_read(&xdna->notifier_lock);
+
+put_mm:
+ mmput(mm);
+ return ret;
+}
+
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct ww_acquire_ctx acquire_ctx;
+ struct dma_fence_chain *chain;
+ struct amdxdna_gem_obj *abo;
+ unsigned long timeout = 0;
+ int ret, i;
+
+ ret = down_interruptible(&hwctx->priv->job_sem);
+ if (ret) {
+ XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
+ return ret;
+ }
+
+ chain = dma_fence_chain_alloc();
+ if (!chain) {
+ XDNA_ERR(xdna, "Alloc fence chain failed");
+ ret = -ENOMEM;
+ goto up_sem;
+ }
+
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
+ goto free_chain;
+ }
+
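+ /*
+ * Lock all BO reservations and revalidate the HMM mappings. If any
+ * mapping was invalidated, drop the locks, repopulate the range and
+ * retry from the start.
+ */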
+retry:
+ ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
+ goto cleanup_job;
+ }
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ goto cleanup_job;
+ }
+ }
+
+ down_read(&xdna->notifier_lock);
+ for (i = 0; i < job->bo_cnt; i++) {
+ abo = to_xdna_obj(job->bos[i]);
+ if (abo->mem.map_invalid) {
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (!timeout) {
+ timeout = jiffies +
+ msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ } else if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto cleanup_job;
+ }
+
+ ret = aie2_populate_range(abo);
+ if (ret)
+ goto cleanup_job;
+ goto retry;
+ }
+ }
+
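+ /*
+ * io_lock serializes arming and pushing jobs so that sequence
+ * numbers and syncobj points are added in submission order.
+ */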
+ mutex_lock(&hwctx->priv->io_lock);
+ drm_sched_job_arm(&job->base);
+ job->out_fence = dma_fence_get(&job->base.s_fence->finished);
+ for (i = 0; i < job->bo_cnt; i++)
+ dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
+ job->seq = hwctx->priv->seq++;
+ kref_get(&job->refcnt);
+ drm_sched_entity_push_job(&job->base);
+
+ *seq = job->seq;
+ drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
+ mutex_unlock(&hwctx->priv->io_lock);
+
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+
+ aie2_job_put(job);
+
+ return 0;
+
+cleanup_job:
+ drm_sched_job_cleanup(&job->base);
+free_chain:
+ dma_fence_chain_free(chain);
+up_sem:
+ up(&hwctx->priv->job_sem);
+ job->job_done = true;
+ return ret;
+}
+
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
+ unsigned long cur_seq)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct drm_gem_object *gobj = to_gobj(abo);
+ long ret;
+
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+ ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (!ret || ret == -ERESTARTSYS)
+ XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
new file mode 100644
index 000000000000..b1defaa8513b
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
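+/*
+ * One async error event is registered per AIE column. Firmware writes
+ * the error payload into buf and completes the registered message,
+ * which queues the event's work item.
+ */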
+struct async_event {
+ struct amdxdna_dev_hdl *ndev;
+ struct async_event_msg_resp resp;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+};
+
+struct async_events {
+ struct workqueue_struct *wq;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+ u32 event_cnt;
+ struct async_event event[] __counted_by(event_cnt);
+};
+
+/*
+ * The enum, struct and lookup tables below are ported from the XAIE util
+ * header file.
+ *
+ * This data is defined by the AIE device and is used to decode error
+ * messages from the device.
+ */
+
+enum aie_module_type {
+ AIE_MEM_MOD = 0,
+ AIE_CORE_MOD,
+ AIE_PL_MOD,
+};
+
+enum aie_error_category {
+ AIE_ERROR_SATURATION = 0,
+ AIE_ERROR_FP,
+ AIE_ERROR_STREAM,
+ AIE_ERROR_ACCESS,
+ AIE_ERROR_BUS,
+ AIE_ERROR_INSTRUCTION,
+ AIE_ERROR_ECC,
+ AIE_ERROR_LOCK,
+ AIE_ERROR_DMA,
+ AIE_ERROR_MEM_PARITY,
+ /* Unknown is not from XAIE, added for better categorization */
+ AIE_ERROR_UNKNOWN,
+};
+
+/* Don't pack, unless the XAIE side changes */
+struct aie_error {
+ __u8 row;
+ __u8 col;
+ __u32 mod_type;
+ __u8 event_id;
+};
+
+struct aie_err_info {
+ u32 err_cnt;
+ u32 ret_code;
+ u32 rsvd;
+ struct aie_error payload[] __counted_by(err_cnt);
+};
+
+struct aie_event_category {
+ u8 event_id;
+ enum aie_error_category category;
+};
+
+#define EVENT_CATEGORY(id, cat) { id, cat }
+static const struct aie_event_category aie_ml_mem_event_cat[] = {
+ EVENT_CATEGORY(88U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(90U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(91U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(92U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(93U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(94U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(95U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(96U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(97U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(98U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(99U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(100U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(101U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_core_event_cat[] = {
+ EVENT_CATEGORY(55U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(56U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(57U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(58U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(59U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(60U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(62U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(64U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(65U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(66U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(67U, AIE_ERROR_LOCK),
+ EVENT_CATEGORY(70U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(71U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(72U, AIE_ERROR_BUS),
+};
+
+static const struct aie_event_category aie_ml_mem_tile_event_cat[] = {
+ EVENT_CATEGORY(130U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(132U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(133U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(134U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(135U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(136U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(137U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(138U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(139U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
+ EVENT_CATEGORY(64U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(65U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(66U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(67U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(68U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(69U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(70U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(71U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(72U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(73U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
+};
+
+static enum aie_error_category
+aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
+{
+ const struct aie_event_category *lut;
+ int num_entry;
+ int i;
+
+ switch (mod_type) {
+ case AIE_PL_MOD:
+ lut = aie_ml_shim_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_shim_tile_event_cat);
+ break;
+ case AIE_CORE_MOD:
+ lut = aie_ml_core_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_core_event_cat);
+ break;
+ case AIE_MEM_MOD:
+ if (row == 1) {
+ lut = aie_ml_mem_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_tile_event_cat);
+ } else {
+ lut = aie_ml_mem_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_event_cat);
+ }
+ break;
+ default:
+ return AIE_ERROR_UNKNOWN;
+ }
+
+ for (i = 0; i < num_entry; i++) {
+ if (event_id != lut[i].event_id)
+ continue;
+
+ return lut[i].category;
+ }
+
+ return AIE_ERROR_UNKNOWN;
+}
+
+static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ u32 err_col = 0; /* assume that AIE has fewer than 32 columns */
+ int i;
+
+ /* Get err column bitmap */
+ for (i = 0; i < num_err; i++) {
+ struct aie_error *err = &errs[i];
+ enum aie_error_category cat;
+
+ cat = aie_get_error_category(err->row, err->event_id, err->mod_type);
+ XDNA_ERR(ndev->xdna, "Row: %d, Col: %d, module %d, event ID %d, category %d",
+ err->row, err->col, err->mod_type,
+ err->event_id, cat);
+
+ if (err->col >= 32) {
+ XDNA_WARN(ndev->xdna, "Invalid column number");
+ break;
+ }
+
+ err_col |= (1 << err->col);
+ }
+
+ return err_col;
+}
+
+static int aie2_error_async_cb(void *handle, const u32 *data, size_t size)
+{
+ struct async_event_msg_resp *resp;
+ struct async_event *e = handle;
+
+ if (data) {
+ resp = (struct async_event_msg_resp *)data;
+ e->resp.type = resp->type;
+ wmb(); /* Update status last so no lock is needed here */
+ e->resp.status = resp->status;
+ }
+ queue_work(e->wq, &e->work);
+ return 0;
+}
+
+static int aie2_error_event_send(struct async_event *e)
+{
+ drm_clflush_virt_range(e->buf, e->size); /* device can access */
+ return aie2_register_asyn_event_msg(e->ndev, e->addr, e->size, e,
+ aie2_error_async_cb);
+}
+
+static void aie2_error_worker(struct work_struct *err_work)
+{
+ struct aie_err_info *info;
+ struct amdxdna_dev *xdna;
+ struct async_event *e;
+ u32 max_err;
+ u32 err_col;
+
+ e = container_of(err_work, struct async_event, work);
+
+ xdna = e->ndev->xdna;
+
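+ /*
+ * MAX_AIE2_STATUS_CODE doubles as a "no new response" sentinel:
+ * bail out if nothing arrived, otherwise mark the event consumed.
+ */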
+ if (e->resp.status == MAX_AIE2_STATUS_CODE)
+ return;
+
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+
+ print_hex_dump_debug("AIE error: ", DUMP_PREFIX_OFFSET, 16, 4,
+ e->buf, 0x100, false);
+
+ info = (struct aie_err_info *)e->buf;
+ XDNA_DBG(xdna, "Error count %d return code %d", info->err_cnt, info->ret_code);
+
+ max_err = (e->size - sizeof(*info)) / sizeof(struct aie_error);
+ if (unlikely(info->err_cnt > max_err)) {
+ WARN_ONCE(1, "Error count too large %d\n", info->err_cnt);
+ return;
+ }
+ err_col = aie2_error_backtrack(e->ndev, info->payload, info->err_cnt);
+ if (!err_col) {
+ XDNA_WARN(xdna, "Did not get error column");
+ return;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ /* Re-send this event to firmware */
+ if (aie2_error_event_send(e))
+ XDNA_WARN(xdna, "Unable to register async event");
+ mutex_unlock(&xdna->dev_lock);
+}
+
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_event *e;
+ int i, ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ for (i = 0; i < ndev->async_events->event_cnt; i++) {
+ e = &ndev->async_events->event[i];
+ ret = aie2_error_event_send(e);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_events *events;
+
+ events = ndev->async_events;
+
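+ /*
+ * Drop dev_lock while draining the workqueue. The error worker
+ * takes dev_lock to re-register events; holding it here could
+ * deadlock.
+ */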
+ mutex_unlock(&xdna->dev_lock);
+ destroy_workqueue(events->wq);
+ mutex_lock(&xdna->dev_lock);
+
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+ kfree(events);
+}
+
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 total_col = ndev->total_col;
+ u32 total_size = ASYNC_BUF_SIZE * total_col;
+ struct async_events *events;
+ int i, ret;
+
+ events = kzalloc(struct_size(events, event, total_col), GFP_KERNEL);
+ if (!events)
+ return -ENOMEM;
+
+ events->buf = dma_alloc_noncoherent(xdna->ddev.dev, total_size, &events->addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!events->buf) {
+ ret = -ENOMEM;
+ goto free_events;
+ }
+ events->size = total_size;
+ events->event_cnt = total_col;
+
+ events->wq = alloc_ordered_workqueue("async_wq", 0);
+ if (!events->wq) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ for (i = 0; i < events->event_cnt; i++) {
+ struct async_event *e = &events->event[i];
+ u32 offset = i * ASYNC_BUF_SIZE;
+
+ e->ndev = ndev;
+ e->wq = events->wq;
+ e->buf = &events->buf[offset];
+ e->addr = events->addr + offset;
+ e->size = ASYNC_BUF_SIZE;
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+ INIT_WORK(&e->work, aie2_error_worker);
+ }
+
+ ndev->async_events = events;
+
+ XDNA_DBG(xdna, "Async event count %d, buf total size 0x%x",
+ events->event_cnt, events->size);
+ return 0;
+
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+free_events:
+ kfree(events);
+ return ret;
+}
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
new file mode 100644
index 000000000000..9e2c9a44f76a
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+#define DECLARE_AIE2_MSG(name, op) \
+ DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)
+
+static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
+ struct xdna_mailbox_msg *msg)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ if (!ndev->mgmt_chann)
+ return -ENODEV;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
+ if (ret == -ETIME) {
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ }
+
+ if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
+ msg->opcode, *hdl->data);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_SUSPEND);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_RESUME);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value)
+{
+ DECLARE_AIE2_MSG(set_runtime_cfg, MSG_OP_SET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ req.value = value;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to set runtime config, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
+{
+ DECLARE_AIE2_MSG(get_runtime_cfg, MSG_OP_GET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to get runtime config, ret %d", ret);
+ return ret;
+ }
+
+ *value = resp.value;
+ return 0;
+}
+
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid)
+{
+ DECLARE_AIE2_MSG(assign_mgmt_pasid, MSG_OP_ASSIGN_MGMT_PASID);
+
+ req.pasid = pasid;
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version)
+{
+ DECLARE_AIE2_MSG(aie_version_info, MSG_OP_QUERY_AIE_VERSION);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "Query AIE version - major: %u minor: %u completed",
+ resp.major, resp.minor);
+
+ version->major = resp.major;
+ version->minor = resp.minor;
+
+ return 0;
+}
+
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata)
+{
+ DECLARE_AIE2_MSG(aie_tile_info, MSG_OP_QUERY_AIE_TILE_INFO);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ metadata->size = resp.info.size;
+ metadata->cols = resp.info.cols;
+ metadata->rows = resp.info.rows;
+
+ metadata->version.major = resp.info.major;
+ metadata->version.minor = resp.info.minor;
+
+ metadata->core.row_count = resp.info.core_rows;
+ metadata->core.row_start = resp.info.core_row_start;
+ metadata->core.dma_channel_count = resp.info.core_dma_channels;
+ metadata->core.lock_count = resp.info.core_locks;
+ metadata->core.event_reg_count = resp.info.core_events;
+
+ metadata->mem.row_count = resp.info.mem_rows;
+ metadata->mem.row_start = resp.info.mem_row_start;
+ metadata->mem.dma_channel_count = resp.info.mem_dma_channels;
+ metadata->mem.lock_count = resp.info.mem_locks;
+ metadata->mem.event_reg_count = resp.info.mem_events;
+
+ metadata->shim.row_count = resp.info.shim_rows;
+ metadata->shim.row_start = resp.info.shim_row_start;
+ metadata->shim.dma_channel_count = resp.info.shim_dma_channels;
+ metadata->shim.lock_count = resp.info.shim_locks;
+ metadata->shim.event_reg_count = resp.info.shim_events;
+
+ return 0;
+}
+
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver)
+{
+ DECLARE_AIE2_MSG(firmware_version, MSG_OP_GET_FIRMWARE_VERSION);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ fw_ver->major = resp.major;
+ fw_ver->minor = resp.minor;
+ fw_ver->sub = resp.sub;
+ fw_ver->build = resp.build;
+
+ return 0;
+}
+
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(create_ctx, MSG_OP_CREATE_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_mailbox_chann_res x2i;
+ struct xdna_mailbox_chann_res i2x;
+ struct cq_pair *cq_pair;
+ u32 intr_reg;
+ int ret;
+
+ req.aie_type = 1;
+ req.start_col = hwctx->start_col;
+ req.num_col = hwctx->num_col;
+ req.num_cq_pairs_requested = 1;
+ req.pasid = hwctx->client->pasid;
+ req.context_priority = 2;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ hwctx->fw_ctx_id = resp.context_id;
+ WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
+
+ cq_pair = &resp.cq_pair[0];
+ x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
+ x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
+ x2i.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->x2i_q.buf_addr);
+ x2i.rb_size = cq_pair->x2i_q.buf_size;
+
+ i2x.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.head_addr);
+ i2x.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.tail_addr);
+ i2x.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->i2x_q.buf_addr);
+ i2x.rb_size = cq_pair->i2x_q.buf_size;
+
+ ret = pci_irq_vector(to_pci_dev(xdna->ddev.dev), resp.msix_id);
+ if (ret == -EINVAL) {
+ XDNA_ERR(xdna, "not able to create channel");
+ goto out_destroy_context;
+ }
+
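+ /* The channel interrupt status register follows the head pointer register */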
+ intr_reg = i2x.mb_head_ptr_reg + 4;
+ hwctx->priv->mbox_chann = xdna_mailbox_create_channel(ndev->mbox, &x2i, &i2x,
+ intr_reg, ret);
+ if (!hwctx->priv->mbox_chann) {
+ XDNA_ERR(xdna, "not able to create channel");
+ ret = -EINVAL;
+ goto out_destroy_context;
+ }
+
+ XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
+ hwctx->name, ret, resp.msix_id);
+ XDNA_DBG(xdna, "%s created fw ctx %d pasid %d", hwctx->name,
+ hwctx->fw_ctx_id, hwctx->client->pasid);
+
+ return 0;
+
+out_destroy_context:
+ aie2_destroy_context(ndev, hwctx);
+ return ret;
+}
+
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(destroy_ctx, MSG_OP_DESTROY_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ if (hwctx->fw_ctx_id == -1)
+ return 0;
+
+ xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
+
+ req.context_id = hwctx->fw_ctx_id;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ XDNA_WARN(xdna, "%s destroy context failed, ret %d", hwctx->name, ret);
+
+ xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
+ XDNA_DBG(xdna, "%s destroyed fw ctx %d", hwctx->name,
+ hwctx->fw_ctx_id);
+ hwctx->priv->mbox_chann = NULL;
+ hwctx->fw_ctx_id = -1;
+
+ return ret;
+}
+
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size)
+{
+ DECLARE_AIE2_MSG(map_host_buffer, MSG_OP_MAP_HOST_BUFFER);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ req.context_id = context_id;
+ req.buf_addr = addr;
+ req.buf_size = size;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "fw ctx %d map host buf addr 0x%llx size 0x%llx",
+ context_id, addr, size);
+
+ return 0;
+}
+
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
+ u32 size, u32 *cols_filled)
+{
+ DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ dma_addr_t dma_addr;
+ u32 aie_bitmap = 0;
+ u8 *buff_addr;
+ int ret, idx;
+
+ buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!buff_addr)
+ return -ENOMEM;
+
+ /* Go through each hardware context and mark the AIE columns that are active */
+ list_for_each_entry(client, &xdna->client_list, node) {
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ }
+
+ *cols_filled = 0;
+ req.dump_buff_addr = dma_addr;
+ req.dump_buff_size = size;
+ req.num_cols = hweight32(aie_bitmap);
+ req.aie_bitmap = aie_bitmap;
+
+ drm_clflush_virt_range(buff_addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(xdna, "Error during NPU query, status %d", ret);
+ goto fail;
+ }
+
+ if (resp.status != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ XDNA_DBG(xdna, "Query NPU status completed");
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto fail;
+ }
+
+ if (copy_to_user(buf, buff_addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy NPU status to user space");
+ goto fail;
+ }
+
+ *cols_filled = aie_bitmap;
+
+fail:
+ dma_free_noncoherent(xdna->ddev.dev, size, buff_addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, const u32 *, size_t))
+{
+ struct async_event_msg_req req = { 0 };
+ struct xdna_mailbox_msg msg = {
+ .send_data = (u8 *)&req,
+ .send_size = sizeof(req),
+ .handle = handle,
+ .opcode = MSG_OP_REGISTER_ASYNC_EVENT_MSG,
+ .notify_cb = cb,
+ };
+
+ req.buf_addr = addr;
+ req.buf_size = size;
+
+ XDNA_DBG(ndev->xdna, "Register addr 0x%llx size 0x%x", addr, size);
+ return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
+}
+
+int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 shift = xdna->dev_info->dev_mem_buf_shift;
+ DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+ struct drm_gem_object *gobj;
+ struct amdxdna_gem_obj *abo;
+ int ret, i;
+
+ if (!chann)
+ return -ENODEV;
+
+ if (hwctx->cus->num_cus > MAX_NUM_CUS) {
+ XDNA_DBG(xdna, "Exceed maximum CU %d", MAX_NUM_CUS);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hwctx->cus->num_cus; i++) {
+ struct amdxdna_cu_config *cu = &hwctx->cus->cu_configs[i];
+
+ if (XDNA_MBZ_DBG(xdna, cu->pad, sizeof(cu->pad)))
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(hwctx->client->filp, cu->cu_bo);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -EINVAL;
+ }
+ abo = to_xdna_obj(gobj);
+
+ if (abo->type != AMDXDNA_BO_DEV) {
+ drm_gem_object_put(gobj);
+ XDNA_ERR(xdna, "Invalid BO type");
+ return -EINVAL;
+ }
+
+ req.cfgs[i] = FIELD_PREP(AIE2_MSG_CFG_CU_PDI_ADDR,
+ abo->mem.dev_addr >> shift);
+ req.cfgs[i] |= FIELD_PREP(AIE2_MSG_CFG_CU_FUNC, cu->cu_func);
+ XDNA_DBG(xdna, "CU %d full addr 0x%llx, cfg 0x%x", i,
+ abo->mem.dev_addr, req.cfgs[i]);
+ drm_gem_object_put(gobj);
+ }
+ req.num_cus = hwctx->cus->num_cus;
+
+ ret = xdna_send_msg_wait(xdna, chann, &msg);
+ if (ret == -ETIME)
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+
+ if (resp.status == AIE2_STATUS_SUCCESS) {
+ XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
+ return 0;
+ }
+
+ XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
+ msg.opcode, resp.status, ret);
+ return ret;
+}
+
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ union {
+ struct execute_buffer_req ebuf;
+ struct exec_dpu_req dpu;
+ } req;
+ struct xdna_mailbox_msg msg;
+ u32 payload_len;
+ void *payload;
+ int cu_idx;
+ int ret;
+ u32 op;
+
+ if (!chann)
+ return -ENODEV;
+
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (!payload) {
+ XDNA_ERR(xdna, "Invalid command, cannot get payload");
+ return -EINVAL;
+ }
+
+ cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo);
+ if (cu_idx < 0) {
+ XDNA_DBG(xdna, "Invalid cu idx");
+ return -EINVAL;
+ }
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ switch (op) {
+ case ERT_START_CU:
+ if (unlikely(payload_len > sizeof(req.ebuf.payload)))
+ XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len);
+ req.ebuf.cu_idx = cu_idx;
+ memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload));
+ msg.send_size = sizeof(req.ebuf);
+ msg.opcode = MSG_OP_EXECUTE_BUFFER_CF;
+ break;
+ case ERT_START_NPU: {
+ struct amdxdna_cmd_start_npu *sn = payload;
+
+ if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload)))
+ XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len);
+ req.dpu.inst_buf_addr = sn->buffer;
+ req.dpu.inst_size = sn->buffer_size;
+ req.dpu.inst_prop_cnt = sn->prop_count;
+ req.dpu.cu_idx = cu_idx;
+ memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload));
+ msg.send_size = sizeof(req.dpu);
+ msg.opcode = MSG_OP_EXEC_DPU;
+ break;
+ }
+ default:
+ XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op);
+ return -EINVAL;
+ }
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
+ 0x40, false);
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ u32 payload_len;
+ void *payload;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+
+ if (!slot_cf_has_space(offset, payload_len))
+ return -ENOSPC;
+
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = payload_len / sizeof(u32);
+ memcpy(buf->args, payload, payload_len);
+ /* Report the exact buf size so firmware copies only what is needed */
+ *size = sizeof(*buf) + payload_len;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_dpu *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ struct amdxdna_cmd_start_npu *sn;
+ u32 payload_len;
+ void *payload;
+ u32 arg_sz;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+ sn = payload;
+ arg_sz = payload_len - sizeof(*sn);
+ if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (!slot_dpu_has_space(offset, arg_sz))
+ return -ENOSPC;
+
+ buf->inst_buf_addr = sn->buffer;
+ buf->inst_size = sn->buffer_size;
+ buf->inst_prop_cnt = sn->prop_count;
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(buf->args, sn->prop_args, arg_sz);
+
+ /* Report the exact buf size so firmware copies only what is needed */
+ *size += sizeof(*buf) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ u32 this_op = amdxdna_cmd_get_op(abo);
+ void *cmd_buf = cmdbuf_abo->mem.kva;
+ int ret;
+
+ if (this_op != op) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (op) {
+ case ERT_START_CU:
+ ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size);
+ break;
+ case ERT_START_NPU:
+ ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+done:
+ if (ret) {
+ XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d",
+ op, ret);
+ }
+ return ret;
+}
+
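+/*
+ * Each in-flight job owns one pre-allocated chain command buffer,
+ * selected by its sequence number.
+ */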
+static inline struct amdxdna_gem_obj *
+aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
+{
+ int idx = get_job_idx(job->seq);
+
+ return job->hwctx->priv->cmd_buf[idx];
+}
+
+static void
+aie2_cmdlist_prepare_request(struct cmd_chain_req *req,
+ struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt)
+{
+ req->buf_addr = cmdbuf_abo->mem.dev_addr;
+ req->buf_size = size;
+ req->count = cnt;
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
+ XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d",
+ req->buf_addr, size, cnt);
+}
+
+static inline u32
+aie2_cmd_op_to_msg_op(u32 op)
+{
+ switch (op) {
+ case ERT_START_CU:
+ return MSG_OP_CHAIN_EXEC_BUFFER_CF;
+ case ERT_START_NPU:
+ return MSG_OP_CHAIN_EXEC_DPU;
+ default:
+ return MSG_OP_MAX_OPCODE;
+ }
+}
+
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_cmd_chain *payload;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 payload_len;
+ u32 offset = 0;
+ u32 size;
+ int ret;
+ u32 op;
+ u32 i;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (op != ERT_CMD_CHAIN || !payload ||
+ payload_len < struct_size(payload, data, payload->command_count))
+ return -EINVAL;
+
+ for (i = 0; i < payload->command_count; i++) {
+ u32 boh = (u32)(payload->data[i]);
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
+ if (!abo) {
+ XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh);
+ return -ENOENT;
+ }
+
+ /* All sub-cmds should have the same op; use the first one. */
+ if (i == 0)
+ op = amdxdna_cmd_get_op(abo);
+
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size);
+ amdxdna_gem_put_obj(abo);
+ if (ret)
+ return -EINVAL;
+
+ offset += size;
+ }
+
+ /* The offset is the accumulated total size of the cmd buffer */
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 size;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size);
+ if (ret)
+ return ret;
+
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct xdna_mailbox_msg msg;
+ struct sync_bo_req req;
+ int ret = 0;
+
+ req.src_addr = 0;
+ req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
+ req.size = abo->mem.size;
+
+ /* Device to Host */
+ req.type = FIELD_PREP(AIE2_MSG_SYNC_BO_SRC_TYPE, SYNC_BO_DEV_MEM) |
+ FIELD_PREP(AIE2_MSG_SYNC_BO_DST_TYPE, SYNC_BO_HOST_MEM);
+
+ XDNA_DBG(xdna, "sync %d bytes src(0x%llx) to dst(0x%llx) completed",
+ req.size, req.src_addr, req.dst_addr);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_SYNC_BO;
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
new file mode 100644
index 000000000000..4e02e744b470
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MSG_PRIV_H_
+#define _AIE2_MSG_PRIV_H_
+
+enum aie2_msg_opcode {
+ MSG_OP_CREATE_CONTEXT = 0x2,
+ MSG_OP_DESTROY_CONTEXT = 0x3,
+ MSG_OP_SYNC_BO = 0x7,
+ MSG_OP_EXECUTE_BUFFER_CF = 0xC,
+ MSG_OP_QUERY_COL_STATUS = 0xD,
+ MSG_OP_QUERY_AIE_TILE_INFO = 0xE,
+ MSG_OP_QUERY_AIE_VERSION = 0xF,
+ MSG_OP_EXEC_DPU = 0x10,
+ MSG_OP_CONFIG_CU = 0x11,
+ MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
+ MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_MAX_XRT_OPCODE,
+ MSG_OP_SUSPEND = 0x101,
+ MSG_OP_RESUME = 0x102,
+ MSG_OP_ASSIGN_MGMT_PASID = 0x103,
+ MSG_OP_INVOKE_SELF_TEST = 0x104,
+ MSG_OP_MAP_HOST_BUFFER = 0x106,
+ MSG_OP_GET_FIRMWARE_VERSION = 0x108,
+ MSG_OP_SET_RUNTIME_CONFIG = 0x10A,
+ MSG_OP_GET_RUNTIME_CONFIG = 0x10B,
+ MSG_OP_REGISTER_ASYNC_EVENT_MSG = 0x10C,
+ MSG_OP_MAX_DRV_OPCODE,
+ MSG_OP_GET_PROTOCOL_VERSION = 0x301,
+ MSG_OP_MAX_OPCODE
+};
+
+enum aie2_msg_status {
+ AIE2_STATUS_SUCCESS = 0x0,
+ /* AIE Error codes */
+ AIE2_STATUS_AIE_SATURATION_ERROR = 0x1000001,
+ AIE2_STATUS_AIE_FP_ERROR = 0x1000002,
+ AIE2_STATUS_AIE_STREAM_ERROR = 0x1000003,
+ AIE2_STATUS_AIE_ACCESS_ERROR = 0x1000004,
+ AIE2_STATUS_AIE_BUS_ERROR = 0x1000005,
+ AIE2_STATUS_AIE_INSTRUCTION_ERROR = 0x1000006,
+ AIE2_STATUS_AIE_ECC_ERROR = 0x1000007,
+ AIE2_STATUS_AIE_LOCK_ERROR = 0x1000008,
+ AIE2_STATUS_AIE_DMA_ERROR = 0x1000009,
+ AIE2_STATUS_AIE_MEM_PARITY_ERROR = 0x100000a,
+ AIE2_STATUS_AIE_PWR_CFG_ERROR = 0x100000b,
+ AIE2_STATUS_AIE_BACKTRACK_ERROR = 0x100000c,
+ AIE2_STATUS_MAX_AIE_STATUS_CODE,
+ /* MGMT ERT Error codes */
+ AIE2_STATUS_MGMT_ERT_SELF_TEST_FAILURE = 0x2000001,
+ AIE2_STATUS_MGMT_ERT_HASH_MISMATCH,
+ AIE2_STATUS_MGMT_ERT_NOAVAIL,
+ AIE2_STATUS_MGMT_ERT_INVALID_PARAM,
+ AIE2_STATUS_MGMT_ERT_ENTER_SUSPEND_FAILURE,
+ AIE2_STATUS_MGMT_ERT_BUSY,
+ AIE2_STATUS_MGMT_ERT_APPLICATION_ACTIVE,
+ MAX_MGMT_ERT_STATUS_CODE,
+ /* APP ERT Error codes */
+ AIE2_STATUS_APP_ERT_FIRST_ERROR = 0x3000001,
+ AIE2_STATUS_APP_INVALID_INSTR,
+ AIE2_STATUS_APP_LOAD_PDI_FAIL,
+ MAX_APP_ERT_STATUS_CODE,
+ /* NPU RTOS Error Codes */
+ AIE2_STATUS_INVALID_INPUT_BUFFER = 0x4000001,
+ AIE2_STATUS_INVALID_COMMAND,
+ AIE2_STATUS_INVALID_PARAM,
+ AIE2_STATUS_INVALID_OPERATION = 0x4000006,
+ AIE2_STATUS_ASYNC_EVENT_MSGS_FULL,
+ AIE2_STATUS_MAX_RTOS_STATUS_CODE,
+ MAX_AIE2_STATUS_CODE
+};
+
+struct assign_mgmt_pasid_req {
+ __u16 pasid;
+ __u16 reserved;
+} __packed;
+
+struct assign_mgmt_pasid_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct map_host_buffer_req {
+ __u32 context_id;
+ __u64 buf_addr;
+ __u64 buf_size;
+} __packed;
+
+struct map_host_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+#define MAX_CQ_PAIRS 2
+struct cq_info {
+ __u32 head_addr;
+ __u32 tail_addr;
+ __u32 buf_addr;
+ __u32 buf_size;
+};
+
+struct cq_pair {
+ struct cq_info x2i_q;
+ struct cq_info i2x_q;
+};
+
+struct create_ctx_req {
+ __u32 aie_type;
+ __u8 start_col;
+ __u8 num_col;
+ __u16 reserved;
+ __u8 num_cq_pairs_requested;
+ __u8 reserved1;
+ __u16 pasid;
+ __u32 pad[2];
+ __u32 sec_comm_target_type;
+ __u32 context_priority;
+} __packed;
+
+struct create_ctx_resp {
+ enum aie2_msg_status status;
+ __u32 context_id;
+ __u16 msix_id;
+ __u8 num_cq_pairs_allocated;
+ __u8 reserved;
+ struct cq_pair cq_pair[MAX_CQ_PAIRS];
+} __packed;
+
+struct destroy_ctx_req {
+ __u32 context_id;
+} __packed;
+
+struct destroy_ctx_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct execute_buffer_req {
+ __u32 cu_idx;
+ __u32 payload[19];
+} __packed;
+
+struct exec_dpu_req {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 payload[35];
+} __packed;
+
+struct execute_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct aie_tile_info {
+ __u32 size;
+ __u16 major;
+ __u16 minor;
+ __u16 cols;
+ __u16 rows;
+ __u16 core_rows;
+ __u16 mem_rows;
+ __u16 shim_rows;
+ __u16 core_row_start;
+ __u16 mem_row_start;
+ __u16 shim_row_start;
+ __u16 core_dma_channels;
+ __u16 mem_dma_channels;
+ __u16 shim_dma_channels;
+ __u16 core_locks;
+ __u16 mem_locks;
+ __u16 shim_locks;
+ __u16 core_events;
+ __u16 mem_events;
+ __u16 shim_events;
+ __u16 reserved;
+};
+
+struct aie_tile_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_tile_info_resp {
+ enum aie2_msg_status status;
+ struct aie_tile_info info;
+} __packed;
+
+struct aie_version_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_version_info_resp {
+ enum aie2_msg_status status;
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct aie_column_info_req {
+ __u64 dump_buff_addr;
+ __u32 dump_buff_size;
+ __u32 num_cols;
+ __u32 aie_bitmap;
+} __packed;
+
+struct aie_column_info_resp {
+ enum aie2_msg_status status;
+ __u32 size;
+} __packed;
+
+struct suspend_req {
+ __u32 place_holder;
+} __packed;
+
+struct suspend_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct resume_req {
+ __u32 place_holder;
+} __packed;
+
+struct resume_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct check_header_hash_req {
+ __u64 hash_high;
+ __u64 hash_low;
+} __packed;
+
+struct check_header_hash_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct query_error_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct query_error_resp {
+ enum aie2_msg_status status;
+ __u32 num_err;
+ __u32 has_next_err;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct protocol_version_req {
+ __u32 reserved;
+} __packed;
+
+struct protocol_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+} __packed;
+
+struct firmware_version_req {
+ __u32 reserved;
+} __packed;
+
+struct firmware_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+ __u32 sub;
+ __u32 build;
+} __packed;
+
+#define MAX_NUM_CUS 32
+#define AIE2_MSG_CFG_CU_PDI_ADDR GENMASK(16, 0)
+#define AIE2_MSG_CFG_CU_FUNC GENMASK(24, 17)
+struct config_cu_req {
+ __u32 num_cus;
+ __u32 cfgs[MAX_NUM_CUS];
+} __packed;
+
+struct config_cu_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct set_runtime_cfg_req {
+ __u32 type;
+ __u64 value;
+} __packed;
+
+struct set_runtime_cfg_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct get_runtime_cfg_req {
+ __u32 type;
+} __packed;
+
+struct get_runtime_cfg_resp {
+ enum aie2_msg_status status;
+ __u64 value;
+} __packed;
+
+enum async_event_type {
+ ASYNC_EVENT_TYPE_AIE_ERROR,
+ ASYNC_EVENT_TYPE_EXCEPTION,
+ MAX_ASYNC_EVENT_TYPE
+};
+
+#define ASYNC_BUF_SIZE SZ_8K
+struct async_event_msg_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct async_event_msg_resp {
+ enum aie2_msg_status status;
+ enum async_event_type type;
+} __packed;
+
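+/*
+ * A slot fits at offset if its header plus the payload still leave
+ * room inside the chain command buffer.
+ */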
+#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
+#define slot_cf_has_space(offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
+ offsetof(struct cmd_chain_slot_execbuf_cf, args[0]))
+struct cmd_chain_slot_execbuf_cf {
+ __u32 cu_idx;
+ __u32 arg_cnt;
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+#define slot_dpu_has_space(offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
+ offsetof(struct cmd_chain_slot_dpu, args[0]))
+struct cmd_chain_slot_dpu {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 arg_cnt;
+#define MAX_DPU_ARGS_SIZE (34 * sizeof(__u32))
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+struct cmd_chain_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 count;
+} __packed;
+
+struct cmd_chain_resp {
+ enum aie2_msg_status status;
+ __u32 fail_cmd_idx;
+ enum aie2_msg_status fail_cmd_status;
+} __packed;
+
+#define AIE2_MSG_SYNC_BO_SRC_TYPE GENMASK(3, 0)
+#define AIE2_MSG_SYNC_BO_DST_TYPE GENMASK(7, 4)
+struct sync_bo_req {
+ __u64 src_addr;
+ __u64 dst_addr;
+ __u32 size;
+#define SYNC_BO_DEV_MEM 0
+#define SYNC_BO_HOST_MEM 2
+ __u32 type;
+} __packed;
+
+struct sync_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
+#endif /* _AIE2_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
new file mode 100644
index 000000000000..5a058e565b01
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -0,0 +1,928 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static int aie2_max_col = XRS_MAX_COL;
+module_param(aie2_max_col, uint, 0600);
+MODULE_PARM_DESC(aie2_max_col, "Maximum number of columns that can be used");
+
+/*
+ * The management mailbox channel is allocated by firmware.
+ * The related register and ring buffer information is in the SRAM BAR.
+ * This struct describes the register layout.
+ */
+#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
+struct mgmt_mbox_chann_info {
+ __u32 x2i_tail;
+ __u32 x2i_head;
+ __u32 x2i_buf;
+ __u32 x2i_buf_sz;
+ __u32 i2x_tail;
+ __u32 i2x_head;
+ __u32 i2x_buf;
+ __u32 i2x_buf_sz;
+ __u32 magic;
+ __u32 msi_id;
+ __u32 prot_major;
+ __u32 prot_minor;
+ __u32 rsvd[4];
+};
+
+static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ /*
+ * The mailbox behavior supported by the driver is defined by
+ * ndev->priv->protocol_major and protocol_minor.
+ *
+ * When protocol_major and fw_major differ, the driver and firmware
+ * are incompatible.
+ */
+ if (ndev->priv->protocol_major != fw_major) {
+ XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
+ fw_major, fw_minor);
+ return -EINVAL;
+ }
+
+ /*
+ * When protocol_minor is greater than fw_minor, the driver relies on
+ * an operation that the installed firmware does not support.
+ */
+ if (ndev->priv->protocol_minor > fw_minor) {
+ XDNA_ERR(xdna, "Firmware minor version smaller than supported");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
+ XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size);
+ XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
+ XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
+ XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
+ XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
+ XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
+}
+
+static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
+{
+ struct mgmt_mbox_chann_info info_regs;
+ struct xdna_mailbox_chann_res *i2x;
+ struct xdna_mailbox_chann_res *x2i;
+ u32 addr, off;
+ u32 *reg;
+ int ret;
+ int i;
+
+ /*
+ * Once firmware is alive, it writes the management channel
+ * information into the SRAM BAR and stores the address of that
+ * information at offset FW_ALIVE_OFF in the SRAM BAR.
+ *
+ * Reading a non-zero value from FW_ALIVE_OFF implies that the
+ * firmware is alive.
+ */
+ ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
+ addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret || !addr)
+ return -ETIME;
+
+ off = AIE2_SRAM_OFF(ndev, addr);
+ reg = (u32 *)&info_regs;
+ for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
+ reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
+
+ if (info_regs.magic != MGMT_MBOX_MAGIC) {
+ XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ i2x = &ndev->mgmt_i2x;
+ x2i = &ndev->mgmt_x2i;
+
+ i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
+ i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
+ i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
+ i2x->rb_size = info_regs.i2x_buf_sz;
+
+ x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
+ x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
+ x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
+ x2i->rb_size = info_regs.x2i_buf_sz;
+
+ ndev->mgmt_chan_idx = info_regs.msi_id;
+ ndev->mgmt_prot_major = info_regs.prot_major;
+ ndev->mgmt_prot_minor = info_regs.prot_minor;
+
+ ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
+
+done:
+ aie2_dump_chann_info_debug(ndev);
+
+ /* Must clear address at FW_ALIVE_OFF */
+ writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
+
+ return ret;
+}
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val)
+{
+ const struct rt_config *cfg;
+ u32 value;
+ int ret;
+
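+ /*
+ * Apply every runtime config entry in the given category; a non-NULL
+ * val overrides the table default.
+ */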
+ for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
+ if (cfg->category != category)
+ continue;
+
+ value = val ? *val : cfg->value;
+ ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
+ cfg->type, value);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_suspend_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Suspend firmware failed");
+ return ret;
+ }
+
+ ret = aie2_resume_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Resume firmware failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Runtime config failed");
+ return ret;
+ }
+
+ ret = aie2_assign_mgmt_pasid(ndev, 0);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Can not assign PASID");
+ return ret;
+ }
+
+ ret = aie2_xdna_reset(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Reset firmware failed");
+ return ret;
+ }
+
+ if (!ndev->async_events)
+ return 0;
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Send async events failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "query firmware version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_version(ndev, &ndev->version);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
+{
+ if (aie2_suspend_fw(ndev))
+ XDNA_ERR(ndev->xdna, "Suspend_fw failed");
+ XDNA_DBG(ndev->xdna, "Firmware suspended");
+}
+
+static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ hwctx->start_col = action->part.start_col;
+ hwctx->num_col = action->part.ncols;
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "create context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_unload(void *cb_arg)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ ret = aie2_destroy_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_dev_hdl *ndev;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ ndev = xdna->dev_handle;
+ ndev->dft_dpm_level = dpm_level;
+ if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
+ return 0;
+
+ return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+}
+
+static struct xrs_action_ops aie2_xrs_actions = {
+ .load = aie2_xrs_load,
+ .unload = aie2_xrs_unload,
+ .set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
+};
+
+static void aie2_hw_stop(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ if (ndev->dev_status <= AIE2_DEV_INIT) {
+ XDNA_ERR(xdna, "device is already stopped");
+ return;
+ }
+
+ aie2_mgmt_fw_fini(ndev);
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ drmm_kfree(&xdna->ddev, ndev->mbox);
+ ndev->mbox = NULL;
+ aie2_psp_stop(ndev->psp_hdl);
+ aie2_smu_fini(ndev);
+ pci_disable_device(pdev);
+
+ ndev->dev_status = AIE2_DEV_INIT;
+}
+
+static int aie2_hw_start(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+ struct xdna_mailbox_res mbox_res;
+ u32 xdna_mailbox_intr_reg;
+ int mgmt_mb_irq, ret;
+
+ if (ndev->dev_status >= AIE2_DEV_START) {
+ XDNA_INFO(xdna, "device is already started");
+ return 0;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ ret = aie2_smu_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
+ goto disable_dev;
+ }
+
+ ret = aie2_psp_start(ndev->psp_hdl);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
+ goto fini_smu;
+ }
+
+ ret = aie2_get_mgmt_chann_info(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "firmware is not alive");
+ goto stop_psp;
+ }
+
+ mbox_res.ringbuf_base = ndev->sram_base;
+ mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
+ mbox_res.mbox_base = ndev->mbox_base;
+ mbox_res.mbox_size = MBOX_SIZE(ndev);
+ mbox_res.name = "xdna_mailbox";
+ ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
+ if (!ndev->mbox) {
+ XDNA_ERR(xdna, "failed to create mailbox device");
+ ret = -ENODEV;
+ goto stop_psp;
+ }
+
+ mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
+ if (mgmt_mb_irq < 0) {
+ ret = mgmt_mb_irq;
+ XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
+ goto stop_psp;
+ }
+
+ xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
+ ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
+ &ndev->mgmt_x2i,
+ &ndev->mgmt_i2x,
+ xdna_mailbox_intr_reg,
+ mgmt_mb_irq);
+ if (!ndev->mgmt_chann) {
+ XDNA_ERR(xdna, "failed to create management mailbox channel");
+ ret = -EINVAL;
+ goto stop_psp;
+ }
+
+ ret = aie2_pm_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_mgmt_fw_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ndev->dev_status = AIE2_DEV_START;
+
+ return 0;
+
+destroy_mgmt_chann:
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+stop_psp:
+ aie2_psp_stop(ndev->psp_hdl);
+fini_smu:
+ aie2_smu_fini(ndev);
+disable_dev:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static int aie2_init(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
+ struct init_config xrs_cfg = { 0 };
+ struct amdxdna_dev_hdl *ndev;
+ struct psp_config psp_conf;
+ const struct firmware *fw;
+ unsigned long bars = 0;
+ int i, nvec, ret;
+
+ ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->priv = xdna->dev_info->dev_priv;
+ ndev->xdna = xdna;
+
+ ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
+ ndev->priv->fw_path, ret);
+ return ret;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
+ goto release_fw;
+ }
+
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ set_bit(PSP_REG_BAR(ndev, i), &bars);
+
+ set_bit(xdna->dev_info->sram_bar, &bars);
+ set_bit(xdna->dev_info->smu_bar, &bars);
+ set_bit(xdna->dev_info->mbox_bar, &bars);
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!test_bit(i, &bars))
+ continue;
+ tbl[i] = pcim_iomap(pdev, i, 0);
+ if (!tbl[i]) {
+ XDNA_ERR(xdna, "map bar %d failed", i);
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ }
+
+ ndev->sram_base = tbl[xdna->dev_info->sram_bar];
+ ndev->smu_base = tbl[xdna->dev_info->smu_bar];
+ ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
+ goto release_fw;
+ }
+
+ nvec = pci_msix_vec_count(pdev);
+ if (nvec <= 0) {
+ XDNA_ERR(xdna, "does not get number of interrupt vector");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
+ goto release_fw;
+ }
+
+ ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ if (ret) {
+ XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
+ goto free_irq;
+ }
+
+ psp_conf.fw_size = fw->size;
+ psp_conf.fw_buf = fw->data;
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
+ ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
+ if (!ndev->psp_hdl) {
+ XDNA_ERR(xdna, "failed to create psp");
+ ret = -ENOMEM;
+ goto disable_sva;
+ }
+ xdna->dev_handle = ndev;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "start npu failed, ret %d", ret);
+ goto disable_sva;
+ }
+
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
+ goto stop_hw;
+ }
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
+ xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
+ for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
+ xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
+ xrs_cfg.sys_eff_factor = 1;
+ xrs_cfg.ddev = &xdna->ddev;
+ xrs_cfg.actions = &aie2_xrs_actions;
+ xrs_cfg.total_col = ndev->total_col;
+
+ xdna->xrs_hdl = xrsm_init(&xrs_cfg);
+ if (!xdna->xrs_hdl) {
+ XDNA_ERR(xdna, "Initialize resolver failed");
+ ret = -EINVAL;
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
+ goto async_event_free;
+ }
+
+ /* Issue a command to make sure the firmware has handled the async events */
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(xdna, "Re-query firmware version failed");
+ goto async_event_free;
+ }
+
+ release_firmware(fw);
+ return 0;
+
+async_event_free:
+ aie2_error_async_events_free(ndev);
+stop_hw:
+ aie2_hw_stop(xdna);
+disable_sva:
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+free_irq:
+ pci_free_irq_vectors(pdev);
+release_fw:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static void aie2_fini(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ aie2_hw_stop(xdna);
+ aie2_error_async_events_free(ndev);
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ pci_free_irq_vectors(pdev);
+}
+
+static int aie2_get_aie_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_status status;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret;
+
+ ndev = xdna->dev_handle;
+ if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
+ return -EFAULT;
+ }
+
+ if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
+ status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
+ return -EINVAL;
+ }
+
+ ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
+ status.buffer_size, &status.cols_filled);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
+ return ret;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_aie_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_metadata *meta;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->col_size = ndev->metadata.size;
+ meta->cols = ndev->metadata.cols;
+ meta->rows = ndev->metadata.rows;
+
+ meta->version.major = ndev->metadata.version.major;
+ meta->version.minor = ndev->metadata.version.minor;
+
+ meta->core.row_count = ndev->metadata.core.row_count;
+ meta->core.row_start = ndev->metadata.core.row_start;
+ meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
+ meta->core.lock_count = ndev->metadata.core.lock_count;
+ meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
+
+ meta->mem.row_count = ndev->metadata.mem.row_count;
+ meta->mem.row_start = ndev->metadata.mem.row_start;
+ meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
+ meta->mem.lock_count = ndev->metadata.mem.lock_count;
+ meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
+
+ meta->shim.row_count = ndev->metadata.shim.row_count;
+ meta->shim.row_start = ndev->metadata.shim.row_start;
+ meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
+ meta->shim.lock_count = ndev->metadata.shim.lock_count;
+ meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
+ ret = -EFAULT;
+
+ kfree(meta);
+ return ret;
+}
+
+static int aie2_get_aie_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ version.major = ndev->version.major;
+ version.minor = ndev->version.minor;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_firmware_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_firmware_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ version.major = xdna->fw_ver.major;
+ version.minor = xdna->fw_ver.minor;
+ version.patch = xdna->fw_ver.sub;
+ version.build = xdna->fw_ver.build;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_power_mode mode = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ mode.power_mode = ndev->pw_mode;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_clock_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_clock_metadata *clock;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return -ENOMEM;
+
+ snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
+ "MP-NPU Clock");
+ clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
+ snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
+ clock->h_clock.freq_mhz = ndev->hclk_freq;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
+ ret = -EFAULT;
+
+ kfree(clock);
+ return ret;
+}
+
+static int aie2_get_hwctx_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drm_query_hwctx *tmp;
+ struct amdxdna_client *tmp_client;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ bool overflow = false;
+ u32 req_bytes = 0;
+ u32 hw_i = 0;
+ int ret = 0;
+ int idx;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
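+ /*
+ * Walk every hardware context of every client. Entries are copied
+ * out only while they fit in the user buffer, but req_bytes keeps
+ * counting so args->buffer_size can report the size actually
+ * required on return.
+ */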
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ buf = u64_to_user_ptr(args->buffer);
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ idx = srcu_read_lock(&tmp_client->hwctx_srcu);
+ amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
+ req_bytes += sizeof(*tmp);
+ if (args->buffer_size < req_bytes) {
+ /* Continue iterating to get the required size */
+ overflow = true;
+ continue;
+ }
+
+ memset(tmp, 0, sizeof(*tmp));
+ tmp->pid = tmp_client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+
+ if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
+ ret = -EFAULT;
+ srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ goto out;
+ }
+ hw_i++;
+ }
+ srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ }
+
+ if (overflow) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
+ args->buffer_size, req_bytes);
+ ret = -EINVAL;
+ }
+
+out:
+ kfree(tmp);
+ args->buffer_size = req_bytes;
+ return ret;
+}
+
+static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_QUERY_AIE_STATUS:
+ ret = aie2_get_aie_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_METADATA:
+ ret = aie2_get_aie_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_VERSION:
+ ret = aie2_get_aie_version(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
+ ret = aie2_get_clock_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
+ ret = aie2_get_hwctx_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
+ ret = aie2_get_firmware_version(client, args);
+ break;
+ case DRM_AMDXDNA_GET_POWER_MODE:
+ ret = aie2_get_power_mode(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_set_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_drm_set_power_mode power_state;
+ enum amdxdna_power_mode_type power_mode;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
+ sizeof(power_state))) {
+ XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
+ return -EFAULT;
+ }
+
+ if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
+ return -EINVAL;
+
+ power_mode = power_state.power_mode;
+ if (power_mode > POWER_MODE_TURBO) {
+ XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
+ return -EINVAL;
+ }
+
+ return aie2_pm_set_mode(xdna->dev_handle, power_mode);
+}
+
+static int aie2_set_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_SET_POWER_MODE:
+ ret = aie2_set_power_mode(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+const struct amdxdna_dev_ops aie2_ops = {
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_start,
+ .suspend = aie2_hw_stop,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .cmd_submit = aie2_cmd_submit,
+ .hmm_invalidate = aie2_hmm_invalidate,
+ .hwctx_suspend = aie2_hwctx_suspend,
+ .hwctx_resume = aie2_hwctx_resume,
+};
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
new file mode 100644
index 000000000000..f2d95531ddc2
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_PCI_H_
+#define _AIE2_PCI_H_
+
+#include <drm/amdxdna_accel.h>
+#include <linux/semaphore.h>
+
+#include "amdxdna_mailbox.h"
+
+#define AIE2_INTERVAL 20000 /* us */
+#define AIE2_TIMEOUT 1000000 /* us */
+
+/* Firmware determines device memory base address and size */
+#define AIE2_DEVM_BASE 0x4000000
+#define AIE2_DEVM_SIZE SZ_64M
+
+#define NDEV2PDEV(ndev) (to_pci_dev((ndev)->xdna->ddev.dev))
+
+#define AIE2_SRAM_OFF(ndev, addr) ((addr) - (ndev)->priv->sram_dev_addr)
+#define AIE2_MBOX_OFF(ndev, addr) ((addr) - (ndev)->priv->mbox_dev_addr)
+
+#define PSP_REG_BAR(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].bar_idx)
+#define PSP_REG_OFF(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].offset)
+#define SRAM_REG_OFF(ndev, idx) ((ndev)->priv->sram_offs[(idx)].offset)
+
+#define SMU_REG(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->smu_base + (_ndev)->priv->smu_regs_off[(idx)].offset); \
+})
+#define SRAM_GET_ADDR(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->sram_base + SRAM_REG_OFF((_ndev), (idx))); \
+})
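+/*
+ * The _ndev temporaries above ensure the ndev argument is evaluated only
+ * once, so a caller passing an expression with side effects stays safe.
+ */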
+
+#define CHAN_SLOT_SZ SZ_8K
+#define MBOX_SIZE(ndev) \
+({ \
+ typeof(ndev) _ndev = (ndev); \
+ ((_ndev)->priv->mbox_size) ? (_ndev)->priv->mbox_size : \
+ pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
+})
+
+enum aie2_smu_reg_idx {
+ SMU_CMD_REG = 0,
+ SMU_ARG_REG,
+ SMU_INTR_REG,
+ SMU_RESP_REG,
+ SMU_OUT_REG,
+ SMU_MAX_REGS /* Keep this at the end */
+};
+
+enum aie2_sram_reg_idx {
+ MBOX_CHANN_OFF = 0,
+ FW_ALIVE_OFF,
+ SRAM_MAX_INDEX /* Keep this at the end */
+};
+
+enum psp_reg_idx {
+ PSP_CMD_REG = 0,
+ PSP_ARG0_REG,
+ PSP_ARG1_REG,
+ PSP_ARG2_REG,
+ PSP_NUM_IN_REGS, /* number of input registers */
+ PSP_INTR_REG = PSP_NUM_IN_REGS,
+ PSP_STATUS_REG,
+ PSP_RESP_REG,
+ PSP_MAX_REGS /* Keep this at the end */
+};
+
+struct amdxdna_client;
+struct amdxdna_fw_ver;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+struct psp_config {
+ const void *fw_buf;
+ u32 fw_size;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+struct aie_version {
+ u16 major;
+ u16 minor;
+};
+
+struct aie_tile_metadata {
+ u16 row_count;
+ u16 row_start;
+ u16 dma_channel_count;
+ u16 lock_count;
+ u16 event_reg_count;
+};
+
+struct aie_metadata {
+ u32 size;
+ u16 cols;
+ u16 rows;
+ struct aie_version version;
+ struct aie_tile_metadata core;
+ struct aie_tile_metadata mem;
+ struct aie_tile_metadata shim;
+};
+
+enum rt_config_category {
+ AIE2_RT_CFG_INIT,
+ AIE2_RT_CFG_CLK_GATING,
+};
+
+struct rt_config {
+ u32 type;
+ u32 value;
+ u32 category;
+};
+
+struct dpm_clk_freq {
+ u32 npuclk;
+ u32 hclk;
+};
+
+/*
+ * Define the maximum number of pending commands in a hardware context.
+ * Must be a power of 2!
+ */
+#define HWCTX_MAX_CMDS 4
+#define get_job_idx(seq) ((seq) & (HWCTX_MAX_CMDS - 1))
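+/*
+ * For illustration, with HWCTX_MAX_CMDS == 4 the power-of-2 requirement
+ * lets the mask stand in for a modulo: get_job_idx(0) == 0,
+ * get_job_idx(5) == 1 and get_job_idx(7) == 3, i.e. seq % HWCTX_MAX_CMDS.
+ */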
+struct amdxdna_hwctx_priv {
+ struct amdxdna_gem_obj *heap;
+ void *mbox_chann;
+
+ struct drm_gpu_scheduler sched;
+ struct drm_sched_entity entity;
+
+ struct mutex io_lock; /* protect seq and cmd order */
+ struct wait_queue_head job_free_wq;
+ u32 num_pending;
+ u64 seq;
+ struct semaphore job_sem;
+ bool job_done;
+
+ /* Completed job counter */
+ u64 completed;
+
+ struct amdxdna_gem_obj *cmd_buf[HWCTX_MAX_CMDS];
+ struct drm_syncobj *syncobj;
+};
+
+enum aie2_dev_status {
+ AIE2_DEV_UNINIT,
+ AIE2_DEV_INIT,
+ AIE2_DEV_START,
+};
+
+struct amdxdna_dev_hdl {
+ struct amdxdna_dev *xdna;
+ const struct amdxdna_dev_priv *priv;
+ void __iomem *sram_base;
+ void __iomem *smu_base;
+ void __iomem *mbox_base;
+ struct psp_device *psp_hdl;
+
+ struct xdna_mailbox_chann_res mgmt_x2i;
+ struct xdna_mailbox_chann_res mgmt_i2x;
+ u32 mgmt_chan_idx;
+ u32 mgmt_prot_major;
+ u32 mgmt_prot_minor;
+
+ u32 total_col;
+ struct aie_version version;
+ struct aie_metadata metadata;
+
+ /* Power management and clock */
+ enum amdxdna_power_mode_type pw_mode;
+ u32 dpm_level;
+ u32 dft_dpm_level;
+ u32 max_dpm_level;
+ u32 clk_gating;
+ u32 npuclk_freq;
+ u32 hclk_freq;
+
+ /* Mailbox and the management channel */
+ struct mailbox *mbox;
+ struct mailbox_channel *mgmt_chann;
+ struct async_events *async_events;
+
+ enum aie2_dev_status dev_status;
+ u32 hwctx_num;
+};
+
+#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
+ [reg_name] = {bar##_BAR_INDEX, (reg_addr) - bar##_BAR_BASE}
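+/*
+ * A sketch of how DEFINE_BAR_OFFSET() is meant to be used, with made-up
+ * names and addresses (the real tables live in the npuX_regs.c files):
+ *
+ *   #define FOO_SMU_BAR_INDEX 0
+ *   #define FOO_SMU_BAR_BASE  0x1000
+ *   DEFINE_BAR_OFFSET(SMU_CMD_REG, FOO_SMU, 0x1234)
+ * expands to:
+ *   [SMU_CMD_REG] = {0, 0x234}
+ */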
+
+struct aie2_bar_off_pair {
+ int bar_idx;
+ u32 offset;
+};
+
+struct aie2_hw_ops {
+ int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+};
+
+struct amdxdna_dev_priv {
+ const char *fw_path;
+ u64 protocol_major;
+ u64 protocol_minor;
+ const struct rt_config *rt_config;
+ const struct dpm_clk_freq *dpm_clk_tbl;
+
+#define COL_ALIGN_NONE 0
+#define COL_ALIGN_NATURE 1
+ u32 col_align;
+ u32 mbox_dev_addr;
+ /* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
+ u32 mbox_size;
+ u32 sram_dev_addr;
+ struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
+ struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
+ struct aie2_bar_off_pair smu_regs_off[SMU_MAX_REGS];
+ struct aie2_hw_ops hw_ops;
+};
+
+extern const struct amdxdna_dev_ops aie2_ops;
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val);
+
+/* aie2 npu hw config */
+extern const struct dpm_clk_freq npu1_dpm_clk_table[];
+extern const struct dpm_clk_freq npu4_dpm_clk_table[];
+extern const struct rt_config npu1_default_rt_cfg[];
+extern const struct rt_config npu4_default_rt_cfg[];
+
+/* aie2_smu.c */
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev);
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+
+/* aie2_pm.c */
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev);
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target);
+
+/* aie2_psp.c */
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf);
+int aie2_psp_start(struct psp_device *psp);
+void aie2_psp_stop(struct psp_device *psp);
+
+/* aie2_error.c */
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_msg_thread(void *data);
+
+/* aie2_message.c */
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value);
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid);
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version);
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata);
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver);
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, const u32 *, size_t));
+int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+
+/* aie2_hwctx.c */
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+void aie2_restart_ctx(struct amdxdna_client *client);
+
+#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
new file mode 100644
index 000000000000..426c38fce848
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pm.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define AIE2_CLK_GATING_ENABLE 1
+#define AIE2_CLK_GATING_DISABLE 0
+
+static int aie2_pm_set_clk_gating(struct amdxdna_dev_hdl *ndev, u32 val)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &val);
+ if (ret)
+ return ret;
+
+ ndev->clk_gating = val;
+ return 0;
+}
+
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ if (ndev->dev_status != AIE2_DEV_UNINIT) {
+ /* Resume device */
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, ndev->clk_gating);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
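+ /*
+ * The DPM clock table is terminated by an entry whose hclk is 0;
+ * walk it once to find the highest valid level.
+ */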
+ while (ndev->priv->dpm_clk_tbl[ndev->max_dpm_level].hclk)
+ ndev->max_dpm_level++;
+ ndev->max_dpm_level--;
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->max_dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, AIE2_CLK_GATING_ENABLE);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = POWER_MODE_DEFAULT;
+ ndev->dft_dpm_level = ndev->max_dpm_level;
+
+ return 0;
+}
+
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 clk_gating, dpm_level;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (ndev->pw_mode == target)
+ return 0;
+
+ switch (target) {
+ case POWER_MODE_TURBO:
+ if (ndev->hwctx_num) {
+ XDNA_ERR(xdna, "Can not set turbo when there is active hwctx");
+ return -EINVAL;
+ }
+
+ clk_gating = AIE2_CLK_GATING_DISABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_HIGH:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_DEFAULT:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->dft_dpm_level;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, clk_gating);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = target;
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
new file mode 100644
index 000000000000..dc3a072ce3b6
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+#define PSP_STATUS_READY BIT(31)
+
+/* PSP commands */
+#define PSP_VALIDATE 1
+#define PSP_START 2
+#define PSP_RELEASE_TMR 3
+
+/* PSP special arguments */
+#define PSP_START_COPY_FW 1
+
+/* PSP response error code */
+#define PSP_ERROR_CANCEL 0xFFFF0002
+#define PSP_ERROR_BAD_STATE 0xFFFF0007
+
+#define PSP_FW_ALIGN 0x10000
+#define PSP_POLL_INTERVAL 20000 /* us */
+#define PSP_POLL_TIMEOUT 1000000 /* us */
+
+#define PSP_REG(p, reg) ((p)->psp_regs[reg])
+
+struct psp_device {
+ struct drm_device *ddev;
+ struct psp_config conf;
+ u32 fw_buf_sz;
+ u64 fw_paddr;
+ void *fw_buffer;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
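+/*
+ * PSP command protocol, as implemented below: write the command and its
+ * arguments to the input registers, pulse the interrupt register to kick
+ * off the PSP, poll the status register for the ready bit, then read the
+ * response register; a non-zero response is a firmware error code.
+ */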
+static int psp_exec(struct psp_device *psp, u32 *reg_vals)
+{
+ u32 resp_code;
+ int ret, i;
+ u32 ready;
+
+ /* Write command and argument registers */
+ for (i = 0; i < PSP_NUM_IN_REGS; i++)
+ writel(reg_vals[i], PSP_REG(psp, i));
+
+ /* clear and set PSP INTR register to kick off */
+ writel(0, PSP_REG(psp, PSP_INTR_REG));
+ writel(1, PSP_REG(psp, PSP_INTR_REG));
+
+ /* The PSP goes busy on kick-off; wait for the ready bit so we know the task is done. */
+ ret = readx_poll_timeout(readl, PSP_REG(psp, PSP_STATUS_REG), ready,
+ FIELD_GET(PSP_STATUS_READY, ready),
+ PSP_POLL_INTERVAL, PSP_POLL_TIMEOUT);
+ if (ret) {
+ drm_err(psp->ddev, "PSP is not ready, ret 0x%x", ret);
+ return ret;
+ }
+
+ resp_code = readl(PSP_REG(psp, PSP_RESP_REG));
+ if (resp_code) {
+ drm_err(psp->ddev, "fw return error 0x%x", resp_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void aie2_psp_stop(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS] = { PSP_RELEASE_TMR, };
+ int ret;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret)
+ drm_err(psp->ddev, "release tmr failed, ret %d", ret);
+}
+
+int aie2_psp_start(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS];
+ int ret;
+
+ reg_vals[0] = PSP_VALIDATE;
+ reg_vals[1] = lower_32_bits(psp->fw_paddr);
+ reg_vals[2] = upper_32_bits(psp->fw_paddr);
+ reg_vals[3] = psp->fw_buf_sz;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to validate fw, ret %d", ret);
+ return ret;
+ }
+
+ memset(reg_vals, 0, sizeof(reg_vals));
+ reg_vals[0] = PSP_START;
+ reg_vals[1] = PSP_START_COPY_FW;
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to start fw, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf)
+{
+ struct psp_device *psp;
+ u64 offset;
+
+ psp = drmm_kzalloc(ddev, sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return NULL;
+
+ psp->ddev = ddev;
+ memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
+
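+ /*
+ * Over-allocate by one alignment unit so that a PSP_FW_ALIGN-aligned
+ * window of at least fw_size bytes always fits. For example, a
+ * fw_size of 0x18000 yields a 0x30000-byte buffer, and the aligned
+ * copy starts at most PSP_FW_ALIGN - 1 bytes into it.
+ */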
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+ if (!psp->fw_buffer) {
+ drm_err(ddev, "no memory for fw buffer");
+ return NULL;
+ }
+
+ /*
+ * The AMD Platform Security Processor (PSP) requires a host physical
+ * address to load the NPU firmware.
+ */
+ psp->fw_paddr = virt_to_phys(psp->fw_buffer);
+ offset = ALIGN(psp->fw_paddr, PSP_FW_ALIGN) - psp->fw_paddr;
+ psp->fw_paddr += offset;
+ memcpy(psp->fw_buffer + offset, conf->fw_buf, conf->fw_size);
+
+ return psp;
+}
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
new file mode 100644
index 000000000000..73388443c676
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define SMU_RESULT_OK 1
+
+/* SMU commands */
+#define AIE2_SMU_POWER_ON 0x3
+#define AIE2_SMU_POWER_OFF 0x4
+#define AIE2_SMU_SET_MPNPUCLK_FREQ 0x5
+#define AIE2_SMU_SET_HCLK_FREQ 0x6
+#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
+#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
+
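+/*
+ * SMU command protocol, as implemented below: clear the response register,
+ * write the argument and command registers, pulse the interrupt register to
+ * kick off the SMU, then poll for a non-zero response; SMU_RESULT_OK means
+ * success, anything else is an SMU error code.
+ */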
+static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
+ u32 reg_arg, u32 *out)
+{
+ u32 resp;
+ int ret;
+
+ writel(0, SMU_REG(ndev, SMU_RESP_REG));
+ writel(reg_arg, SMU_REG(ndev, SMU_ARG_REG));
+ writel(reg_cmd, SMU_REG(ndev, SMU_CMD_REG));
+
+ /* Clear and set SMU_INTR_REG to kick off */
+ writel(0, SMU_REG(ndev, SMU_INTR_REG));
+ writel(1, SMU_REG(ndev, SMU_INTR_REG));
+
+ ret = readx_poll_timeout(readl, SMU_REG(ndev, SMU_RESP_REG), resp,
+ resp, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d timed out", reg_cmd);
+ return ret;
+ }
+
+ if (out)
+ *out = readl(SMU_REG(ndev, SMU_OUT_REG));
+
+ if (resp != SMU_RESULT_OK) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d failed, 0x%x", reg_cmd, resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ u32 freq;
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
+ }
+ ndev->npuclk_freq = freq;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
+ }
+ ndev->hclk_freq = freq;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
+ ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ndev->priv->hw_ops.set_dpm(ndev, 0);
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret)
+ XDNA_ERR(ndev->xdna, "Power off failed, ret %d", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.c b/drivers/accel/amdxdna/aie2_solver.c
new file mode 100644
index 000000000000..2013d1f13aae
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#include "aie2_solver.h"
+
+struct partition_node {
+ struct list_head list;
+ u32 nshared; /* # shared requests */
+ u32 start_col; /* start column */
+ u32 ncols; /* # columns */
+ bool exclusive; /* cannot be shared if set */
+};
+
+struct solver_node {
+ struct list_head list;
+ u64 rid; /* Request ID from consumer */
+
+ struct partition_node *pt_node;
+ void *cb_arg;
+ u32 dpm_level;
+ u32 cols_len;
+ u32 start_cols[] __counted_by(cols_len);
+};
+
+struct solver_rgroup {
+ u32 rgid;
+ u32 nnode;
+ u32 npartition_node;
+
+ DECLARE_BITMAP(resbit, XRS_MAX_COL);
+ struct list_head node_list;
+ struct list_head pt_node_list;
+};
+
+struct solver_state {
+ struct solver_rgroup rgp;
+ struct init_config cfg;
+ struct xrs_action_ops *actions;
+};
+
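+/*
+ * A worked example of the GOPs math below, assuming latency is in ms:
+ * latency = 10 implies a service rate of 1000 / 10 = 100 frames/s; with
+ * fps = 30 the larger rate (100) wins, so gops = 5 requests 500 GOPs/s.
+ */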
+static u32 calculate_gops(struct aie_qos *rqos)
+{
+ u32 service_rate = 0;
+
+ if (rqos->latency)
+ service_rate = (1000 / rqos->latency);
+
+ if (rqos->fps > service_rate)
+ return rqos->fps * rqos->gops;
+
+ return service_rate * rqos->gops;
+}
+
+/*
+ * qos_meet() - Check whether the QoS request can be met.
+ */
+static int qos_meet(struct solver_state *xrs, struct aie_qos *rqos, u32 cgops)
+{
+ u32 request_gops = calculate_gops(rqos) * xrs->cfg.sys_eff_factor;
+
+ if (request_gops <= cgops)
+ return 0;
+
+ return -EINVAL;
+}
+
+/*
+ * sanity_check() - Do a basic sanity check on the allocation request.
+ */
+static int sanity_check(struct solver_state *xrs, struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 cu_clk_freq;
+
+ if (cdop->ncols > xrs->cfg.total_col)
+ return -EINVAL;
+
+ /*
+ * Check that at least one CDO group can meet the GOPs
+ * requirement at the highest available clock.
+ */
+ cu_clk_freq = xrs->cfg.clk_list.cu_clk_list[xrs->cfg.clk_list.num_levels - 1];
+
+ if (qos_meet(xrs, rqos, cdop->qos_cap.opc * cu_clk_freq / 1000))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool is_valid_qos_dpm_params(struct aie_qos *rqos)
+{
+ /*
+ * gops is retrieved from the xmodel, so it is always set;
+ * fps and latency are the configurable params from the application.
+ */
+ if (rqos->gops > 0 && (rqos->fps > 0 || rqos->latency > 0))
+ return true;
+
+ return false;
+}
+
+static int set_dpm_level(struct solver_state *xrs, struct alloc_requests *req, u32 *dpm_level)
+{
+ struct solver_rgroup *rgp = &xrs->rgp;
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 freq, max_dpm_level, level;
+ struct solver_node *node;
+
+ max_dpm_level = xrs->cfg.clk_list.num_levels - 1;
+ /* If no QoS parameters are passed, set it to the max DPM level */
+ if (!is_valid_qos_dpm_params(rqos)) {
+ level = max_dpm_level;
+ goto set_dpm;
+ }
+
+ /* Find one CDO group that meets the GOPs requirement. */
+ for (level = 0; level < max_dpm_level; level++) {
+ freq = xrs->cfg.clk_list.cu_clk_list[level];
+ if (!qos_meet(xrs, rqos, cdop->qos_cap.opc * freq / 1000))
+ break;
+ }
+
+ /* Pick the highest DPM level required across the existing sessions */
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->dpm_level > level)
+ level = node->dpm_level;
+ }
+
+set_dpm:
+ *dpm_level = level;
+ return xrs->cfg.actions->set_dft_dpm_level(xrs->cfg.ddev, level);
+}
+
+static struct solver_node *rg_search_node(struct solver_rgroup *rgp, u64 rid)
+{
+ struct solver_node *node;
+
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->rid == rid)
+ return node;
+ }
+
+ return NULL;
+}
+
+static void remove_partition_node(struct solver_rgroup *rgp,
+ struct partition_node *pt_node)
+{
+ pt_node->nshared--;
+ if (pt_node->nshared > 0)
+ return;
+
+ list_del(&pt_node->list);
+ rgp->npartition_node--;
+
+ bitmap_clear(rgp->resbit, pt_node->start_col, pt_node->ncols);
+ kfree(pt_node);
+}
+
+static void remove_solver_node(struct solver_rgroup *rgp,
+ struct solver_node *node)
+{
+ list_del(&node->list);
+ rgp->nnode--;
+
+ if (node->pt_node)
+ remove_partition_node(rgp, node->pt_node);
+
+ kfree(node);
+}
+
+static int get_free_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node;
+ u32 ncols = req->cdo.ncols;
+ u32 col, i;
+
+ for (i = 0; i < snode->cols_len; i++) {
+ col = snode->start_cols[i];
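+ /* [col, col + ncols) is free iff the next set bit lands at or past col + ncols */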
+ if (find_next_bit(xrs->rgp.resbit, XRS_MAX_COL, col) >= col + ncols)
+ break;
+ }
+
+ if (i == snode->cols_len)
+ return -ENODEV;
+
+ pt_node = kzalloc(sizeof(*pt_node), GFP_KERNEL);
+ if (!pt_node)
+ return -ENOMEM;
+
+ pt_node->nshared = 1;
+ pt_node->start_col = col;
+ pt_node->ncols = ncols;
+
+ /*
+ * Always set exclusive to false for now.
+ */
+ pt_node->exclusive = false;
+
+ list_add_tail(&pt_node->list, &xrs->rgp.pt_node_list);
+ xrs->rgp.npartition_node++;
+ bitmap_set(xrs->rgp.resbit, pt_node->start_col, pt_node->ncols);
+
+ snode->pt_node = pt_node;
+
+ return 0;
+}
+
+static int allocate_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node, *rpt_node = NULL;
+ int idx, ret;
+
+ ret = get_free_partition(xrs, snode, req);
+ if (!ret)
+ return ret;
+
+ /* Try to get a shareable partition */
+ list_for_each_entry(pt_node, &xrs->rgp.pt_node_list, list) {
+ if (pt_node->exclusive)
+ continue;
+
+ if (rpt_node && pt_node->nshared >= rpt_node->nshared)
+ continue;
+
+ for (idx = 0; idx < snode->cols_len; idx++) {
+ if (snode->start_cols[idx] != pt_node->start_col)
+ continue;
+
+ if (req->cdo.ncols != pt_node->ncols)
+ continue;
+
+ rpt_node = pt_node;
+ break;
+ }
+ }
+
+ if (!rpt_node)
+ return -ENODEV;
+
+ rpt_node->nshared++;
+ snode->pt_node = rpt_node;
+
+ return 0;
+}
+
+static struct solver_node *create_solver_node(struct solver_state *xrs,
+ struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct solver_node *node;
+ int ret;
+
+ node = kzalloc(struct_size(node, start_cols, cdop->cols_len), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->rid = req->rid;
+ node->cols_len = cdop->cols_len;
+ memcpy(node->start_cols, cdop->start_cols, cdop->cols_len * sizeof(u32));
+
+ ret = allocate_partition(xrs, node, req);
+ if (ret)
+ goto free_node;
+
+ list_add_tail(&node->list, &xrs->rgp.node_list);
+ xrs->rgp.nnode++;
+ return node;
+
+free_node:
+ kfree(node);
+ return ERR_PTR(ret);
+}
+
+static void fill_load_action(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct xrs_action_load *action)
+{
+ action->rid = snode->rid;
+ action->part.start_col = snode->pt_node->start_col;
+ action->part.ncols = snode->pt_node->ncols;
+}
+
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg)
+{
+ struct xrs_action_load load_act;
+ struct solver_node *snode;
+ struct solver_state *xrs;
+ u32 dpm_level;
+ int ret;
+
+ xrs = (struct solver_state *)hdl;
+
+ ret = sanity_check(xrs, req);
+ if (ret) {
+ drm_err(xrs->cfg.ddev, "invalid request");
+ return ret;
+ }
+
+ if (rg_search_node(&xrs->rgp, req->rid)) {
+ drm_err(xrs->cfg.ddev, "rid %lld is in-use", req->rid);
+ return -EEXIST;
+ }
+
+ snode = create_solver_node(xrs, req);
+ if (IS_ERR(snode))
+ return PTR_ERR(snode);
+
+ fill_load_action(xrs, snode, &load_act);
+ ret = xrs->cfg.actions->load(cb_arg, &load_act);
+ if (ret)
+ goto free_node;
+
+ ret = set_dpm_level(xrs, req, &dpm_level);
+ if (ret)
+ goto free_node;
+
+ snode->dpm_level = dpm_level;
+ snode->cb_arg = cb_arg;
+
+ drm_dbg(xrs->cfg.ddev, "start col %d ncols %d\n",
+ snode->pt_node->start_col, snode->pt_node->ncols);
+
+ return 0;
+
+free_node:
+ remove_solver_node(&xrs->rgp, snode);
+
+ return ret;
+}
+
+int xrs_release_resource(void *hdl, u64 rid)
+{
+ struct solver_state *xrs = hdl;
+ struct solver_node *node;
+
+ node = rg_search_node(&xrs->rgp, rid);
+ if (!node) {
+ drm_err(xrs->cfg.ddev, "node not exist");
+ return -ENODEV;
+ }
+
+ xrs->cfg.actions->unload(node->cb_arg);
+ remove_solver_node(&xrs->rgp, node);
+
+ return 0;
+}
+
+void *xrsm_init(struct init_config *cfg)
+{
+ struct solver_rgroup *rgp;
+ struct solver_state *xrs;
+
+ xrs = drmm_kzalloc(cfg->ddev, sizeof(*xrs), GFP_KERNEL);
+ if (!xrs)
+ return NULL;
+
+ memcpy(&xrs->cfg, cfg, sizeof(*cfg));
+
+ rgp = &xrs->rgp;
+ INIT_LIST_HEAD(&rgp->node_list);
+ INIT_LIST_HEAD(&rgp->pt_node_list);
+
+ return xrs;
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.h b/drivers/accel/amdxdna/aie2_solver.h
new file mode 100644
index 000000000000..a2e3c52229e9
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_SOLVER_H
+#define _AIE2_SOLVER_H
+
+#define XRS_MAX_COL 128
+
+/*
+ * Structure used to describe a partition. A partition is a column-based
+ * allocation unit described by its start column and number of columns.
+ */
+struct aie_part {
+ u32 start_col;
+ u32 ncols;
+};
+
+/*
+ * The QoS capabilities of a given AIE partition.
+ */
+struct aie_qos_cap {
+ u32 opc; /* operations per cycle */
+ u32 dma_bw; /* DMA bandwidth */
+};
+
+/*
+ * QoS requirement of a resource allocation.
+ */
+struct aie_qos {
+ u32 gops; /* Giga operations */
+ u32 fps; /* Frames per second */
+ u32 dma_bw; /* DMA bandwidth */
+ u32 latency; /* Frame response latency */
+ u32 exec_time; /* Frame execution time */
+ u32 priority; /* Request priority */
+};
+
+/*
+ * Structure used to describe a relocatable CDO (Configuration Data Object).
+ */
+struct cdo_parts {
+ u32 *start_cols; /* Start column array */
+ u32 cols_len; /* Length of start column array */
+ u32 ncols; /* # of columns */
+ struct aie_qos_cap qos_cap; /* CDO QoS capabilities */
+};
+
+/*
+ * Structure used to describe a request to allocate.
+ */
+struct alloc_requests {
+ u64 rid;
+ struct cdo_parts cdo;
+ struct aie_qos rqos; /* Requested QoS */
+};
+
+/*
+ * Load callback argument
+ */
+struct xrs_action_load {
+ u32 rid;
+ struct aie_part part;
+};
+
+/*
+ * Define the available power levels
+ *
+ * POWER_LEVEL_MIN:
+ * Lowest power level. Usually set when all actions are unloaded.
+ *
+ * POWER_LEVEL_n
+ * Power levels 0 - n, is a step increase in system frequencies
+ */
+enum power_level {
+ POWER_LEVEL_MIN = 0x0,
+ POWER_LEVEL_0 = 0x1,
+ POWER_LEVEL_1 = 0x2,
+ POWER_LEVEL_2 = 0x3,
+ POWER_LEVEL_3 = 0x4,
+ POWER_LEVEL_4 = 0x5,
+ POWER_LEVEL_5 = 0x6,
+ POWER_LEVEL_6 = 0x7,
+ POWER_LEVEL_7 = 0x8,
+ POWER_LEVEL_NUM,
+};
+
+/*
+ * Structure used to describe the frequency table.
+ * The resource solver chooses a frequency from the table
+ * to meet the QoS requirements.
+ */
+struct clk_list_info {
+ u32 num_levels; /* number of available power levels */
+ u32 cu_clk_list[POWER_LEVEL_NUM]; /* available AIE clock frequencies in MHz */
+};
+
+struct xrs_action_ops {
+ int (*load)(void *cb_arg, struct xrs_action_load *action);
+ int (*unload)(void *cb_arg);
+ int (*set_dft_dpm_level)(struct drm_device *ddev, u32 level);
+};
+
+/*
+ * Structure used to describe information for solver during initialization.
+ */
+struct init_config {
+ u32 total_col;
+ u32 sys_eff_factor; /* system efficiency factor */
+ u32 latency_adj; /* latency adjustment in ms */
+ struct clk_list_info clk_list; /* List of frequencies available in system */
+ struct drm_device *ddev;
+ struct xrs_action_ops *actions;
+};
+
+/*
+ * xrsm_init() - Register the resource solver. A resource solver client
+ * needs to call this function to register itself.
+ *
+ * @cfg: The system metrics for resource solver to use
+ *
+ * Return: A resource solver handle
+ *
+ * Note: We should only create one handle per AIE array to be managed.
+ */
+void *xrsm_init(struct init_config *cfg);
+
+/*
+ * xrs_allocate_resource() - Request to allocate resources for a given context
+ * and its partition metadata (see struct cdo_parts).
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @req: Input to the Resource solver including request id
+ * and partition metadata.
+ * @cb_arg: callback argument pointer
+ *
+ * Return: 0 when successful.
+ * Or standard error number when failing
+ *
+ * Note:
+ * There is no lock mechanism inside the resource solver, so it is
+ * the caller's responsibility to lock down XCLBINs and grab the
+ * necessary locks.
+ */
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg);
+
+/*
+ * xrs_release_resource() - Request to free resources for a given context.
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @rid: The Request ID to identify the requesting context
+ */
+int xrs_release_resource(void *hdl, u64 rid);
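+
+/*
+ * Typical call sequence, as a sketch (error handling elided; the request
+ * field values below are purely illustrative):
+ *
+ *   void *hdl = xrsm_init(&cfg);
+ *   struct alloc_requests req = {
+ *           .rid = 1,
+ *           .cdo = { .start_cols = cols, .cols_len = n, .ncols = 4 },
+ *           .rqos = { .gops = 5, .fps = 30 },
+ *   };
+ *   ret = xrs_allocate_resource(hdl, &req, hwctx);
+ *   ...
+ *   ret = xrs_release_resource(hdl, req.rid);
+ */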
+#endif /* _AIE2_SOLVER_H */
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
new file mode 100644
index 000000000000..d11b1c83d9c3
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define MAX_HWCTX_ID 255
+#define MAX_ARG_COUNT 4095
+
+struct amdxdna_fence {
+ struct dma_fence base;
+ spinlock_t lock; /* for base */
+ struct amdxdna_hwctx *hwctx;
+};
+
+static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
+{
+ return KBUILD_MODNAME;
+}
+
+static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct amdxdna_fence *xdna_fence;
+
+ xdna_fence = container_of(fence, struct amdxdna_fence, base);
+
+ return xdna_fence->hwctx->name;
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = amdxdna_fence_get_driver_name,
+ .get_timeline_name = amdxdna_fence_get_timeline_name,
+};
+
+static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->hwctx = hwctx;
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
+ return &fence->base;
+}
+
+void amdxdna_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ xdna->dev_info->ops->hwctx_suspend(hwctx);
+ mutex_unlock(&client->hwctx_lock);
+}
+
+void amdxdna_hwctx_resume(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ xdna->dev_info->ops->hwctx_resume(hwctx);
+ mutex_unlock(&client->hwctx_lock);
+}
+
+static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
+ struct srcu_struct *ss)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ synchronize_srcu(ss);
+
+ /* At this point, the user can no longer submit new commands */
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
+ kfree(hwctx->name);
+ kfree(hwctx);
+}
+
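+/*
+ * The command header packs the CU mask count and a total count in 32-bit
+ * words; the payload follows the masks. For illustration: with one extra
+ * CU mask (num_masks = 2) and count = 10, the payload is (10 - 2) * 4 =
+ * 32 bytes starting at data[2].
+ */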
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, count;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ num_masks = 0;
+ else
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+
+ if (size) {
+ count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
+ if (unlikely(count <= num_masks)) {
+ *size = 0;
+ return NULL;
+ }
+ *size = (count - num_masks) * sizeof(u32);
+ }
+ return &cmd->data[num_masks];
+}
+
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, i;
+ u32 *cu_mask;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ return -1;
+
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+ cu_mask = cmd->data;
+ for (i = 0; i < num_masks; i++) {
+ if (cu_mask[i])
+ return ffs(cu_mask[i]) - 1;
+ }
+
+ return -1;
+}
+
+/*
+ * This should be called in close() and remove(). DO NOT call in other syscalls.
+ * It guarantees that the hwctx and its resources are released even if the
+ * user never calls amdxdna_drm_destroy_hwctx_ioctl.
+ */
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ XDNA_DBG(client->xdna, "PID %d close HW context %d",
+ client->pid, hwctx->id);
+ xa_erase(&client->hwctx_xa, hwctx->id);
+ mutex_unlock(&client->hwctx_lock);
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+ mutex_lock(&client->hwctx_lock);
+ }
+ mutex_unlock(&client->hwctx_lock);
+}
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_create_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
+ if (!hwctx) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
+ XDNA_ERR(xdna, "Access QoS info failed");
+ ret = -EFAULT;
+ goto free_hwctx;
+ }
+
+ hwctx->client = client;
+ hwctx->fw_ctx_id = -1;
+ hwctx->num_tiles = args->num_tiles;
+ hwctx->mem_size = args->mem_size;
+ hwctx->max_opc = args->max_opc;
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+ goto free_hwctx;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
+ if (!hwctx->name) {
+ ret = -ENOMEM;
+ goto rm_id;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ mutex_unlock(&xdna->dev_lock);
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto free_name;
+ }
+ args->handle = hwctx->id;
+ args->syncobj_handle = hwctx->syncobj_hdl;
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
+ drm_dev_exit(idx);
+ return 0;
+
+free_name:
+ kfree(hwctx->name);
+rm_id:
+ xa_erase(&client->hwctx_xa, hwctx->id);
+free_hwctx:
+ kfree(hwctx);
+exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_destroy_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret = 0, idx;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ hwctx = xa_erase(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ ret = -EINVAL;
+ XDNA_DBG(xdna, "PID %d HW context %d not exist",
+ client->pid, args->handle);
+ goto out;
+ }
+
+ /*
+ * The pushed jobs are handled by the DRM scheduler during destroy.
+ * SRCU is used to synchronize with the exec command ioctls.
+ */
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+
+ XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
+out:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_config_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+ u32 buf_size;
+ void *buf;
+ u64 val;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!xdna->dev_info->ops->hwctx_config)
+ return -EOPNOTSUPP;
+
+ val = args->param_val;
+ buf_size = args->param_val_size;
+
+ switch (args->param_type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ /* For the types where param_val is a pointer */
+ if (buf_size > PAGE_SIZE) {
+ XDNA_ERR(xdna, "Config CU param buffer too large");
+ return -E2BIG;
+ }
+
+ /* Hwctx needs to keep buf */
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ break;
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ /* For the types where param_val is a plain value */
+ buf = NULL;
+ buf_size = 0;
+ break;
+ default:
+ XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ mutex_unlock(&xdna->dev_lock);
+ kfree(buf);
+ return ret;
+}
+
+static void
+amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ if (!job->bos[i])
+ break;
+ drm_gem_object_put(job->bos[i]);
+ }
+}
+
+static int
+amdxdna_arg_bos_lookup(struct amdxdna_client *client,
+ struct amdxdna_sched_job *job,
+ u32 *bo_hdls, u32 bo_cnt)
+{
+ struct drm_gem_object *gobj;
+ int i, ret;
+
+ job->bo_cnt = bo_cnt;
+ for (i = 0; i < job->bo_cnt; i++) {
+ struct amdxdna_gem_obj *abo;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
+ if (!gobj) {
+ ret = -ENOENT;
+ goto put_shmem_bo;
+ }
+ abo = to_xdna_obj(gobj);
+
+ mutex_lock(&abo->lock);
+ if (abo->pinned) {
+ mutex_unlock(&abo->lock);
+ job->bos[i] = gobj;
+ continue;
+ }
+
+ ret = amdxdna_gem_pin_nolock(abo);
+ if (ret) {
+ mutex_unlock(&abo->lock);
+ drm_gem_object_put(gobj);
+ goto put_shmem_bo;
+ }
+ abo->pinned = true;
+ mutex_unlock(&abo->lock);
+
+ job->bos[i] = gobj;
+ }
+
+ return 0;
+
+put_shmem_bo:
+ amdxdna_arg_bos_put(job);
+ return ret;
+}
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
+{
+ trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
+ amdxdna_arg_bos_put(job);
+ amdxdna_gem_put_obj(job->cmd_bo);
+}
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_sched_job *job;
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
+ job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
+ job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
+ if (!job->cmd_bo) {
+ XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
+ ret = -EINVAL;
+ goto free_job;
+ }
+ } else {
+ job->cmd_bo = NULL;
+ }
+
+ ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
+ if (ret) {
+ XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
+ goto cmd_put;
+ }
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
+ client->pid, hwctx_hdl);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_ERR(xdna, "HW Context is not ready");
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ job->hwctx = hwctx;
+ job->mm = current->mm;
+
+ job->fence = amdxdna_fence_create(hwctx);
+ if (!job->fence) {
+ XDNA_ERR(xdna, "Failed to create fence");
+ ret = -ENOMEM;
+ goto unlock_srcu;
+ }
+ kref_init(&job->refcnt);
+
+ ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
+ if (ret)
+ goto put_fence;
+
+	/*
+	 * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
+	 * resources only after synchronize_srcu(). Jobs that were already
+	 * submitted are handled by the queue in the device layer, for
+	 * example the DRM scheduler, so it is safe to unlock SRCU here.
+	 */
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");
+
+ return 0;
+
+put_fence:
+ dma_fence_put(job->fence);
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ amdxdna_arg_bos_put(job);
+cmd_put:
+ amdxdna_gem_put_obj(job->cmd_bo);
+free_job:
+ kfree(job);
+ return ret;
+}
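+
+/*
+ * Illustrative sketch (not part of this patch): the SRCU pattern used in
+ * amdxdna_cmd_submit() for hwctx lifetime. Readers look the context up
+ * under an SRCU read lock; the destroy path unpublishes the handle first
+ * and only frees after all readers are done:
+ *
+ *	// reader side, e.g. the submit path above
+ *	idx = srcu_read_lock(&client->hwctx_srcu);
+ *	hwctx = xa_load(&client->hwctx_xa, handle);
+ *	// ... use hwctx ...
+ *	srcu_read_unlock(&client->hwctx_srcu, idx);
+ *
+ *	// destroy side
+ *	xa_erase(&client->hwctx_xa, handle);
+ *	synchronize_srcu(&client->hwctx_srcu);
+ *	// no reader can still observe hwctx; safe to free it
+ */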
+
+/*
+ * The submit command ioctl submits a command to the firmware. One firmware
+ * command may contain multiple command BOs which are processed as a whole.
+ * The returned command sequence number can be passed to the wait command
+ * ioctl.
+ */
+static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
+ struct amdxdna_drm_exec_cmd *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ u32 *arg_bo_hdls;
+ u32 cmd_bo_hdl;
+ int ret;
+
+ if (!args->arg_count || args->arg_count > MAX_ARG_COUNT) {
+ XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
+ return -EINVAL;
+ }
+
+ /* Only support single command for now. */
+ if (args->cmd_count != 1) {
+ XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
+ return -EINVAL;
+ }
+
+ cmd_bo_hdl = (u32)args->cmd_handles;
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
+
+ ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
+ args->arg_count, args->hwctx, &args->seq);
+ if (ret)
+ XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
+
+free_cmd_bo_hdls:
+ kfree(arg_bo_hdls);
+ if (!ret)
+ XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
+ return ret;
+}
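+
+/*
+ * Hypothetical userspace flow (illustrative only; ioctl and field names
+ * follow the uapi header drm/amdxdna_accel.h as used above):
+ *
+ *	struct amdxdna_drm_exec_cmd ec = {
+ *		.hwctx = hwctx_handle,
+ *		.type = AMDXDNA_CMD_SUBMIT_EXEC_BUF,
+ *		.cmd_handles = cmd_bo_handle,
+ *		.cmd_count = 1,
+ *		.args = (__u64)(uintptr_t)arg_bo_handles,
+ *		.arg_count = nr_arg_bos,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_AMDXDNA_EXEC_CMD, &ec);
+ *	// on success, ec.seq holds the sequence number for a later wait
+ */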
+
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_exec_cmd *args = data;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ switch (args->type) {
+ case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
+ return amdxdna_drm_submit_execbuf(client, args);
+ }
+
+ XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
+ return -EINVAL;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
new file mode 100644
index 000000000000..80b0304193ec
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_CTX_H_
+#define _AMDXDNA_CTX_H_
+
+#include <linux/bitfield.h>
+
+#include "amdxdna_gem.h"
+
+struct amdxdna_hwctx_priv;
+
+enum ert_cmd_opcode {
+ ERT_START_CU = 0,
+ ERT_CMD_CHAIN = 19,
+ ERT_START_NPU = 20,
+};
+
+enum ert_cmd_state {
+ ERT_CMD_STATE_INVALID,
+ ERT_CMD_STATE_NEW,
+ ERT_CMD_STATE_QUEUED,
+ ERT_CMD_STATE_RUNNING,
+ ERT_CMD_STATE_COMPLETED,
+ ERT_CMD_STATE_ERROR,
+ ERT_CMD_STATE_ABORT,
+ ERT_CMD_STATE_SUBMITTED,
+ ERT_CMD_STATE_TIMEOUT,
+ ERT_CMD_STATE_NORESPONSE,
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_start_npu {
+ u64 buffer; /* instruction buffer address */
+ u32 buffer_size; /* size of buffer in bytes */
+ u32 prop_count; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
+ */
+struct amdxdna_cmd_chain {
+ u32 command_count;
+ u32 submit_index;
+ u32 error_index;
+ u32 reserved[3];
+ u64 data[] __counted_by(command_count);
+};
+
+/* Exec buffer command header format */
+#define AMDXDNA_CMD_STATE GENMASK(3, 0)
+#define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10)
+#define AMDXDNA_CMD_COUNT GENMASK(22, 12)
+#define AMDXDNA_CMD_OPCODE GENMASK(27, 23)
+struct amdxdna_cmd {
+ u32 header;
+ u32 data[];
+};
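+
+/*
+ * Example (illustrative): packing and unpacking a header word with the
+ * masks above and <linux/bitfield.h>:
+ *
+ *	u32 hdr = FIELD_PREP(AMDXDNA_CMD_OPCODE, ERT_START_CU) |
+ *		  FIELD_PREP(AMDXDNA_CMD_STATE, ERT_CMD_STATE_NEW);
+ *	u32 op = FIELD_GET(AMDXDNA_CMD_OPCODE, hdr);	/* ERT_START_CU */
+ */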
+
+struct amdxdna_hwctx {
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx_priv *priv;
+ char *name;
+
+ u32 id;
+ u32 max_opc;
+ u32 num_tiles;
+ u32 mem_size;
+ u32 fw_ctx_id;
+ u32 col_list_len;
+ u32 *col_list;
+ u32 start_col;
+ u32 num_col;
+#define HWCTX_STAT_INIT 0
+#define HWCTX_STAT_READY 1
+#define HWCTX_STAT_STOP 2
+ u32 status;
+ u32 old_status;
+
+ struct amdxdna_qos_info qos;
+ struct amdxdna_hwctx_param_config_cu *cus;
+ u32 syncobj_hdl;
+};
+
+#define drm_job_to_xdna_job(j) \
+ container_of(j, struct amdxdna_sched_job, base)
+
+struct amdxdna_sched_job {
+ struct drm_sched_job base;
+ struct kref refcnt;
+ struct amdxdna_hwctx *hwctx;
+ struct mm_struct *mm;
+	/* The fence to notify the DRM scheduler that the job is done by hardware */
+ struct dma_fence *fence;
+ /* user can wait on this fence */
+ struct dma_fence *out_fence;
+ bool job_done;
+ u64 seq;
+ struct amdxdna_gem_obj *cmd_bo;
+ size_t bo_cnt;
+ struct drm_gem_object *bos[] __counted_by(bo_cnt);
+};
+
+static inline u32
+amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
+}
+
+static inline void
+amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ cmd->header &= ~AMDXDNA_CMD_STATE;
+ cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
+}
+
+static inline enum ert_cmd_state
+amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
+
+static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
+{
+ return GENMASK(hwctx->start_col + hwctx->num_col - 1,
+ hwctx->start_col);
+}
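+
+/*
+ * Example (illustrative): start_col 2 and num_col 3 yield GENMASK(4, 2),
+ * i.e. a column bitmap of 0b11100.
+ */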
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
+void amdxdna_hwctx_suspend(struct amdxdna_client *client);
+void amdxdna_hwctx_resume(struct amdxdna_client *client);
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq);
+
+int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
+ u64 seq, u32 timeout);
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_CTX_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
new file mode 100644
index 000000000000..606433d73236
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iosys-map.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+
+static int
+amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+{
+ struct amdxdna_client *client = abo->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_mem *mem = &abo->mem;
+ u64 offset;
+ u32 align;
+ int ret;
+
+ align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
+ ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+ mem->size, align,
+ 0, DRM_MM_INSERT_BEST);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ return ret;
+ }
+
+ mem->dev_addr = abo->mm_node.start;
+ offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
+ mem->userptr = abo->dev_heap->mem.userptr + offset;
+ mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
+ mem->nr_pages = mem->size >> PAGE_SHIFT;
+
+ if (use_vmap) {
+ mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!mem->kva) {
+ XDNA_ERR(xdna, "Failed to vmap");
+ drm_mm_remove_node(&abo->mm_node);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
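+
+/*
+ * Worked example (illustrative, hypothetical addresses): with a heap at
+ * dev_addr 0x4000000 and a node inserted at 0x4010000, offset is 0x10000,
+ * so the BO reuses the heap pages starting at page index
+ * 0x10000 >> PAGE_SHIFT and its userptr is the heap userptr plus 0x10000.
+ */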
+
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ if (abo->type == AMDXDNA_BO_DEV) {
+ mutex_lock(&abo->client->mm_lock);
+ drm_mm_remove_node(&abo->mm_node);
+ mutex_unlock(&abo->client->mm_lock);
+
+ vunmap(abo->mem.kva);
+ drm_gem_object_put(to_gobj(abo->dev_heap));
+ drm_gem_object_release(gobj);
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+ return;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap_unlocked(gobj, &map);
+ mutex_destroy(&abo->lock);
+ drm_gem_shmem_free(&abo->base);
+}
+
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_obj_free,
+};
+
+static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
+ mem.notifier);
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+
+ XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
+ abo->mem.userptr, abo->mem.size, abo->type);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
+ .invalidate = amdxdna_hmm_invalidate,
+};
+
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return;
+
+ mmu_interval_notifier_remove(&abo->mem.notifier);
+ kvfree(abo->mem.pfns);
+ abo->mem.pfns = NULL;
+}
+
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
+ size_t len)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ u32 nr_pages;
+ int ret;
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return 0;
+
+ if (abo->mem.pfns)
+ return -EEXIST;
+
+ nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+ abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
+ GFP_KERNEL);
+ if (!abo->mem.pfns)
+ return -ENOMEM;
+
+ ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
+ current->mm,
+ addr,
+ len,
+ &amdxdna_hmm_ops);
+	if (ret) {
+		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
+		kvfree(abo->mem.pfns);
+		abo->mem.pfns = NULL;
+		return ret;
+	}
+	abo->mem.userptr = addr;
+
+ return ret;
+}
+
+static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages;
+ int ret;
+
+ ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret)
+ goto hmm_unreg;
+
+ num_pages = gobj->size >> PAGE_SHIFT;
+ /* Try to insert the pages */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
+ if (ret)
+ XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
+
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo);
+ return ret;
+}
+
+static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
+{
+ return drm_gem_shmem_vm_ops.fault(vmf);
+}
+
+static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
+{
+ drm_gem_shmem_vm_ops.open(vma);
+}
+
+static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = vma->vm_private_data;
+
+ amdxdna_hmm_unregister(to_xdna_obj(gobj));
+ drm_gem_shmem_vm_ops.close(vma);
+}
+
+static const struct vm_operations_struct amdxdna_gem_vm_ops = {
+ .fault = amdxdna_gem_vm_fault,
+ .open = amdxdna_gem_vm_open,
+ .close = amdxdna_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
+ .free = amdxdna_gem_obj_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = amdxdna_gem_obj_mmap,
+ .vm_ops = &amdxdna_gem_vm_ops,
+};
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = kzalloc(sizeof(*abo), GFP_KERNEL);
+ if (!abo)
+ return ERR_PTR(-ENOMEM);
+
+ abo->pinned = false;
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+ mutex_init(&abo->lock);
+
+ abo->mem.userptr = AMDXDNA_INVALID_ADDR;
+ abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
+ abo->mem.size = size;
+
+ return abo;
+}
+
+/* For drm_driver->gem_create_object callback */
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_create_obj(dev, size);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;
+
+ return to_gobj(abo);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_alloc_shmem(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+
+ abo = to_xdna_obj(&shmem->base);
+ abo->client = client;
+ abo->type = AMDXDNA_BO_SHMEM;
+
+ return abo;
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_dev_heap(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->size > xdna->dev_info->dev_mem_size) {
+ XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
+ args->size, xdna->dev_info->dev_mem_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&client->mm_lock);
+ if (client->dev_heap) {
+ XDNA_DBG(client->xdna, "dev heap is already created");
+ ret = -EBUSY;
+ goto mm_unlock;
+ }
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto mm_unlock;
+ }
+
+ shmem->map_wc = false;
+ abo = to_xdna_obj(&shmem->base);
+
+ abo->type = AMDXDNA_BO_DEV_HEAP;
+ abo->client = client;
+ abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
+ drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+
+ client->dev_heap = abo;
+ drm_gem_object_get(to_gobj(abo));
+ mutex_unlock(&client->mm_lock);
+
+ return abo;
+
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp, bool use_vmap)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+ struct amdxdna_gem_obj *abo, *heap;
+ int ret;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ if (args->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
+ args->size, heap->mem.size);
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto mm_unlock;
+ }
+ to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
+ abo->type = AMDXDNA_BO_DEV;
+ abo->client = client;
+ abo->dev_heap = heap;
+ ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ goto mm_unlock;
+ }
+
+ drm_gem_object_get(to_gobj(heap));
+ drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
+
+ mutex_unlock(&client->mm_lock);
+ return abo;
+
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+ struct iosys_map map;
+ int ret;
+
+ if (args->size > XDNA_MAX_CMD_BO_SIZE) {
+ XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args->size < sizeof(struct amdxdna_cmd)) {
+ XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ abo = to_xdna_obj(&shmem->base);
+
+ abo->type = AMDXDNA_BO_CMD;
+ abo->client = filp->driver_priv;
+
+ ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
+ goto release_obj;
+ }
+ abo->mem.kva = map.vaddr;
+
+ return abo;
+
+release_obj:
+ drm_gem_shmem_free(shmem);
+ return ERR_PTR(ret);
+}
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_create_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->flags || args->vaddr || !args->size)
+ return -EINVAL;
+
+ XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
+ args->type, args->vaddr, args->size, args->flags);
+ switch (args->type) {
+ case AMDXDNA_BO_SHMEM:
+ abo = amdxdna_drm_alloc_shmem(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV_HEAP:
+ abo = amdxdna_drm_create_dev_heap(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV:
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+ break;
+ case AMDXDNA_BO_CMD:
+ abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (IS_ERR(abo))
+ return PTR_ERR(abo);
+
+ /* ready to publish object to userspace */
+ ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
+ if (ret) {
+ XDNA_ERR(xdna, "Create handle failed");
+ goto put_obj;
+ }
+
+ XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
+ args->handle, args->type, abo->mem.userptr,
+ abo->mem.dev_addr, abo->mem.size);
+put_obj:
+	/* Drop the object reference; the handle holds one now. */
+ drm_gem_object_put(to_gobj(abo));
+ return ret;
+}
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ int ret;
+
+ switch (abo->type) {
+ case AMDXDNA_BO_SHMEM:
+ case AMDXDNA_BO_DEV_HEAP:
+ ret = drm_gem_shmem_pin(&abo->base);
+ break;
+ case AMDXDNA_BO_DEV:
+ ret = drm_gem_shmem_pin(&abo->dev_heap->base);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
+ return ret;
+}
+
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
+{
+ int ret;
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->dev_heap;
+
+ mutex_lock(&abo->lock);
+ ret = amdxdna_gem_pin_nolock(abo);
+ mutex_unlock(&abo->lock);
+
+ return ret;
+}
+
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
+{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->dev_heap;
+
+ mutex_lock(&abo->lock);
+ drm_gem_shmem_unpin(&abo->base);
+ mutex_unlock(&abo->lock);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdl);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
+ return NULL;
+ }
+
+ abo = to_xdna_obj(gobj);
+ if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
+ return abo;
+
+ drm_gem_object_put(gobj);
+ return NULL;
+}
+
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_drm_get_bo_info *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret = 0;
+
+ if (args->ext || args->ext_flags || args->pad)
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
+ return -ENOENT;
+ }
+
+ abo = to_xdna_obj(gobj);
+ args->vaddr = abo->mem.userptr;
+ args->xdna_addr = abo->mem.dev_addr;
+
+ if (abo->type != AMDXDNA_BO_DEV)
+ args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
+ else
+ args->map_offset = AMDXDNA_INVALID_ADDR;
+
+ XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
+ args->handle, args->map_offset, args->vaddr, args->xdna_addr);
+
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+/*
+ * The sync bo ioctl makes sure the CPU cache is in sync with memory.
+ * This is required because the NPU is not a cache-coherent device. CPU
+ * cache flushing/invalidation is expensive, so it is best handled outside
+ * of the command submission path. This ioctl allows explicit cache
+ * flushing/invalidation outside of the critical path.
+ */
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_sync_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -ENOENT;
+ }
+ abo = to_xdna_obj(gobj);
+
+ ret = amdxdna_gem_pin(abo);
+ if (ret) {
+ XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
+ goto put_obj;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
+ else
+ drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+
+ amdxdna_gem_unpin(abo);
+
+ XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
+ args->handle, args->offset, args->size);
+
+put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+}
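+
+/*
+ * Hypothetical userspace usage of the sync bo ioctl (illustrative only;
+ * field names follow the handler above):
+ *
+ *	struct amdxdna_drm_sync_bo sb = {
+ *		.handle = bo_handle,
+ *		.offset = 0,
+ *		.size = bo_size,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_AMDXDNA_SYNC_BO, &sb);
+ */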
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
new file mode 100644
index 000000000000..8ccc0375dd9d
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_GEM_H_
+#define _AMDXDNA_GEM_H_
+
+struct amdxdna_mem {
+ u64 userptr;
+ void *kva;
+ u64 dev_addr;
+ size_t size;
+ struct page **pages;
+ u32 nr_pages;
+ struct mmu_interval_notifier notifier;
+ unsigned long *pfns;
+ bool map_invalid;
+};
+
+struct amdxdna_gem_obj {
+ struct drm_gem_shmem_object base;
+ struct amdxdna_client *client;
+ u8 type;
+ bool pinned;
+ struct mutex lock; /* Protects: pinned */
+ struct amdxdna_mem mem;
+
+	/* The members below are only initialized when needed */
+ struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
+ struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
+ struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
+ u32 assigned_hwctx;
+};
+
+#define to_gobj(obj) (&(obj)->base.base)
+
+static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
+{
+ return container_of(gobj, struct amdxdna_gem_obj, base.base);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type);
+static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
+{
+ drm_gem_object_put(to_gobj(abo));
+}
+
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp, bool use_vmap);
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo);
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_GEM_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
new file mode 100644
index 000000000000..814b16bb1953
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/xarray.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_mailbox.h"
+
+#define MB_ERR(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_DBG(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_WARN_ONCE(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+
+#define MAGIC_VAL 0x1D000000U
+#define MAGIC_VAL_MASK 0xFF000000
+#define MAX_MSG_ID_ENTRIES 256
+#define MSG_RX_TIMER 200 /* milliseconds */
+#define MAILBOX_NAME "xdna_mailbox"
+
+enum channel_res_type {
+ CHAN_RES_X2I,
+ CHAN_RES_I2X,
+ CHAN_RES_NUM
+};
+
+struct mailbox {
+ struct device *dev;
+ struct xdna_mailbox_res res;
+};
+
+struct mailbox_channel {
+ struct mailbox *mb;
+ struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
+ int msix_irq;
+ u32 iohub_int_addr;
+ struct xarray chan_xa;
+ u32 next_msgid;
+ u32 x2i_tail;
+
+	/* Fields related to received messages */
+ struct workqueue_struct *work_q;
+ struct work_struct rx_work;
+ u32 i2x_head;
+ bool bad_state;
+};
+
+#define MSG_BODY_SZ GENMASK(10, 0)
+#define MSG_PROTO_VER GENMASK(23, 16)
+struct xdna_msg_header {
+ __u32 total_size;
+ __u32 sz_ver;
+ __u32 id;
+ __u32 opcode;
+} __packed;
+
+static_assert(sizeof(struct xdna_msg_header) == 16);
+
+struct mailbox_pkg {
+ struct xdna_msg_header header;
+ __u32 payload[];
+};
+
+/* The protocol version. */
+#define MSG_PROTOCOL_VERSION 0x1
+/* The tombstone value. */
+#define TOMBSTONE 0xDEADFACE
+
+struct mailbox_msg {
+ void *handle;
+ int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ size_t pkg_size; /* package size in bytes */
+ struct mailbox_pkg pkg;
+};
+
+static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ writel(data, ringbuf_addr);
+}
+
+static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ return readl(ringbuf_addr);
+}
+
+static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+ int ret, value;
+
+	/* Poll until the value is non-zero */
+ ret = readx_poll_timeout(readl, ringbuf_addr, value,
+ value, 1 /* us */, 100);
+ if (ret < 0)
+ return ret;
+
+ *val = value;
+ return 0;
+}
+
+static inline void
+mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
+ mb_chann->i2x_head = headptr_val;
+}
+
+static inline void
+mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
+ mb_chann->x2i_tail = tailptr_val;
+}
+
+static inline u32
+mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
+}
+
+static inline u32
+mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
+}
+
+static inline u32
+mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mb_chann->res[type].rb_size;
+}
+
+static inline int mailbox_validate_msgid(int msg_id)
+{
+ return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
+}
+
+static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ u32 msg_id;
+ int ret;
+
+ ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
+ XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
+ &mb_chann->next_msgid, GFP_NOWAIT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Add MAGIC_VAL to the higher bits.
+ */
+ msg_id |= MAGIC_VAL;
+ return msg_id;
+}
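+
+/*
+ * Worked example (illustrative): xa_alloc_cyclic_irq() hands out ids in
+ * [0, MAX_MSG_ID_ENTRIES - 1]; an allocated id of 0x5 goes on the wire as
+ * 0x1D000005. The receive path checks the magic in the upper byte and
+ * masks it off again to find the xarray entry.
+ */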
+
+static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
+{
+ msg_id &= ~MAGIC_VAL_MASK;
+ xa_erase_irq(&mb_chann->chan_xa, msg_id);
+}
+
+static void mailbox_release_msg(struct mailbox_channel *mb_chann,
+ struct mailbox_msg *mb_msg)
+{
+ MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
+ mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ kfree(mb_msg);
+}
+
+static int
+mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ void __iomem *write_addr;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ u32 tmp_tail;
+
+ head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
+ tail = mb_chann->x2i_tail;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
+ start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
+ tmp_tail = tail + mb_msg->pkg_size;
+
+ if (tail < head && tmp_tail >= head)
+ goto no_space;
+
+ if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
+ mb_msg->pkg_size >= head))
+ goto no_space;
+
+ if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ writel(TOMBSTONE, write_addr);
+
+ /* tombstone is set. Write from the start of the ringbuf */
+ tail = 0;
+ }
+
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
+ mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);
+
+ trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
+ mb_msg->pkg.header.opcode,
+ mb_msg->pkg.header.id);
+
+ return 0;
+
+no_space:
+ return -ENOSPC;
+}
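+
+/*
+ * Worked example (illustrative): with a 0x400-byte ring, head 0x100 and
+ * tail 0x3F0, a 0x20-byte package does not fit before the end of the ring
+ * (0x410 > 0x400 - 4). A TOMBSTONE is written at 0x3F0, the package is
+ * written at offset 0 instead, and the tail pointer becomes 0x20; this is
+ * allowed because the package size is smaller than head.
+ */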
+
+static int
+mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
+ void *data)
+{
+ struct mailbox_msg *mb_msg;
+ int msg_id;
+ int ret;
+
+ msg_id = header->id;
+ if (!mailbox_validate_msgid(msg_id)) {
+ MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ msg_id &= ~MAGIC_VAL_MASK;
+ mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
+ if (!mb_msg) {
+ MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+
+ kfree(mb_msg);
+ return ret;
+}
+
+static int mailbox_get_msg(struct mailbox_channel *mb_chann)
+{
+ struct xdna_msg_header header;
+ void __iomem *read_addr;
+ u32 msg_size, rest;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ int ret;
+
+ if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
+ return -EINVAL;
+ head = mb_chann->i2x_head;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
+ start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;
+
+ if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
+ return -EINVAL;
+ }
+
+ /* ringbuf empty */
+ if (head == tail)
+ return -ENOENT;
+
+ if (head == ringbuf_size)
+ head = 0;
+
+ /* Peek size of the message or TOMBSTONE */
+ read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
+ header.total_size = readl(read_addr);
+ /* size is TOMBSTONE, set next read from 0 */
+ if (header.total_size == TOMBSTONE) {
+ if (head < tail) {
+ MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
+ head, tail);
+ return -EINVAL;
+ }
+ mailbox_set_headptr(mb_chann, 0);
+ return 0;
+ }
+
+ if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
+ return -EINVAL;
+ }
+ msg_size = sizeof(header) + header.total_size;
+
+ if (msg_size > ringbuf_size - head || msg_size > tail - head) {
+ MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
+ msg_size, tail, head);
+ return -EINVAL;
+ }
+
+ rest = sizeof(header) - sizeof(u32);
+ read_addr += sizeof(u32);
+ memcpy_fromio((u32 *)&header + 1, read_addr, rest);
+ read_addr += rest;
+
+ ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr);
+
+ mailbox_set_headptr(mb_chann, head + msg_size);
+	/* After the update, head may equal ringbuf_size. This is expected. */
+ trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
+ header.opcode, header.id);
+
+ return ret;
+}
+
+static irqreturn_t mailbox_irq_handler(int irq, void *p)
+{
+ struct mailbox_channel *mb_chann = p;
+
+ trace_mbox_irq_handle(MAILBOX_NAME, irq);
+ /* Schedule a rx_work to call the callback functions */
+ queue_work(mb_chann->work_q, &mb_chann->rx_work);
+ /* Clear IOHUB register */
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void mailbox_rx_worker(struct work_struct *rx_work)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state, work aborted");
+ return;
+ }
+
+ while (1) {
+		/*
+		 * A return of 0 means a message was consumed; keep going
+		 * until there are no more messages or an error occurs.
+		 */
+ ret = mailbox_get_msg(mb_chann);
+ if (ret == -ENOENT)
+ break;
+
+		/* Any other error means the device is in a bad state; disable the irq. */
+ if (unlikely(ret)) {
+ MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
+ WRITE_ONCE(mb_chann->bad_state, true);
+ disable_irq(mb_chann->msix_irq);
+ break;
+ }
+ }
+}
+
+int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout)
+{
+ struct xdna_msg_header *header;
+ struct mailbox_msg *mb_msg;
+ size_t pkg_size;
+ int ret;
+
+ pkg_size = sizeof(*header) + msg->send_size;
+ if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
+ MB_ERR(mb_chann, "Message size larger than ringbuf size");
+ return -EINVAL;
+ }
+
+ if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
+ MB_ERR(mb_chann, "Message must be 4 bytes align");
+ return -EINVAL;
+ }
+
+	/* The first word in the payload can NOT be TOMBSTONE */
+ if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
+ MB_ERR(mb_chann, "Tomb stone in data");
+ return -EINVAL;
+ }
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state");
+ return -EPIPE;
+ }
+
+ mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
+ if (!mb_msg)
+ return -ENOMEM;
+
+ mb_msg->handle = msg->handle;
+ mb_msg->notify_cb = msg->notify_cb;
+ mb_msg->pkg_size = pkg_size;
+
+ header = &mb_msg->pkg.header;
+	/*
+	 * Hardware uses total_size and size to split huge messages.
+	 * We do not support that here, so the two values are the same.
+	 */
+ header->total_size = msg->send_size;
+ header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
+ FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
+ header->opcode = msg->opcode;
+ memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);
+
+ ret = mailbox_acquire_msgid(mb_chann, mb_msg);
+ if (unlikely(ret < 0)) {
+ MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
+ goto msg_id_failed;
+ }
+ header->id = ret;
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+
+ ret = mailbox_send_msg(mb_chann, mb_msg);
+ if (ret) {
+ MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
+ goto release_id;
+ }
+
+ return 0;
+
+release_id:
+ mailbox_release_msgid(mb_chann, header->id);
+msg_id_failed:
+ kfree(mb_msg);
+ return ret;
+}
+
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mb,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 iohub_int_addr,
+ int mb_irq)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
+ pr_err("Ring buf size must be power of 2");
+ return NULL;
+ }
+
+ mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
+ if (!mb_chann)
+ return NULL;
+
+ mb_chann->mb = mb;
+ mb_chann->msix_irq = mb_irq;
+ mb_chann->iohub_int_addr = iohub_int_addr;
+ memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
+ memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));
+
+ xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
+ mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);
+
+ INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
+ mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
+ if (!mb_chann->work_q) {
+ MB_ERR(mb_chann, "Create workqueue failed");
+ goto free_and_out;
+ }
+
+	/* Everything looks good. Time to enable the irq handler */
+ ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
+ if (ret) {
+ MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
+ goto destroy_wq;
+ }
+
+ mb_chann->bad_state = false;
+
+ MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
+ return mb_chann;
+
+destroy_wq:
+ destroy_workqueue(mb_chann->work_q);
+free_and_out:
+ kfree(mb_chann);
+ return NULL;
+}
+
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
+{
+ struct mailbox_msg *mb_msg;
+ unsigned long msg_id;
+
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+ free_irq(mb_chann->msix_irq, mb_chann);
+ destroy_workqueue(mb_chann->work_q);
+ /* We can clean up and release resources */
+
+ xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
+ mailbox_release_msg(mb_chann, mb_msg);
+
+ xa_destroy(&mb_chann->chan_xa);
+
+ MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
+ kfree(mb_chann);
+ return 0;
+}
+
+void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
+{
+	/* Disable the irq and wait for any running handler to finish. This might sleep. */
+ disable_irq(mb_chann->msix_irq);
+
+ /* Cancel RX work and wait for it to finish */
+ cancel_work_sync(&mb_chann->rx_work);
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+}
+
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res)
+{
+ struct mailbox *mb;
+
+ mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return NULL;
+ mb->dev = ddev->dev;
+
+ /* mailbox and ring buf base and size information */
+ memcpy(&mb->res, res, sizeof(*res));
+
+ return mb;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h
new file mode 100644
index 000000000000..57954c303bdd
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MAILBOX_H_
+#define _AIE2_MAILBOX_H_
+
+struct mailbox;
+struct mailbox_channel;
+
+/*
+ * xdna_mailbox_msg - message struct
+ *
+ * @opcode: opcode for firmware
+ * @handle: handle used for the notify callback
+ * @notify_cb: callback function to notify the sender when there is a response
+ * @send_data: pointer to the data to send
+ * @send_size: size of the data to send
+ *
+ * The mailbox splits the data into multiple firmware messages if it is too
+ * big. This is transparent to the sender, which receives a single
+ * notification.
+ */
+struct xdna_mailbox_msg {
+ u32 opcode;
+ void *handle;
+ int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ u8 *send_data;
+ size_t send_size;
+};
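+
+/*
+ * Minimal usage sketch (illustrative; the opcode, callback and payload are
+ * hypothetical):
+ *
+ *	u32 payload[2] = { 0 };
+ *	struct xdna_mailbox_msg msg = {
+ *		.opcode = MY_FW_OPCODE,
+ *		.handle = &my_ctx,
+ *		.notify_cb = my_resp_cb,
+ *		.send_data = (u8 *)payload,
+ *		.send_size = sizeof(payload),
+ *	};
+ *	ret = xdna_mailbox_send_msg(chann, &msg, timeout_ms);
+ */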
+
+/*
+ * xdna_mailbox_res - mailbox hardware resource
+ *
+ * @ringbuf_base: ring buffer base address
+ * @ringbuf_size: ring buffer size
+ * @mbox_base: mailbox base address
+ * @mbox_size: mailbox size
+ */
+struct xdna_mailbox_res {
+ void __iomem *ringbuf_base;
+ size_t ringbuf_size;
+ void __iomem *mbox_base;
+ size_t mbox_size;
+ const char *name;
+};
+
+/*
+ * xdna_mailbox_chann_res - resources
+ *
+ * @rb_start_addr: ring buffer start address
+ * @rb_size: ring buffer size
+ * @mb_head_ptr_reg: mailbox head pointer register
+ * @mb_tail_ptr_reg: mailbox tail pointer register
+ */
+struct xdna_mailbox_chann_res {
+ u32 rb_start_addr;
+ u32 rb_size;
+ u32 mb_head_ptr_reg;
+ u32 mb_tail_ptr_reg;
+};
+
+/*
+ * xdnam_mailbox_create() -- create and initialize the mailbox subsystem
+ *
+ * @ddev: device pointer
+ * @res: SRAM and mailbox resources
+ *
+ * Return: On success, a handle to the mailbox subsystem.
+ * Otherwise, a NULL pointer.
+ */
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res);
+
+/*
+ * xdna_mailbox_create_channel() -- Create a mailbox channel instance
+ *
+ * @mailbox: the handle returned from xdnam_mailbox_create()
+ * @x2i: host to firmware mailbox resources
+ * @i2x: firmware to host mailbox resources
+ * @xdna_mailbox_intr_reg: register addr of MSI-X interrupt
+ * @mb_irq: Linux IRQ number associated with mailbox MSI-X interrupt vector index
+ *
+ * Return: On success, a mailbox channel handle. Otherwise, NULL.
+ */
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mailbox,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 xdna_mailbox_intr_reg,
+ int mb_irq);
+
+/*
+ * xdna_mailbox_destroy_channel() -- destroy mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ *
+ * Return: 0 on success, otherwise an error code
+ */
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_stop_channel() -- stop mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ */
+void xdna_mailbox_stop_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_send_msg() -- Send a message
+ *
+ * @mailbox_chann: Mailbox channel handle
+ * @msg: message struct for message information
+ * @tx_timeout: the timeout value for sending the message in ms.
+ *
+ * Return: 0 on success, otherwise an error code
+ */
+int xdna_mailbox_send_msg(struct mailbox_channel *mailbox_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout);
+
+#endif /* _AIE2_MAILBOX_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
new file mode 100644
index 000000000000..5139a9c96a91
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/completion.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+int xdna_msg_cb(void *handle, const u32 *data, size_t size)
+{
+ struct xdna_notify *cb_arg = handle;
+ int ret;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(cb_arg->size != size)) {
+ cb_arg->error = -EINVAL;
+ goto out;
+ }
+
+ print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET,
+ 16, 4, data, cb_arg->size, true);
+ memcpy(cb_arg->data, data, cb_arg->size);
+out:
+ ret = cb_arg->error;
+ complete(&cb_arg->comp);
+ return ret;
+}
+
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg)
+{
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ ret = xdna_mailbox_send_msg(chann, msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&hdl->comp,
+ msecs_to_jiffies(RX_TIMEOUT));
+ if (!ret) {
+ XDNA_ERR(xdna, "Wait for completion timeout");
+ return -ETIME;
+ }
+
+ return hdl->error;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
new file mode 100644
index 000000000000..23e1317b79fe
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_MAILBOX_HELPER_H
+#define _AMDXDNA_MAILBOX_HELPER_H
+
+#define TX_TIMEOUT 2000 /* milliseconds */
+#define RX_TIMEOUT 5000 /* milliseconds */
+
+struct amdxdna_dev;
+
+struct xdna_notify {
+ struct completion comp;
+ u32 *data;
+ size_t size;
+ int error;
+};
+
+#define DECLARE_XDNA_MSG_COMMON(name, op, status) \
+ struct name##_req req = { 0 }; \
+ struct name##_resp resp = { status }; \
+ struct xdna_notify hdl = { \
+ .error = 0, \
+ .data = (u32 *)&resp, \
+ .size = sizeof(resp), \
+ .comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \
+ }; \
+ struct xdna_mailbox_msg msg = { \
+ .send_data = (u8 *)&req, \
+ .send_size = sizeof(req), \
+ .handle = &hdl, \
+ .opcode = op, \
+ .notify_cb = xdna_msg_cb, \
+ }
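+
+/*
+ * Typical usage sketch (illustrative; "suspend" and MSG_OP_SUSPEND are
+ * hypothetical names). The macro declares req, resp, hdl and msg on the
+ * stack, wired up for a synchronous send-and-wait:
+ *
+ *	DECLARE_XDNA_MSG_COMMON(suspend, MSG_OP_SUSPEND, 0);
+ *
+ *	ret = xdna_send_msg_wait(xdna, chann, &msg);
+ */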
+
+int xdna_msg_cb(void *handle, const u32 *data, size_t size);
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg);
+
+#endif /* _AMDXDNA_MAILBOX_HELPER_H */
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
new file mode 100644
index 000000000000..97d4a032171f
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+/*
+ * Bind the driver based on the (vendor_id, device_id) pair and later use the
+ * (device_id, rev_id) pair as a key to select the device. Devices with the
+ * same device_id present a very similar interface to the host driver.
+ */
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static const struct amdxdna_device_id amdxdna_ids[] = {
+ { 0x1502, 0x0, &dev_npu1_info },
+ { 0x17f0, 0x0, &dev_npu2_info },
+ { 0x17f0, 0x10, &dev_npu4_info },
+ { 0x17f0, 0x11, &dev_npu5_info },
+ { 0x17f0, 0x20, &dev_npu6_info },
+ {0}
+};
+
+static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_client *client;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
+ return ret;
+ }
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client) {
+ ret = -ENOMEM;
+ goto put_rpm;
+ }
+
+ client->pid = pid_nr(rcu_access_pointer(filp->pid));
+ client->xdna = xdna;
+
+ client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
+ if (IS_ERR(client->sva)) {
+ ret = PTR_ERR(client->sva);
+ XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
+ goto failed;
+ }
+ client->pasid = iommu_sva_get_pasid(client->sva);
+ if (client->pasid == IOMMU_PASID_INVALID) {
+ XDNA_ERR(xdna, "SVA get pasid failed");
+ ret = -ENODEV;
+ goto unbind_sva;
+ }
+ mutex_init(&client->hwctx_lock);
+ init_srcu_struct(&client->hwctx_srcu);
+ xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
+ mutex_init(&client->mm_lock);
+
+ mutex_lock(&xdna->dev_lock);
+ list_add_tail(&client->node, &xdna->client_list);
+ mutex_unlock(&xdna->dev_lock);
+
+ filp->driver_priv = client;
+ client->filp = filp;
+
+ XDNA_DBG(xdna, "pid %d opened", client->pid);
+ return 0;
+
+unbind_sva:
+ iommu_sva_unbind_device(client->sva);
+failed:
+ kfree(client);
+put_rpm:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return ret;
+}
+
+static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+
+ XDNA_DBG(xdna, "closing pid %d", client->pid);
+
+ xa_destroy(&client->hwctx_xa);
+ cleanup_srcu_struct(&client->hwctx_srcu);
+ mutex_destroy(&client->hwctx_lock);
+ mutex_destroy(&client->mm_lock);
+ if (client->dev_heap)
+ drm_gem_object_put(to_gobj(client->dev_heap));
+
+ iommu_sva_unbind_device(client->sva);
+
+ XDNA_DBG(xdna, "pid %d closed", client->pid);
+ kfree(client);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+}
+
+static int amdxdna_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *filp = f->private_data;
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = client->xdna;
+ int idx;
+
+ XDNA_DBG(xdna, "PID %d flushing...", client->pid);
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return 0;
+
+ mutex_lock(&xdna->dev_lock);
+ list_del_init(&client->node);
+ mutex_unlock(&xdna->dev_lock);
+ amdxdna_hwctx_remove_all(client);
+
+ drm_dev_exit(idx);
+ return 0;
+}
+
+static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_info *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->get_aie_info)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->get_aie_info(client, args);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_set_state *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->set_aie_state)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->set_aie_state(client, args);
+ mutex_unlock(&xdna->dev_lock);
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
+ /* Context */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
+ /* BO */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
+ /* Execution */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
+ /* AIE hardware */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
+};
+
+static const struct file_operations amdxdna_fops = {
+ .owner = THIS_MODULE,
+ .open = accel_open,
+ .release = drm_release,
+ .flush = amdxdna_flush,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+};
+
+const struct drm_driver amdxdna_drm_drv = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ .fops = &amdxdna_fops,
+ .name = "amdxdna_accel_driver",
+ .desc = "AMD XDNA DRM implementation",
+ .open = amdxdna_drm_open,
+ .postclose = amdxdna_drm_close,
+ .ioctls = amdxdna_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
+
+ .gem_create_object = amdxdna_gem_create_object_cb,
+};
+
+static const struct amdxdna_dev_info *
+amdxdna_get_dev_info(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
+ if (pdev->device == amdxdna_ids[i].device &&
+ pdev->revision == amdxdna_ids[i].revision)
+ return amdxdna_ids[i].dev_info;
+ }
+ return NULL;
+}
+
+static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
+ if (IS_ERR(xdna))
+ return PTR_ERR(xdna);
+
+ xdna->dev_info = amdxdna_get_dev_info(pdev);
+ if (!xdna->dev_info)
+ return -ENODEV;
+
+ drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
+ init_rwsem(&xdna->notifier_lock);
+ INIT_LIST_HEAD(&xdna->client_list);
+ pci_set_drvdata(pdev, xdna);
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xdna->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->init(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ if (ret) {
+ XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = amdxdna_sysfs_init(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
+ goto failed_dev_fini;
+ }
+
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+
+ ret = drm_dev_register(&xdna->ddev, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
+ pm_runtime_forbid(dev);
+ goto failed_sysfs_fini;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+failed_sysfs_fini:
+ amdxdna_sysfs_fini(xdna);
+failed_dev_fini:
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static void amdxdna_remove(struct pci_dev *pdev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct amdxdna_client *client;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+
+ drm_dev_unplug(&xdna->ddev);
+ amdxdna_sysfs_fini(xdna);
+
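+	/*
+	 * Detach one client at a time and drop dev_lock while its hardware
+	 * contexts are removed, then retake the lock to pick up the next
+	 * client from the list.
+	 */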
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ while (client) {
+ list_del_init(&client->node);
+ mutex_unlock(&xdna->dev_lock);
+
+ amdxdna_hwctx_remove_all(client);
+
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ }
+
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+}
+
+static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
+{
+ if (xdna->dev_info->ops->suspend)
+ xdna->dev_info->ops->suspend(xdna);
+
+ return 0;
+}
+
+static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
+{
+ if (xdna->dev_info->ops->resume)
+ return xdna->dev_info->ops->resume(xdna);
+
+ return 0;
+}
+
+static int amdxdna_pmops_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ struct amdxdna_client *client;
+
+ mutex_lock(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_suspend(client);
+
+ amdxdna_dev_suspend_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ return 0;
+}
+
+static int amdxdna_pmops_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ struct amdxdna_client *client;
+ int ret;
+
+ XDNA_INFO(xdna, "firmware resuming...");
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_resume_nolock(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "resume NPU firmware failed");
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+ }
+
+ XDNA_INFO(xdna, "hardware context resuming...");
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_resume(client);
+ mutex_unlock(&xdna->dev_lock);
+
+ return 0;
+}
+
+static int amdxdna_rpmops_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_suspend_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
+ return ret;
+}
+
+static int amdxdna_rpmops_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_resume_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
+ return ret;
+}
+
+static const struct dev_pm_ops amdxdna_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
+ RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
+};
+
+static struct pci_driver amdxdna_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci_ids,
+ .probe = amdxdna_probe,
+ .remove = amdxdna_remove,
+ .driver.pm = &amdxdna_pm_ops,
+};
+
+module_pci_driver(amdxdna_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_DESCRIPTION("amdxdna driver");
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
new file mode 100644
index 000000000000..37848a8d8031
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PCI_DRV_H_
+#define _AMDXDNA_PCI_DRV_H_
+
+#include <linux/xarray.h>
+
+#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
+#define XDNA_WARN(xdna, fmt, args...) drm_warn(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_ERR(xdna, fmt, args...) drm_err(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_DBG(xdna, fmt, args...) drm_dbg(&(xdna)->ddev, fmt, ##args)
+#define XDNA_INFO_ONCE(xdna, fmt, args...) drm_info_once(&(xdna)->ddev, fmt, ##args)
+
+#define XDNA_MBZ_DBG(xdna, ptr, sz) \
+ ({ \
+ int __i; \
+ int __ret = 0; \
+ u8 *__ptr = (u8 *)(ptr); \
+ for (__i = 0; __i < (sz); __i++) { \
+ if (__ptr[__i]) { \
+ XDNA_DBG(xdna, "MBZ check failed"); \
+ __ret = -EINVAL; \
+ break; \
+ } \
+ } \
+ __ret; \
+ })
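+
A minimal usage sketch for the MBZ helper (the argument struct and caller are hypothetical, not part of this patch): it returns 0 when every byte of a reserved region is zero and -EINVAL otherwise, the usual way to keep uAPI padding extensible:

	struct hypothetical_args {
		u32 flags;
		u8  pad[4];		/* reserved, must be zero */
	};

	static int check_args(struct amdxdna_dev *xdna, struct hypothetical_args *a)
	{
		/* 0 if a->pad is all zeroes, -EINVAL (plus a debug message) if not */
		return XDNA_MBZ_DBG(xdna, a->pad, sizeof(a->pad));
	}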
+
+#define to_xdna_dev(drm_dev) \
+ ((struct amdxdna_dev *)container_of(drm_dev, struct amdxdna_dev, ddev))
+
+extern const struct drm_driver amdxdna_drm_drv;
+
+struct amdxdna_client;
+struct amdxdna_dev;
+struct amdxdna_drm_get_info;
+struct amdxdna_drm_set_state;
+struct amdxdna_gem_obj;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+/*
+ * struct amdxdna_dev_ops - Device hardware operation callbacks
+ */
+struct amdxdna_dev_ops {
+ int (*init)(struct amdxdna_dev *xdna);
+ void (*fini)(struct amdxdna_dev *xdna);
+ int (*resume)(struct amdxdna_dev *xdna);
+ void (*suspend)(struct amdxdna_dev *xdna);
+ int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
+ int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+ void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
+ int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+ int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
+ int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+};
+
+/*
+ * struct amdxdna_dev_info - Device hardware information
+ * Records static device information, such as the register, mailbox, PSP and SMU BAR indexes
+ */
+struct amdxdna_dev_info {
+ int reg_bar;
+ int mbox_bar;
+ int sram_bar;
+ int psp_bar;
+ int smu_bar;
+ int device_type;
+ int first_col;
+ u32 dev_mem_buf_shift;
+ u64 dev_mem_base;
+ size_t dev_mem_size;
+ char *vbnv;
+ const struct amdxdna_dev_priv *dev_priv;
+ const struct amdxdna_dev_ops *ops;
+};
+
+struct amdxdna_fw_ver {
+ u32 major;
+ u32 minor;
+ u32 sub;
+ u32 build;
+};
+
+struct amdxdna_dev {
+ struct drm_device ddev;
+ struct amdxdna_dev_hdl *dev_handle;
+ const struct amdxdna_dev_info *dev_info;
+ void *xrs_hdl;
+
+ struct mutex dev_lock; /* per device lock */
+ struct list_head client_list;
+ struct amdxdna_fw_ver fw_ver;
+	struct rw_semaphore notifier_lock; /* for MMU notifier */
+};
+
+/*
+ * struct amdxdna_device_id - PCI device info
+ */
+struct amdxdna_device_id {
+ unsigned short device;
+ u8 revision;
+ const struct amdxdna_dev_info *dev_info;
+};
+
+/*
+ * struct amdxdna_client - amdxdna client
+ * A per-fd data structure for managing contexts and other per-process state.
+ */
+struct amdxdna_client {
+ struct list_head node;
+ pid_t pid;
+ struct mutex hwctx_lock; /* protect hwctx */
+ /* do NOT wait this srcu when hwctx_lock is held */
+ struct srcu_struct hwctx_srcu;
+ struct xarray hwctx_xa;
+ u32 next_hwctxid;
+ struct amdxdna_dev *xdna;
+ struct drm_file *filp;
+
+ struct mutex mm_lock; /* protect memory related */
+ struct amdxdna_gem_obj *dev_heap;
+
+ struct iommu_sva *sva;
+ int pasid;
+};
+
+#define amdxdna_for_each_hwctx(client, hwctx_id, entry) \
+ xa_for_each(&(client)->hwctx_xa, hwctx_id, entry)
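+
A usage sketch for this iterator (caller and message are illustrative): it is a thin wrapper over xa_for_each(), so the index variable must be an unsigned long:

	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;

	amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
		XDNA_DBG(client->xdna, "hwctx %lu present", hwctx_id);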
+
+/* Add device info below */
+extern const struct amdxdna_dev_info dev_npu1_info;
+extern const struct amdxdna_dev_info dev_npu2_info;
+extern const struct amdxdna_dev_info dev_npu4_info;
+extern const struct amdxdna_dev_info dev_npu5_info;
+extern const struct amdxdna_dev_info dev_npu6_info;
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna);
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PCI_DRV_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_sysfs.c b/drivers/accel/amdxdna/amdxdna_sysfs.c
new file mode 100644
index 000000000000..f27e4ee960a0
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_sysfs.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/types.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+static ssize_t vbnv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", xdna->dev_info->vbnv);
+}
+static DEVICE_ATTR_RO(vbnv);
+
+static ssize_t device_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", xdna->dev_info->device_type);
+}
+static DEVICE_ATTR_RO(device_type);
+
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d.%d.%d.%d\n", xdna->fw_ver.major,
+ xdna->fw_ver.minor, xdna->fw_ver.sub,
+ xdna->fw_ver.build);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static struct attribute *amdxdna_attrs[] = {
+ &dev_attr_device_type.attr,
+ &dev_attr_vbnv.attr,
+ &dev_attr_fw_version.attr,
+ NULL,
+};
+
+static struct attribute_group amdxdna_attr_group = {
+ .attrs = amdxdna_attrs,
+};
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna)
+{
+ int ret;
+
+ ret = sysfs_create_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+ if (ret)
+ XDNA_ERR(xdna, "Create attr group failed");
+
+ return ret;
+}
+
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna)
+{
+ sysfs_remove_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+}
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
new file mode 100644
index 000000000000..e4f6dac7d00f
--- /dev/null
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* Address definition from NPU1 docs */
+#define MPNPU_PUB_SEC_INTR 0x3010090
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010094
+#define MPNPU_PUB_SCRATCH2 0x30100A0
+#define MPNPU_PUB_SCRATCH3 0x30100A4
+#define MPNPU_PUB_SCRATCH4 0x30100A8
+#define MPNPU_PUB_SCRATCH5 0x30100AC
+#define MPNPU_PUB_SCRATCH6 0x30100B0
+#define MPNPU_PUB_SCRATCH7 0x30100B4
+#define MPNPU_PUB_SCRATCH9 0x30100BC
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x30A0000
+#define MPNPU_SRAM_X2I_MAILBOX_1 0x30A2000
+#define MPNPU_SRAM_I2X_MAILBOX_15 0x30BF000
+
+#define MPNPU_APERTURE0_BASE 0x3000000
+#define MPNPU_APERTURE1_BASE 0x3080000
+#define MPNPU_APERTURE2_BASE 0x30C0000
+
+/* PCIe BAR Index for NPU1 */
+#define NPU1_REG_BAR_INDEX 0
+#define NPU1_MBOX_BAR_INDEX 4
+#define NPU1_PSP_BAR_INDEX 0
+#define NPU1_SMU_BAR_INDEX 0
+#define NPU1_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU1_REG_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_MBOX_BAR_BASE MPNPU_APERTURE2_BASE
+#define NPU1_PSP_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SMU_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SRAM_BAR_BASE MPNPU_APERTURE1_BASE
+
+const struct rt_config npu1_default_rt_cfg[] = {
+ { 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu1_dpm_clk_table[] = {
+ {400, 800},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {720, 1309},
+ {720, 1309},
+ {847, 1600},
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu1_dev_priv = {
+ .fw_path = "amdnpu/1502_00/npu.sbin",
+ .protocol_major = 0x5,
+ .protocol_minor = 0x7,
+ .rt_config = npu1_default_rt_cfg,
+ .dpm_clk_tbl = npu1_dpm_clk_table,
+ .col_align = COL_ALIGN_NONE,
+ .mbox_dev_addr = NPU1_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU1_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU1_PSP, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU1_PSP, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU1_PSP, MPNPU_PUB_SEC_INTR),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU1_SMU, MPNPU_PUB_SCRATCH5),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU1_SMU, MPNPU_PUB_PWRMGMT_INTR),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU1_SMU, MPNPU_PUB_SCRATCH6),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ },
+ .hw_ops = {
+ .set_dpm = npu1_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu1_info = {
+ .reg_bar = NPU1_REG_BAR_INDEX,
+ .mbox_bar = NPU1_MBOX_BAR_INDEX,
+ .sram_bar = NPU1_SRAM_BAR_INDEX,
+ .psp_bar = NPU1_PSP_BAR_INDEX,
+ .smu_bar = NPU1_SMU_BAR_INDEX,
+ .first_col = 1,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu1",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu1_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c
new file mode 100644
index 000000000000..a081cac75ee0
--- /dev/null
+++ b/drivers/accel/amdxdna/npu2_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU2 */
+#define NPU2_REG_BAR_INDEX 0
+#define NPU2_MBOX_BAR_INDEX 0
+#define NPU2_PSP_BAR_INDEX 4
+#define NPU2_SMU_BAR_INDEX 5
+#define NPU2_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU2_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU2_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU2_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu2_dev_priv = {
+ .fw_path = "amdnpu/17f0_00/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 0x6,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU2_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU2_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU2_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU2_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU2_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU2_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU2_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU2_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU2_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU2_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu2_info = {
+ .reg_bar = NPU2_REG_BAR_INDEX,
+ .mbox_bar = NPU2_MBOX_BAR_INDEX,
+ .sram_bar = NPU2_SRAM_BAR_INDEX,
+ .psp_bar = NPU2_PSP_BAR_INDEX,
+ .smu_bar = NPU2_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu2",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu2_dev_priv,
+	.ops = &aie2_ops, /* NPU2 can share NPU1's callbacks */
+};
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
new file mode 100644
index 000000000000..9f2e33182ec6
--- /dev/null
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU4 */
+#define NPU4_REG_BAR_INDEX 0
+#define NPU4_MBOX_BAR_INDEX 0
+#define NPU4_PSP_BAR_INDEX 4
+#define NPU4_SMU_BAR_INDEX 5
+#define NPU4_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU4_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU4_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU4_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+const struct rt_config npu4_default_rt_cfg[] = {
+ { 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu4_dpm_clk_table[] = {
+ {396, 792},
+ {600, 1056},
+ {792, 1152},
+ {975, 1267},
+ {975, 1267},
+ {1056, 1408},
+ {1152, 1584},
+ {1267, 1800},
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu4_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU4_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU4_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU4_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU4_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU4_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU4_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU4_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU4_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU4_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU4_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu4_info = {
+ .reg_bar = NPU4_REG_BAR_INDEX,
+ .mbox_bar = NPU4_MBOX_BAR_INDEX,
+ .sram_bar = NPU4_SRAM_BAR_INDEX,
+ .psp_bar = NPU4_PSP_BAR_INDEX,
+ .smu_bar = NPU4_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu4",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu4_dev_priv,
+	.ops = &aie2_ops, /* NPU4 can share NPU1's callbacks */
+};
diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c
new file mode 100644
index 000000000000..5f1cf83461c4
--- /dev/null
+++ b/drivers/accel/amdxdna/npu5_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU5 */
+#define NPU5_REG_BAR_INDEX 0
+#define NPU5_MBOX_BAR_INDEX 0
+#define NPU5_PSP_BAR_INDEX 4
+#define NPU5_SMU_BAR_INDEX 5
+#define NPU5_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU5_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU5_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu5_dev_priv = {
+ .fw_path = "amdnpu/17f0_11/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU5_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU5_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU5_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU5_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU5_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU5_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU5_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU5_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU5_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU5_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu5_info = {
+ .reg_bar = NPU5_REG_BAR_INDEX,
+ .mbox_bar = NPU5_MBOX_BAR_INDEX,
+ .sram_bar = NPU5_SRAM_BAR_INDEX,
+ .psp_bar = NPU5_PSP_BAR_INDEX,
+ .smu_bar = NPU5_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu5",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu5_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c
new file mode 100644
index 000000000000..94a7005685a7
--- /dev/null
+++ b/drivers/accel/amdxdna/npu6_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU6 */
+#define NPU6_REG_BAR_INDEX 0
+#define NPU6_MBOX_BAR_INDEX 0
+#define NPU6_PSP_BAR_INDEX 4
+#define NPU6_SMU_BAR_INDEX 5
+#define NPU6_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU6_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU6_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu6_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU6_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU6_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU6_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU6_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU6_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU6_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU6_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU6_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU6_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU6_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu6_info = {
+ .reg_bar = NPU6_REG_BAR_INDEX,
+ .mbox_bar = NPU6_MBOX_BAR_INDEX,
+ .sram_bar = NPU6_SRAM_BAR_INDEX,
+ .psp_bar = NPU6_PSP_BAR_INDEX,
+ .smu_bar = NPU6_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu6",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu6_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 708dfd10f39c..5409b2c656c8 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -101,7 +101,6 @@ static const struct drm_driver hl_driver = {
.major = LINUX_VERSION_MAJOR,
.minor = LINUX_VERSION_PATCHLEVEL,
.patchlevel = LINUX_VERSION_SUBLEVEL,
- .date = "20190505",
.fops = &hl_fops,
.open = hl_device_open,
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index ca2bf47ce248..1e8ffbe25eee 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -458,15 +458,7 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
-#ifdef DRIVER_DATE
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-#else
- .date = UTS_RELEASE,
.major = 1,
-#endif
};
static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 949f4233946c..87d7411ae059 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -78,8 +78,8 @@ static int ivpu_resume(struct ivpu_device *vdev)
int ret;
retry:
- pci_restore_state(to_pci_dev(vdev->drm.dev));
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_up(vdev);
if (ret) {
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 4ddf89308ff5..81819b9ef8d4 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -208,7 +208,6 @@ static const struct drm_driver qaic_accel_driver = {
.name = QAIC_NAME,
.desc = QAIC_DESC,
- .date = "20190618",
.fops = &qaic_accel_fops,
.open = qaic_open,
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index 6d772143d612..21d58aed0deb 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -772,8 +772,7 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
cancel_work_sync(&context->fw_work);
cancel_work_sync(&context->dump_work);
- if (context->mem_dump)
- vfree(context->mem_dump);
+ vfree(context->mem_dump);
sahara_release_image(context);
mhi_unprepare_from_transfer(mhi_dev);
}
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 4ec20fd56985..3fde4496f8a2 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <asm/cpuid.h>
#include <asm/mwait.h>
#include <xen/xen.h>
@@ -46,10 +47,8 @@ static void power_saving_mwait_init(void)
if (!boot_cpu_has(X86_FEATURE_MWAIT))
return;
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+ cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8274a17872ed..a972831dbd66 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -610,16 +610,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return: the EDID length (a positive value) on success, or a negative error
+ * code on failure.
+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +648,17 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(obj);
+ return ret;
}
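
With this change the helper hands out a kmemdup()'d copy rather than the raw ACPI object, so ownership moves to the caller. A minimal caller sketch (error handling trimmed):

	void *edid;
	int len;

	len = acpi_video_device_EDID(video_device, &edid, 256);
	if (len > 0) {
		/* ... consume len bytes of EDID ... */
		kfree(edid);	/* caller owns the copy now */
	}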
/* bus */
@@ -1435,9 +1448,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1488,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 07789f0b59bc..b72772494655 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -173,8 +173,6 @@ static struct gen_pool *ghes_estatus_pool;
static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
-static int ghes_panic_timeout __read_mostly = 30;
-
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
phys_addr_t paddr;
@@ -983,14 +981,16 @@ static void __ghes_panic(struct ghes *ghes,
struct acpi_hest_generic_status *estatus,
u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
+ const char *msg = GHES_PFX "Fatal hardware error";
+
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
- /* reboot to log the error! */
if (!panic_timeout)
- panic_timeout = ghes_panic_timeout;
- panic("Fatal hardware error!");
+ pr_emerg("%s but panic disabled\n", msg);
+
+ panic(msg);
}
static int ghes_proc(struct ghes *ghes)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 3d5342f8d7b3..6760330a8af5 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -717,7 +717,7 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
}
list_del_init(&hook->list);
- pr_info("extension unregistered: %s\n", hook->name);
+ pr_info("hook unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
@@ -751,18 +751,18 @@ void battery_hook_register(struct acpi_battery_hook *hook)
if (hook->add_battery(battery->bat, hook)) {
/*
			 * If an add_battery() callback returns non-zero,
- * the registration of the extension has failed,
+ * the registration of the hook has failed,
* and we will not add it to the list of loaded
* hooks.
*/
- pr_err("extension failed to load: %s", hook->name);
+ pr_err("hook failed to load: %s", hook->name);
battery_hook_unregister_unlocked(hook);
goto end;
}
power_supply_changed(battery->bat);
}
- pr_info("new extension: %s\n", hook->name);
+ pr_info("new hook: %s\n", hook->name);
end:
mutex_unlock(&hook_mutex);
}
@@ -805,10 +805,10 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat, hook_node)) {
/*
- * The notification of the extensions has failed, to
- * prevent further errors we will unload the extension.
+			 * The notification of the hook has failed; to
+			 * prevent further errors, we will unload the hook.
*/
- pr_err("error in extension, unloading: %s",
+ pr_err("error in hook, unloading: %s",
hook_node->name);
battery_hook_unregister_unlocked(hook_node);
}
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index d1d9c9289087..35ece8e9f15d 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -29,7 +29,7 @@ BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);
-static BIN_ATTR_SIMPLE_RO(image);
+static __ro_after_init BIN_ATTR_SIMPLE_RO(image);
static struct attribute *bgrt_attributes[] = {
&bgrt_attr_version.attr,
@@ -40,14 +40,14 @@ static struct attribute *bgrt_attributes[] = {
NULL,
};
-static struct bin_attribute *bgrt_bin_attributes[] = {
+static const struct bin_attribute *const bgrt_bin_attributes[] = {
&bin_attr_image,
NULL,
};
static const struct attribute_group bgrt_attribute_group = {
.attrs = bgrt_attributes,
- .bin_attrs = bgrt_bin_attributes,
+ .bin_attrs_new = bgrt_bin_attributes,
};
int __init acpi_parse_bgrt(struct acpi_table_header *table)
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 624fce67ce43..952216c67d58 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -152,6 +152,7 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1064", 0},
{"INTC106B", 0},
{"INTC10A3", 0},
+ {"INTC10D7", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 3d3edd81b172..e8caf4106ff9 100644
--- a/drivers/acpi/dptf/dptf_power.c
+	/* do NOT wait on this SRCU while hwctx_lock is held */
@@ -236,6 +236,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC106D", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
+ {"INTC10D8", 0},
+ {"INTC10D9", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 014ada759954..aef7aca2161d 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -55,6 +55,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC10A3"},
{"INTC10A4"},
{"INTC10A5"},
+ {"INTC10D4"},
+ {"INTC10D5"},
+ {"INTC10D6"},
+ {"INTC10D7"},
+ {"INTC10D8"},
+ {"INTC10D9"},
{""},
};
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index db25a3898af7..488b51e2cb31 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -19,6 +19,7 @@
{"INTC1063", }, /* Fan for Meteor Lake generation */ \
{"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
+ {"INTC10D6", }, /* Fan for Panther Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 3ea9cfcff46e..10016f52f4f4 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -371,19 +371,25 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&pdev->dev.kobj,
&cdev->device.kobj,
"thermal_cooling");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
+ goto err_unregister;
+ }
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
- goto err_end;
+ goto err_remove_link;
}
return 0;
+err_remove_link:
+ sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
+err_unregister:
+ thermal_cooling_device_unregister(cdev);
err_end:
if (fan->acpi4)
acpi_fan_delete_attributes(device);
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index 92b658f92dc0..5b85989f96be 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -624,8 +624,7 @@ static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
if (!fwnode_property_present(adev_fwnode, "rotation")) {
struct acpi_pld_info *pld;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_get_physical_device_location(handle, &pld)) {
swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
PROPERTY_ENTRY_U32("rotation",
pld->rotation * 45U);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index fed446aace42..5ff343096ece 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -607,7 +607,27 @@ acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- msleep(ms);
+ u64 usec = ms * USEC_PER_MSEC, delta_us = 50;
+
+ /*
+ * Use a hrtimer because the timer wheel timers are optimized for
+	 * cancellation before they expire and this timer is not going to be
+ * canceled.
+ *
+ * Set the delta between the requested sleep time and the effective
+ * deadline to at least 50 us in case there is an opportunity for timer
+ * coalescing.
+ *
+ * Moreover, longer sleeps can be assumed to need somewhat less timer
+ * precision, so sacrifice some of it for making the timer a more likely
+ * candidate for coalescing by setting the delta to 1% of the sleep time
+ * if it is above 5 ms (this value is chosen so that the delta is a
+ * continuous function of the sleep time).
+ */
+ if (ms > 5)
+ delta_us = (USEC_PER_MSEC / 100) * ms;
+
+ usleep_range(usec, usec + delta_us);
}
void acpi_os_stall(u32 us)
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index d2f7fd7743a1..fc92e43d0fe9 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -2,16 +2,28 @@
/* Platform profile sysfs interface */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_profile.h>
#include <linux/sysfs.h>
-static struct platform_profile_handler *cur_profile;
+#define to_pprof_handler(d) (container_of(d, struct platform_profile_handler, dev))
+
static DEFINE_MUTEX(profile_lock);
+struct platform_profile_handler {
+ const char *name;
+ struct device dev;
+ int minor;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ const struct platform_profile_ops *ops;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -19,99 +31,373 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0;
- int err, i;
-
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+static DEFINE_IDA(platform_profile_ida);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+/**
+ * _common_choices_show - Show the available profile choices
+ * @choices: The available profile choices
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t _common_choices_show(unsigned long *choices, char *buf)
+{
+ int i, len = 0;
- for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ for_each_set_bit(i, choices, PLATFORM_PROFILE_LAST) {
if (len == 0)
len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
else
len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
}
len += sysfs_emit_at(buf, len, "\n");
- mutex_unlock(&profile_lock);
+
return len;
}
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/**
+ * _store_class_profile - Set the profile for a class device
+ * @dev: The class device
+ * @data: The profile to set
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_class_profile(struct device *dev, void *data)
{
- enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ struct platform_profile_handler *handler;
+ int *bit = (int *)data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (!test_bit(*bit, handler->choices))
+ return -EOPNOTSUPP;
+
+ return handler->ops->profile_set(dev, *bit);
+}
+
+/**
+ * _notify_class_profile - Notify the class device of a profile change
+ * @dev: The class device
+ * @data: Unused
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _notify_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ lockdep_assert_held(&profile_lock);
+ sysfs_notify(&handler->dev.kobj, NULL, "profile");
+ kobject_uevent(&handler->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * get_class_profile - Show the current profile for a class device
+ * @dev: The class device
+ * @profile: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int get_class_profile(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct platform_profile_handler *handler;
+ enum platform_profile_option val;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ err = handler->ops->profile_get(dev, &val);
+ if (err) {
+ pr_err("Failed to get profile for handler %s\n", handler->name);
return err;
+ }
+
+ if (WARN_ON(val >= PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+ *profile = val;
+
+ return 0;
+}
+
+/**
+ * name_show - Show the name of the profile handler
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return sysfs_emit(buf, "%s\n", handler->name);
+}
+static DEVICE_ATTR_RO(name);
+
+/**
+ * choices_show - Show the available profile choices
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+	return _common_choices_show(handler->choices, buf);
+}
+static DEVICE_ATTR_RO(choices);
+
+/**
+ * profile_show - Show the current profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = get_class_profile(dev, &profile);
+ if (err)
+ return err;
}
- err = cur_profile->profile_get(cur_profile, &profile);
- mutex_unlock(&profile_lock);
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * profile_store - Set the profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index, ret;
+
+ index = sysfs_match_string(profile_names, buf);
+ if (index < 0)
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = _store_class_profile(dev, &index);
+ if (ret)
+ return ret;
+ }
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return count;
+}
+static DEVICE_ATTR_RW(profile);
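+
With the attribute in place, userspace selects a per-handler profile by writing one of the names above to the class device, e.g. "echo performance > /sys/class/platform-profile/platform-profile-0/profile" (the "platform-profile" class name comes from this patch; the exact device directory name is illustrative).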
+
+static struct attribute *profile_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_choices.attr,
+ &dev_attr_profile.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(profile);
+
+static void pprof_device_release(struct device *dev)
+{
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+
+ kfree(pprof);
+}
+
+static const struct class platform_profile_class = {
+ .name = "platform-profile",
+ .dev_groups = profile_groups,
+ .dev_release = pprof_device_release,
+};
+
+/**
+ * _aggregate_choices - Aggregate the available profile choices
+ * @dev: The device
+ * @data: The available profile choices
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_choices(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler;
+ unsigned long *aggregate = data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
+ bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
+ else
+ bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
+
+ return 0;
+}
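+
The sentinel trick above, setting the out-of-range PLATFORM_PROFILE_LAST bit to mean "no handler visited yet", can be illustrated with a self-contained sketch (plain userspace C, bit values hypothetical):

	#include <stdio.h>

	#define NPROF		7		/* stand-in for PLATFORM_PROFILE_LAST */
	#define SENTINEL	(1UL << NPROF)

	/* First handler seen: copy its choices; later handlers: intersect. */
	static unsigned long aggregate_one(unsigned long agg, unsigned long choices)
	{
		return (agg & SENTINEL) ? choices : (agg & choices);
	}

	int main(void)
	{
		unsigned long agg = SENTINEL;

		agg = aggregate_one(agg, 0x19UL);	/* handler A */
		agg = aggregate_one(agg, 0x1dUL);	/* handler B */
		printf("common choices: %#lx\n", agg);	/* 0x19 & 0x1d == 0x19 */
		return 0;
	}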
+
+/**
+ * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int err;
+
+ set_bit(PLATFORM_PROFILE_LAST, aggregate);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ aggregate, _aggregate_choices);
+ if (err)
+ return err;
+ }
+
+	/* no profile handler registered anymore */
+ if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+
+	return _common_choices_show(aggregate, buf);
+}
+
+/**
+ * _aggregate_profiles - Aggregate the profiles for legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_profiles(struct device *dev, void *data)
+{
+ enum platform_profile_option *profile = data;
+ enum platform_profile_option val;
+ int err;
+
+ err = get_class_profile(dev, &val);
if (err)
return err;
- /* Check that profile is valid index */
- if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
- return -EIO;
+ if (*profile != PLATFORM_PROFILE_LAST && *profile != val)
+ *profile = PLATFORM_PROFILE_CUSTOM;
+ else
+ *profile = val;
- return sysfs_emit(buf, "%s\n", profile_names[profile]);
+ return 0;
}
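
Concretely: with two registered handlers, if both report "balanced" the legacy sysfs file shows "balanced"; if one reports "low-power" and the other "performance", the first iteration stores the value and the second detects the mismatch, so the aggregate degrades to "custom".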
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+/**
+ * _store_and_notify - Store and notify a class from legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_and_notify(struct device *dev, void *data)
{
- int err, i;
+ enum platform_profile_option *profile = data;
+ int err;
- err = mutex_lock_interruptible(&profile_lock);
+ err = _store_class_profile(dev, profile);
if (err)
return err;
+ return _notify_class_profile(dev, NULL);
+}
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+/**
+ * platform_profile_show - Show the current profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
}
+	/* no profile handler registered anymore */
+ if (profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * platform_profile_store - Set the profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t platform_profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int ret;
+ int i;
+
/* Scan for a matching profile */
i = sysfs_match_string(profile_names, buf);
- if (i < 0) {
- mutex_unlock(&profile_lock);
+ if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
+ set_bit(PLATFORM_PROFILE_LAST, choices);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = class_for_each_device(&platform_profile_class, NULL,
+ choices, _aggregate_choices);
+ if (ret)
+ return ret;
+ if (!test_bit(i, choices))
+ return -EOPNOTSUPP;
+
+ ret = class_for_each_device(&platform_profile_class, NULL, &i,
+ _store_and_notify);
+ if (ret)
+ return ret;
}
- /* Check that platform supports this profile choice */
- if (!test_bit(i, cur_profile->choices)) {
- mutex_unlock(&profile_lock);
- return -EOPNOTSUPP;
- }
-
- err = cur_profile->profile_set(cur_profile, i);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
- mutex_unlock(&profile_lock);
- if (err)
- return err;
return count;
}
@@ -124,98 +410,249 @@ static struct attribute *platform_profile_attrs[] = {
NULL
};
+static int profile_class_registered(struct device *dev, const void *data)
+{
+ return 1;
+}
+
+static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ if (!class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered))
+ return 0;
+ return attr->mode;
+}
+
static const struct attribute_group platform_profile_group = {
- .attrs = platform_profile_attrs
+ .attrs = platform_profile_attrs,
+ .is_visible = profile_class_is_visible,
};
-void platform_profile_notify(void)
+/**
+ * platform_profile_notify - Notify class device and legacy sysfs interface
+ * @dev: The class device
+ */
+void platform_profile_notify(struct device *dev)
{
- if (!cur_profile)
- return;
+ scoped_cond_guard(mutex_intr, return, &profile_lock) {
+ _notify_class_profile(dev, NULL);
+ }
sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
+/**
+ * platform_profile_cycle - Cycle through the profiles available on all registered class devices
+ *
+ * Return: 0 on success, -errno on failure
+ */
int platform_profile_cycle(void)
{
- enum platform_profile_option profile;
- enum platform_profile_option next;
+ enum platform_profile_option next = PLATFORM_PROFILE_LAST;
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+ set_bit(PLATFORM_PROFILE_LAST, choices);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+ if (profile == PLATFORM_PROFILE_CUSTOM ||
+ profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
- err = cur_profile->profile_get(cur_profile, &profile);
- if (err) {
- mutex_unlock(&profile_lock);
- return err;
- }
+ err = class_for_each_device(&platform_profile_class, NULL,
+ choices, _aggregate_choices);
+ if (err)
+ return err;
- next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST,
- profile + 1);
+		/* never cycle into the custom profile, even if all drivers support it */
+ clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
- if (WARN_ON(next == PLATFORM_PROFILE_LAST)) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
- }
+ next = find_next_bit_wrap(choices,
+ PLATFORM_PROFILE_LAST,
+ profile + 1);
- err = cur_profile->profile_set(cur_profile, next);
- mutex_unlock(&profile_lock);
+ err = class_for_each_device(&platform_profile_class, NULL, &next,
+ _store_and_notify);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (err)
+ return err;
+ }
- return err;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_cycle);
-int platform_profile_register(struct platform_profile_handler *pprof)
+/**
+ * platform_profile_register - Creates and registers a platform profile class device
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
{
+ struct device *ppdev;
+ int minor;
int err;
- mutex_lock(&profile_lock);
- /* We can only have one active profile */
- if (cur_profile) {
- mutex_unlock(&profile_lock);
- return -EEXIST;
+ /* Sanity check */
+ if (WARN_ON_ONCE(!dev || !name || !ops || !ops->profile_get ||
+ !ops->profile_set || !ops->probe))
+ return ERR_PTR(-EINVAL);
+
+ struct platform_profile_handler *pprof __free(kfree) = kzalloc(
+ sizeof(*pprof), GFP_KERNEL);
+ if (!pprof)
+ return ERR_PTR(-ENOMEM);
+
+ err = ops->probe(drvdata, pprof->choices);
+ if (err) {
+ dev_err(dev, "platform_profile probe failed\n");
+ return ERR_PTR(err);
}
- /* Sanity check the profile handler field are set */
- if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
- !pprof->profile_set || !pprof->profile_get) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
+ if (bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST)) {
+ dev_err(dev, "Failed to register platform_profile class device with empty choices\n");
+ return ERR_PTR(-EINVAL);
}
- err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ guard(mutex)(&profile_lock);
+
+ /* create class interface for individual handler */
+ minor = ida_alloc(&platform_profile_ida, GFP_KERNEL);
+ if (minor < 0)
+ return ERR_PTR(minor);
+
+ pprof->name = name;
+ pprof->ops = ops;
+ pprof->minor = minor;
+ pprof->dev.class = &platform_profile_class;
+ pprof->dev.parent = dev;
+ dev_set_drvdata(&pprof->dev, drvdata);
+ dev_set_name(&pprof->dev, "platform-profile-%d", pprof->minor);
+ /* device_register() takes ownership of pprof/ppdev */
+ ppdev = &no_free_ptr(pprof)->dev;
+ err = device_register(ppdev);
if (err) {
- mutex_unlock(&profile_lock);
- return err;
+ put_device(ppdev);
+ goto cleanup_ida;
}
- cur_profile = pprof;
- mutex_unlock(&profile_lock);
- return 0;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ goto cleanup_cur;
+
+ return ppdev;
+
+cleanup_cur:
+ device_unregister(ppdev);
+
+cleanup_ida:
+ ida_free(&platform_profile_ida, minor);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(platform_profile_register);
-int platform_profile_remove(void)
+/**
+ * platform_profile_remove - Unregisters a platform profile class device
+ * @dev: Class device
+ *
+ * Return: Always 0
+ */
+int platform_profile_remove(struct device *dev)
{
- sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+ int id;
+ guard(mutex)(&profile_lock);
+
+ id = pprof->minor;
+ device_unregister(&pprof->dev);
+ ida_free(&platform_profile_ida, id);
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ sysfs_update_group(acpi_kobj, &platform_profile_group);
- mutex_lock(&profile_lock);
- cur_profile = NULL;
- mutex_unlock(&profile_lock);
return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
+static void devm_platform_profile_release(struct device *dev, void *res)
+{
+ struct device **ppdev = res;
+
+ platform_profile_remove(*ppdev);
+}
+
+/**
+ * devm_platform_profile_register - Device managed version of platform_profile_register
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
+{
+ struct device *ppdev;
+ struct device **dr;
+
+ dr = devres_alloc(devm_platform_profile_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ppdev = platform_profile_register(dev, name, drvdata, ops);
+ if (IS_ERR(ppdev)) {
+ devres_free(dr);
+ return ppdev;
+ }
+
+ *dr = ppdev;
+ devres_add(dev, dr);
+
+ return ppdev;
+}
+EXPORT_SYMBOL_GPL(devm_platform_profile_register);
+
+static int __init platform_profile_init(void)
+{
+ int err;
+
+ err = class_register(&platform_profile_class);
+ if (err)
+ return err;
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ class_unregister(&platform_profile_class);
+
+ return err;
+}
+
+static void __exit platform_profile_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ class_unregister(&platform_profile_class);
+}
+module_init(platform_profile_init);
+module_exit(platform_profile_exit);
+
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("ACPI platform profile sysfs interface");
MODULE_LICENSE("GPL");
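
For orientation, a minimal driver-side sketch of the new class-device API (not part of the patch); the profile_get/profile_set signatures are inferred from the registration path above, and all my_* names are illustrative assumptions:

/* Hypothetical driver wiring for the new class-based API (sketch only). */
struct my_priv {
	struct device *ppdev;
	enum platform_profile_option cur_profile;
};

static int my_profile_probe(void *drvdata, unsigned long *choices)
{
	set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static int my_profile_get(struct device *dev, enum platform_profile_option *profile)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	*profile = priv->cur_profile;
	return 0;
}

static int my_profile_set(struct device *dev, enum platform_profile_option profile)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	priv->cur_profile = profile;
	return 0;
}

static const struct platform_profile_ops my_profile_ops = {
	.probe		= my_profile_probe,
	.profile_get	= my_profile_get,
	.profile_set	= my_profile_set,
};

static int my_parent_probe(struct device *parent, struct my_priv *priv)
{
	/* unregistration is device-managed, no remove() counterpart needed */
	priv->ppdev = devm_platform_profile_register(parent, "my-profile",
						     priv, &my_profile_ops);
	return PTR_ERR_OR_ZERO(priv->ppdev);
}
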
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 80a52a4e66dd..98d93ed58315 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1492,7 +1492,7 @@ acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
- return false;
+ return true;
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
@@ -1656,6 +1656,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
+ .property_read_bool = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 74dcccdc6482..9f4efa8f75a6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -723,10 +723,8 @@ int acpi_tie_acpi_dev(struct acpi_device *adev)
static void acpi_store_pld_crc(struct acpi_device *adev)
{
struct acpi_pld_info *pld;
- acpi_status status;
- status = acpi_get_physical_device_location(adev->handle, &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(adev->handle, &pld))
return;
adev->pld_crc = crc32(~0, pld, sizeof(*pld));
@@ -1769,6 +1767,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"CSC3557", },
{"INT33FE", },
{"INT3515", },
+ {"TXNW2781", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
{"CLSA0101", },
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 687524b50085..a48ebbf768f9 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -319,7 +319,7 @@ struct acpi_data_attr {
};
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_table_attr *table_attr =
@@ -372,7 +372,7 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
}
table_attr->attr.size = table_header->length;
- table_attr->attr.read = acpi_table_show;
+ table_attr->attr.read_new = acpi_table_show;
table_attr->attr.attr.name = table_attr->filename;
table_attr->attr.attr.mode = 0400;
@@ -412,7 +412,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
}
static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
@@ -495,7 +495,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
if (!data_attr)
return -ENOMEM;
sysfs_attr_init(&data_attr->attr.attr);
- data_attr->attr.read = acpi_data_show;
+ data_attr->attr.read_new = acpi_data_show;
data_attr->attr.attr.mode = 0400;
return acpi_data_objs[i].fn(th, data_attr);
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 9e1b01c35070..2295abbecd14 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -56,7 +56,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_apic *)header;
pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -66,7 +66,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_x2apic *)header;
pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -160,7 +160,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_sapic *)header;
pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -183,7 +183,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
p->uid, p->base_address,
p->arm_mpidr,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -218,7 +218,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n",
p->processor_id, p->core_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -228,7 +228,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n",
p->uid, p->hart_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6de542d99518..526563a0d188 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -494,7 +494,7 @@ bool acpi_device_dep(acpi_handle target, acpi_handle match)
}
EXPORT_SYMBOL_GPL(acpi_device_dep);
-acpi_status
+bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
@@ -502,9 +502,8 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
union acpi_object *output;
status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer);
-
if (ACPI_FAILURE(status))
- return status;
+ return false;
output = buffer.pointer;
@@ -523,7 +522,7 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
out:
kfree(buffer.pointer);
- return status;
+ return ACPI_SUCCESS(status);
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
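
A short sketch of a caller updated for the new bool return convention; freeing the _PLD buffer with ACPI_FREE() mirrors the existing callers touched by this patch:

static void my_read_pld(struct acpi_device *adev)
{
	struct acpi_pld_info *pld;

	/* false now covers both a missing _PLD and an evaluation failure */
	if (!acpi_get_physical_device_location(adev->handle, &pld))
		return;

	/* ... consume pld->panel, pld->vertical_position, etc. ... */
	ACPI_FREE(pld);
}
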
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index ef353ca13c35..a4b98e95ab85 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3017,8 +3017,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
- char *secctx = NULL;
- u32 secctx_sz = 0;
+ struct lsm_context lsmctx = { };
struct list_head sgc_head;
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
@@ -3297,8 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
size_t added_size;
security_cred_getsecid(proc->cred, &secid);
- ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
- if (ret) {
+ ret = security_secid_to_secctx(secid, &lsmctx);
+ if (ret < 0) {
binder_txn_error("%d:%d failed to get security context\n",
thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
@@ -3306,7 +3305,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- added_size = ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(lsmctx.len, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
@@ -3340,23 +3339,23 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- if (secctx) {
+ if (lsmctx.context) {
int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
- secctx, secctx_sz);
+ lsmctx.context, lsmctx.len);
if (err) {
t->security_ctx = 0;
WARN_ON(1);
}
- security_release_secctx(secctx, secctx_sz);
- secctx = NULL;
+ security_release_secctx(&lsmctx);
+ lsmctx.context = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
@@ -3400,7 +3399,7 @@ static void binder_transaction(struct binder_proc *proc,
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3779,8 +3778,8 @@ err_copy_data_failed:
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
- if (secctx)
- security_release_secctx(secctx, secctx_sz);
+ if (lsmctx.context)
+ security_release_secctx(&lsmctx);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8f40f75ba08c..06781bdde0d2 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -396,7 +396,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .tag_alloc_policy_rr = true, \
.device_configure = ata_scsi_device_configure
extern struct ata_port_operations ahci_ops;
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index f2f36e55a1f4..4b01bb6880b0 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -935,7 +935,7 @@ static const struct scsi_host_template pata_macio_sht = {
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static struct ata_port_operations pata_macio_ops = {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b8f363370e1a..21c72650f9cc 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -672,7 +672,7 @@ static const struct scsi_host_template mv6_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.device_configure = ata_scsi_device_configure
};
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 36d99043ef50..823cce5ea1e9 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -385,7 +385,7 @@ static const struct scsi_host_template nv_adma_sht = {
.device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static const struct scsi_host_template nv_swncq_sht = {
@@ -396,7 +396,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
/*
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 72c03cbdaff4..935b13e79dec 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -378,7 +378,6 @@ static const struct scsi_host_template sil24_sht = {
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
- .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.device_configure = ata_scsi_device_configure
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index a802678a6f74..32e1863ef4b2 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -36,7 +36,6 @@ struct img_ascii_lcd_config {
* @base: the base address of the LCD registers
* @regmap: the regmap through which LCD registers are accessed
* @offset: the offset within regmap to the start of the LCD registers
- * @cfg: pointer to the LCD model configuration
*/
struct img_ascii_lcd_ctx {
struct linedisp linedisp;
@@ -45,7 +44,6 @@ struct img_ascii_lcd_ctx {
struct regmap *regmap;
};
u32 offset;
- const struct img_ascii_lcd_config *cfg;
};
/*
@@ -71,7 +69,7 @@ static void boston_update(struct linedisp *linedisp)
#endif
}
-static struct img_ascii_lcd_config boston_config = {
+static const struct img_ascii_lcd_config boston_config = {
.num_chars = 8,
.ops = {
.update = boston_update,
@@ -100,7 +98,7 @@ static void malta_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config malta_config = {
+static const struct img_ascii_lcd_config malta_config = {
.num_chars = 8,
.external_regmap = true,
.ops = {
@@ -202,7 +200,7 @@ static void sead3_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config sead3_config = {
+static const struct img_ascii_lcd_config sead3_config = {
.num_chars = 16,
.external_regmap = true,
.ops = {
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 951819e71b4a..5db06e825c94 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -13,13 +13,11 @@
bool dev_add_physical_location(struct device *dev)
{
struct acpi_pld_info *pld;
- acpi_status status;
if (!has_acpi_companion(dev))
return false;
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
dev->physical_location =
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e1..cbc9a7a75def 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -496,6 +496,7 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@@ -512,11 +513,23 @@ struct dpm_watchdog {
static void dpm_watchdog_handler(struct timer_list *t)
{
struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
+
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL, KERN_EMERG);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -530,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
@@ -914,7 +928,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f8163b559bf9..f84018125b46 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5a5a9e978e85..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, in the same way as
+ * dev_pm_set_wake_irq(), but the wake IRQ is cleared automatically
+ * on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
+
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
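
A minimal sketch of the new device-managed helper in a platform driver's probe path (assumed boilerplate, error paths trimmed):

static int my_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);

	/* cleared on driver detach, so no explicit cleanup in remove() */
	ret = devm_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		return ret;

	return 0;
}
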
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 837d77e3af2b..c1392743df9c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -71,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
+ * device_property_read_bool - Return the value for a boolean property of a device
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Return whether property @propname is true or false in the device firmware description.
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
+ */
+bool device_property_read_bool(const struct device *dev, const char *propname)
+{
+ return fwnode_property_read_bool(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_read_bool);
+
+/**
+ * fwnode_property_read_bool - Return the value for a boolean property of a firmware node
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Return whether property @propname is true or false in the firmware description.
+ */
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
+
+/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
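
A one-function sketch of the new boolean helper; "wakeup-source" is only an example property name:

static void my_configure_wakeup(struct device *dev)
{
	/* true only when the firmware node reports the property as true */
	if (device_property_read_bool(dev, "wakeup-source"))
		device_init_wakeup(dev, true);
}
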
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 23da7b31d715..2319c30283a6 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -73,8 +73,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
rcu_read_unlock();
- entry = kmalloc((last - index + 1) * sizeof(unsigned long),
- map->alloc_flags);
+ entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -204,7 +203,7 @@ static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
- buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
+ buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
@@ -320,7 +319,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
unsigned long *entry;
int i, ret;
- entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
+ entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 188438186589..a9d17f316e55 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -275,18 +275,16 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
pos = (reg - base_reg) / map->reg_stride;
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
- blk = krealloc(rbnode->block,
- blklen * map->cache_word_size,
- map->alloc_flags);
+ blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present),
- map->alloc_flags);
+ present = krealloc_array(rbnode->cache_present,
+ BITS_TO_LONGS(blklen), sizeof(*present),
+ map->alloc_flags);
if (!present)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d3659ba3cc11..b1f8508c3966 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -154,7 +154,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->num_reg_defaults = config->num_reg_defaults;
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_word_size = BITS_TO_BYTES(config->val_bits);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
map->cache = NULL;
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index c99eada83780..86644bbd0710 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -1,45 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.
+#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
+#include <sound/sdca_function.h>
#include "internal.h"
+struct regmap_mbq_context {
+ struct device *dev;
+
+ struct regmap_sdw_mbq_cfg cfg;
+
+ int val_size;
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+};
+
+static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ int size = ctx->val_size;
+
+ if (ctx->cfg.mbq_size) {
+ size = ctx->cfg.mbq_size(ctx->dev, reg);
+ if (!size || size > ctx->val_size)
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ if (ctx->cfg.deferrable)
+ return ctx->cfg.deferrable(ctx->dev, reg);
+
+ return false;
+}
+
+static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
+ struct regmap_mbq_context *ctx)
+{
+ struct device *dev = &slave->dev;
+ int val, ret = 0;
+
+ dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
+
+ reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
+ SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
+
+ if (ctx->readable_reg(dev, reg)) {
+ ret = read_poll_timeout(sdw_read_no_pm, val,
+ val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
+ ctx->cfg.timeout_us, ctx->cfg.retry_us,
+ false, slave, reg);
+ if (val < 0)
+ return val;
+ if (ret)
+ dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
+ } else {
+ fsleep(ctx->cfg.timeout_us);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int val,
+ int mbq_size, bool deferrable)
+{
+ int shift = mbq_size * BITS_PER_BYTE;
+ int ret;
+
+ while (--mbq_size > 0) {
+ shift -= BITS_PER_BYTE;
+
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
+ (val >> shift) & 0xff);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = sdw_write_no_pm(slave, reg, val & 0xff);
+ if (deferrable && ret == -ENODATA)
+ return -EAGAIN;
+
+ return ret;
+}
+
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
- ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
- if (ret < 0)
- return ret;
+ if (mbq_size < 0)
+ return mbq_size;
+
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int *val,
+ int mbq_size, bool deferrable)
+{
+ int shift = BITS_PER_BYTE;
+ int read;
+
+ read = sdw_read_no_pm(slave, reg);
+ if (read < 0) {
+ if (deferrable && read == -ENODATA)
+ return -EAGAIN;
+
+ return read;
+ }
+
+ *val = read;
+
+ while (--mbq_size > 0) {
+ read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
+ if (read < 0)
+ return read;
+
+ *val |= read << shift;
+ shift += BITS_PER_BYTE;
+ }
- return sdw_write_no_pm(slave, reg, val & 0xff);
+ return 0;
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- int read0;
- int read1;
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
+ int ret;
- read0 = sdw_read_no_pm(slave, reg);
- if (read0 < 0)
- return read0;
+ if (mbq_size < 0)
+ return mbq_size;
- read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
- if (read1 < 0)
- return read1;
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
- *val = (read1 << 8) | read0;
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
+ }
- return 0;
+ return ret;
}
static const struct regmap_bus regmap_sdw_mbq = {
@@ -51,8 +193,7 @@ static const struct regmap_bus regmap_sdw_mbq = {
static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
- /* MBQ-based controls are only 16-bits for now */
- if (config->val_bits != 16)
+ if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
return -ENOTSUPP;
/* Registers are 32 bits wide */
@@ -65,35 +206,69 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
return 0;
}
+static struct regmap_mbq_context *
+regmap_sdw_mbq_gen_context(struct device *dev,
+ const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config)
+{
+ struct regmap_mbq_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+
+ if (mbq_config)
+ ctx->cfg = *mbq_config;
+
+ ctx->val_size = config->val_bits / BITS_PER_BYTE;
+ ctx->readable_reg = config->readable_reg;
+
+ return ctx;
+}
+
struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
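
A sketch of a SoundWire codec supplying the new MBQ configuration; it calls the exported __devm entry point directly (NULL lockdep arguments), and the my_* names, register ranges, and timing values are illustrative assumptions:

static int my_mbq_size(struct device *dev, unsigned int reg)
{
	/* 4-byte MBQ controls in one range, the default 2 bytes elsewhere */
	if (reg >= MY_WIDE_REG_BASE && reg <= MY_WIDE_REG_END)
		return 4;
	return 2;
}

static bool my_mbq_deferrable(struct device *dev, unsigned int reg)
{
	/* only this range may report Function Busy and need a retry */
	return reg >= MY_DEFER_REG_BASE && reg <= MY_DEFER_REG_END;
}

static const struct regmap_sdw_mbq_cfg my_mbq_cfg = {
	.mbq_size	= my_mbq_size,
	.deferrable	= my_mbq_deferrable,
	.timeout_us	= 1000,		/* placeholder polling values */
	.retry_us	= 100000,
};

static int my_codec_init(struct sdw_slave *slave)
{
	struct regmap *regmap;

	regmap = __devm_regmap_init_sdw_mbq(slave, &my_regmap_config,
					    &my_mbq_cfg, NULL, NULL);
	return PTR_ERR_OR_ZERO(regmap);
}
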
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 5962ea1230a1..f2843f814675 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -769,14 +769,13 @@ struct regmap *__regmap_init(struct device *dev,
map->alloc_flags = GFP_KERNEL;
map->reg_base = config->reg_base;
+ map->reg_shift = config->pad_bits % 8;
- map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.reg_shift = config->reg_shift;
- map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
- config->val_bits + config->pad_bits, 8);
- map->reg_shift = config->pad_bits % 8;
+ map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
+ map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
+ map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
if (config->reg_stride)
map->reg_stride = config->reg_stride;
else
@@ -3116,7 +3115,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
EXPORT_SYMBOL_GPL(regmap_fields_read);
static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
- unsigned int *regs, void *val, size_t val_count)
+ const unsigned int *regs, void *val, size_t val_count)
{
u32 *u32 = val;
u16 *u16 = val;
@@ -3210,7 +3209,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
size_t val_count)
{
if (val_count == 0)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index eb6eb25b343b..b1726a3515f6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -677,6 +677,7 @@ static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
+ .property_read_bool = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 49ced65bef4c..9edd4468f755 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1819,7 +1819,6 @@ static int fd_alloc_drive(int drive)
unit[drive].tag_set.nr_maps = 1;
unit[drive].tag_set.queue_depth = 2;
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
- unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
goto out_cleanup_trackbuf;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2028795ec61c..00b74a845328 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -368,7 +368,6 @@ aoeblk_gdalloc(void *vp)
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4ba98c6654be..110f9aca2667 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2088,7 +2088,6 @@ static int __init atari_floppy_init (void)
unit[i].tag_set.nr_maps = 1;
unit[i].tag_set.queue_depth = 2;
unit[i].tag_set.numa_node = NUMA_NO_NODE;
- unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
if (ret)
goto err;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3affb538b989..abf0486f0d4f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4596,7 +4596,6 @@ static int __init do_floppy_init(void)
tag_sets[drive].nr_maps = 1;
tag_sets[drive].queue_depth = 2;
tag_sets[drive].numa_node = NUMA_NO_NODE;
- tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
if (err)
goto out_put_disk;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8f6761c27c68..1ec7417c7f00 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,7 +68,6 @@ struct loop_device {
struct list_head idle_worker_list;
struct rb_root worker_tree;
struct timer_list timer;
- bool use_dio;
bool sysfs_inited;
struct request_queue *lo_queue;
@@ -182,41 +181,44 @@ static bool lo_bdev_can_use_dio(struct loop_device *lo,
return true;
}
-static void __loop_update_dio(struct loop_device *lo, bool dio)
+static bool lo_can_use_dio(struct loop_device *lo)
{
- struct file *file = lo->lo_backing_file;
- struct inode *inode = file->f_mapping->host;
- struct block_device *backing_bdev = NULL;
- bool use_dio;
+ struct inode *inode = lo->lo_backing_file->f_mapping->host;
- if (S_ISBLK(inode->i_mode))
- backing_bdev = I_BDEV(inode);
- else if (inode->i_sb->s_bdev)
- backing_bdev = inode->i_sb->s_bdev;
+ if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
+ return false;
- use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
- (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
+ if (S_ISBLK(inode->i_mode))
+ return lo_bdev_can_use_dio(lo, I_BDEV(inode));
+ if (inode->i_sb->s_bdev)
+ return lo_bdev_can_use_dio(lo, inode->i_sb->s_bdev);
+ return true;
+}
- if (lo->use_dio == use_dio)
- return;
+/*
+ * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
+ * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
+ * disabled when the device block size is too small or the offset is unaligned.
+ *
+ * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
+ * not the originally passed in one.
+ */
+static inline void loop_update_dio(struct loop_device *lo)
+{
+ bool dio_in_use = lo->lo_flags & LO_FLAGS_DIRECT_IO;
- /* flush dirty pages before changing direct IO */
- vfs_fsync(file, 0);
+ lockdep_assert_held(&lo->lo_mutex);
+ WARN_ON_ONCE(lo->lo_state == Lo_bound &&
+ lo->lo_queue->mq_freeze_depth == 0);
- /*
- * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
- * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
- * will get updated by ioctl(LOOP_GET_STATUS)
- */
- if (lo->lo_state == Lo_bound)
- blk_mq_freeze_queue(lo->lo_queue);
- lo->use_dio = use_dio;
- if (use_dio)
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- if (lo->lo_state == Lo_bound)
- blk_mq_unfreeze_queue(lo->lo_queue);
+
+ /* flush dirty pages before starting to issue direct I/O */
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !dio_in_use)
+ vfs_fsync(lo->lo_backing_file, 0);
}
/**
@@ -311,6 +313,13 @@ static void loop_clear_limits(struct loop_device *lo, int mode)
lim.discard_granularity = 0;
}
+ /*
+ * XXX: this updates the queue limits without freezing the queue, which
+ * is against the locking protocol and dangerous. But we can't just
+ * freeze the queue as we're inside the ->queue_rq method here. So this
+ * should move out into a workqueue unless we get the file operations to
+ * advertise if they support specific fallocate operations.
+ */
queue_limits_commit_update(lo->lo_queue, &lim);
}
@@ -520,12 +529,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
}
}
-static inline void loop_update_dio(struct loop_device *lo)
-{
- __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
- lo->use_dio);
-}
-
static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
@@ -964,7 +967,6 @@ loop_set_status_from_info(struct loop_device *lo,
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_flags = info->lo_flags;
return 0;
}
@@ -977,12 +979,12 @@ static unsigned int loop_default_blocksize(struct loop_device *lo,
return SECTOR_SIZE;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
+static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
+ unsigned int bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct block_device *backing_bdev = NULL;
- struct queue_limits lim;
u32 granularity = 0, max_discard_sectors = 0;
if (S_ISBLK(inode->i_mode))
@@ -995,22 +997,20 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
loop_get_discard_config(lo, &granularity, &max_discard_sectors);
- lim = queue_limits_start_update(lo->lo_queue);
- lim.logical_block_size = bsize;
- lim.physical_block_size = bsize;
- lim.io_min = bsize;
- lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ lim->logical_block_size = bsize;
+ lim->physical_block_size = bsize;
+ lim->io_min = bsize;
+ lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
- lim.features |= BLK_FEAT_WRITE_CACHE;
+ lim->features |= BLK_FEAT_WRITE_CACHE;
if (backing_bdev && !bdev_nonrot(backing_bdev))
- lim.features |= BLK_FEAT_ROTATIONAL;
- lim.max_hw_discard_sectors = max_discard_sectors;
- lim.max_write_zeroes_sectors = max_discard_sectors;
+ lim->features |= BLK_FEAT_ROTATIONAL;
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
if (max_discard_sectors)
- lim.discard_granularity = granularity;
+ lim->discard_granularity = granularity;
else
- lim.discard_granularity = 0;
- return queue_limits_commit_update(lo->lo_queue, &lim);
+ lim->discard_granularity = 0;
}
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
@@ -1019,6 +1019,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
{
struct file *file = fget(config->fd);
struct address_space *mapping;
+ struct queue_limits lim;
int error;
loff_t size;
bool partscan;
@@ -1063,6 +1064,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
+ lo->lo_flags = config->info.lo_flags;
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
@@ -1084,13 +1086,15 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- error = loop_reconfigure_limits(lo, config->block_size);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, config->block_size);
+ /* No need to freeze the queue as the device isn't bound yet. */
+ error = queue_limits_commit_update(lo->lo_queue, &lim);
if (error)
goto out_unlock;
@@ -1150,7 +1154,12 @@ static void __loop_clr_fd(struct loop_device *lo)
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- /* reset the block size to the default */
+ /*
+ * Reset the block size to the default.
+ *
+ * No queue freezing needed because this is called from the final
+ * ->release call only, so there can't be any outstanding I/O.
+ */
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = SECTOR_SIZE;
lim.physical_block_size = SECTOR_SIZE;
@@ -1244,7 +1253,6 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
@@ -1263,21 +1271,19 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
invalidate_bdev(lo->lo_device);
}
- /* I/O need to be drained during transfer transition */
+ /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
blk_mq_freeze_queue(lo->lo_queue);
- prev_lo_flags = lo->lo_flags;
-
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- /* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For those flags, use the previous values instead */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For flags that can't be cleared, use previous values too */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ (info->lo_flags & LO_FLAGS_PARTSCAN);
+
+ lo->lo_flags &= ~(LOOP_SET_STATUS_SETTABLE_FLAGS |
+ LOOP_SET_STATUS_CLEARABLE_FLAGS);
+ lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
if (size_changed) {
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
@@ -1285,17 +1291,13 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
loop_set_size(lo, new_size);
}
- /* update dio if lo_offset or transfer is changed */
- __loop_update_dio(lo, lo->use_dio);
+ /* update the direct I/O flag if lo_offset changed */
+ loop_update_dio(lo);
out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
-
- if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
+ if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- partscan = true;
- }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
@@ -1444,20 +1446,32 @@ static int loop_set_capacity(struct loop_device *lo)
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
- int error = -ENXIO;
- if (lo->lo_state != Lo_bound)
- goto out;
+ bool use_dio = !!arg;
- __loop_update_dio(lo, !!arg);
- if (lo->use_dio == !!arg)
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+ if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
return 0;
- error = -EINVAL;
- out:
- return error;
+
+ if (use_dio) {
+ if (!lo_can_use_dio(lo))
+ return -EINVAL;
+ /* flush dirty pages before starting to use direct I/O */
+ vfs_fsync(lo->lo_backing_file, 0);
+ }
+
+ blk_mq_freeze_queue(lo->lo_queue);
+ if (use_dio)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ else
+ lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ blk_mq_unfreeze_queue(lo->lo_queue);
+ return 0;
}
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
+ struct queue_limits lim;
int err = 0;
if (lo->lo_state != Lo_bound)
@@ -1469,8 +1483,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, arg);
+
blk_mq_freeze_queue(lo->lo_queue);
- err = loop_reconfigure_limits(lo, arg);
+ err = queue_limits_commit_update(lo->lo_queue, &lim);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
@@ -1854,7 +1871,7 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->use_aio = false;
break;
default:
- cmd->use_aio = lo->use_dio;
+ cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
break;
}
@@ -2023,8 +2040,7 @@ static int loop_add(int i)
lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
- BLK_MQ_F_NO_SCHED_BY_DEFAULT;
+ lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
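
And a small userspace sketch of the direct I/O toggle whose kernel side is reworked above (error handling kept minimal):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0)
		return 1;

	/* fails with EINVAL when the backing file can't do direct I/O */
	if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
		return 1;

	return 0;
}
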
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 43701b7b10a7..95361099a2dc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3416,7 +3416,6 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b852050d8a96..b63a0f29a54a 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,6 +62,7 @@ struct nbd_sock {
bool dead;
int fallback_index;
int cookie;
+ struct work_struct work;
};
struct recv_thread_args {
@@ -141,6 +142,9 @@ struct nbd_device {
*/
#define NBD_CMD_INFLIGHT 2
+/* Only part of the request header or data payload has been sent successfully */
+#define NBD_CMD_PARTIAL_SEND 3
+
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
@@ -327,8 +331,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
{
struct queue_limits lim;
int error;
@@ -368,7 +371,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
- error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
if (error)
return error;
@@ -379,18 +382,6 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
-{
- int error;
-
- blk_mq_freeze_queue(nbd->disk->queue);
- error = __nbd_set_size(nbd, bytesize, blksize);
- blk_mq_unfreeze_queue(nbd->disk->queue);
-
- return error;
-}
-
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -466,6 +457,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ /* partial send is handled in nbd_sock's work function */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_RESET_TIMER;
+ }
+
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
@@ -615,6 +612,30 @@ static inline int was_interrupted(int result)
}
/*
+ * We've already sent the header or part of the data payload, so we have
+ * no choice but to mark the command pending and schedule the work item.
+ *
+ * We must also return BLK_STS_OK to the block core; otherwise this request
+ * could be re-dispatched with a different tag while the header already
+ * went out with the old tag, which would confuse reply handling.
+ */
+static void nbd_sched_pending_work(struct nbd_device *nbd,
+ struct nbd_sock *nsock,
+ struct nbd_cmd *cmd, int sent)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ /* pending work should be scheduled only once */
+ WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
+
+ nsock->pending = req;
+ nsock->sent = sent;
+ set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+ refcount_inc(&nbd->config_refs);
+ schedule_work(&nsock->work);
+}
+
+/*
* Returns BLK_STS_RESOURCE if the caller should retry after a delay.
* Returns BLK_STS_IOERR if sending failed.
*/
@@ -699,8 +720,8 @@ static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
* completely done.
*/
if (sent) {
- nsock->pending = req;
- nsock->sent = sent;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
@@ -737,14 +758,8 @@ send_pages:
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result < 0) {
if (was_interrupted(result)) {
- /* We've already sent the header, we
- * have no choice but to set pending and
- * return BUSY.
- */
- nsock->pending = req;
- nsock->sent = sent;
- set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -770,6 +785,14 @@ out:
return BLK_STS_OK;
requeue:
+ /*
+	 * Can't requeue while we are dealing with a partial send;
+	 * the request must be completed from the pending work
+	 * function instead.
+	 */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
+ return BLK_STS_OK;
+
/* retry on a different socket */
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
@@ -778,6 +801,44 @@ requeue:
return BLK_STS_OK;
}
+/* handle partial sending */
+static void nbd_pending_cmd_work(struct work_struct *work)
+{
+ struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
+ struct request *req = nsock->pending;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ unsigned long deadline = READ_ONCE(req->deadline);
+ unsigned int wait_ms = 2;
+
+ mutex_lock(&cmd->lock);
+
+ WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
+ if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
+ goto out;
+
+ mutex_lock(&nsock->tx_lock);
+ while (true) {
+ nbd_send_cmd(nbd, cmd, cmd->index);
+ if (!nsock->pending)
+ break;
+
+ /* don't let the timeout handler deal with a partial send */
+ if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
+ cmd->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+ break;
+ }
+ msleep(wait_ms);
+ wait_ms *= 2;
+ }
+ mutex_unlock(&nsock->tx_lock);
+ clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+out:
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+}
+
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
@@ -1224,6 +1285,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->pending = NULL;
nsock->sent = 0;
nsock->cookie = 0;
+ INIT_WORK(&nsock->work, nbd_pending_cmd_work);
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
blk_mq_unfreeze_queue(nbd->disk->queue);
@@ -1841,8 +1903,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_BLOCKING;
+ nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->backend = NULL;
@@ -2180,6 +2241,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
+ clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
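
The pending-work function above retries the partial send with an exponentially growing sleep and gives up once the next sleep would cross the request's deadline, so the block-layer timeout handler never races with it. The backoff skeleton in isolation (retry_send() is a hypothetical stand-in for nbd_send_cmd()):

	static void example_retry_until_deadline(struct request *req)
	{
		unsigned long deadline = READ_ONCE(req->deadline);
		unsigned int wait_ms = 2;

		while (!retry_send(req)) {
			/* only sleep while there is still time before the deadline */
			if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline)
				break;	/* caller fails the request instead */
			msleep(wait_ms);
			wait_ms *= 2;	/* 2, 4, 8, ... ms between attempts */
		}
	}
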
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3c3d8d200abb..d94ef37480bd 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -266,6 +266,10 @@ static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
+static bool g_rotational;
+module_param_named(rotational, g_rotational, bool, S_IRUGO);
+MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -468,6 +472,7 @@ NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
+NULLB_DEVICE_ATTR(rotational, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -621,6 +626,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
&nullb_device_attr_fua,
+ &nullb_device_attr_rotational,
NULL,
};
@@ -706,7 +712,8 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"shared_tags,size,submit_queues,use_per_node_hctx,"
"virt_boundary,zoned,zone_capacity,zone_max_active,"
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full\n");
+ "zone_size,zone_append_max_sectors,zone_full,"
+ "rotational\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -793,6 +800,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
dev->fua = g_fua;
+ dev->rotational = g_rotational;
return dev;
}
@@ -899,7 +907,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
if (radix_tree_insert(root, idx, t_page)) {
null_free_page(t_page);
t_page = radix_tree_lookup(root, idx);
- WARN_ON(!t_page || t_page->page->index != idx);
+ WARN_ON(!t_page || t_page->page->private != idx);
} else if (is_cache)
nullb->dev->curr_cache += PAGE_SIZE;
@@ -922,7 +930,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
(void **)t_pages, pos, FREE_BATCH);
for (i = 0; i < nr_pages; i++) {
- pos = t_pages[i]->page->index;
+ pos = t_pages[i]->page->private;
ret = radix_tree_delete_item(root, pos, t_pages[i]);
WARN_ON(ret != t_pages[i]);
null_free_page(ret);
@@ -948,7 +956,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
t_page = radix_tree_lookup(root, idx);
- WARN_ON(t_page && t_page->page->index != idx);
+ WARN_ON(t_page && t_page->page->private != idx);
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
return t_page;
@@ -991,7 +999,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_lock_irq(&nullb->lock);
idx = sector >> PAGE_SECTORS_SHIFT;
- t_page->page->index = idx;
+ t_page->page->private = idx;
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
radix_tree_preload_end();
@@ -1011,7 +1019,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
struct nullb_page *t_page, *ret;
void *dst, *src;
- idx = c_page->page->index;
+ idx = c_page->page->private;
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
@@ -1070,7 +1078,7 @@ again:
* avoid race, we don't allow page free
*/
for (i = 0; i < nr_pages; i++) {
- nullb->cache_flush_pos = c_pages[i]->page->index;
+ nullb->cache_flush_pos = c_pages[i]->page->private;
/*
* We found the page which is being flushed to disk by other
* threads
@@ -1783,9 +1791,8 @@ static int null_init_global_tag_set(void)
tag_set.nr_hw_queues = g_submit_queues;
tag_set.queue_depth = g_hw_queue_depth;
tag_set.numa_node = g_home_node;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
- tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (g_shared_tag_bitmap)
tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (g_blocking)
@@ -1809,9 +1816,8 @@ static int null_setup_tagset(struct nullb *nullb)
nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
nullb->tag_set->numa_node = nullb->dev->home_node;
- nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
if (nullb->dev->no_sched)
- nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (nullb->dev->shared_tag_bitmap)
nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (nullb->dev->blocking)
@@ -1938,6 +1944,9 @@ static int null_add_dev(struct nullb_device *dev)
lim.features |= BLK_FEAT_FUA;
}
+ if (dev->rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
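
The index null_blk keeps per backing page moves from page->index to page->private. The same field can be manipulated through the generic accessors; a sketch equivalent to the open-coded assignments above:

	#include <linux/mm.h>

	static void example_tag_page(struct page *page, unsigned long idx)
	{
		set_page_private(page, idx);		/* page->private = idx */
		WARN_ON(page_private(page) != idx);	/* reads page->private */
	}
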
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index a7bb32f73ec3..6f9fe6171087 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -107,6 +107,7 @@ struct nullb_device {
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
bool fua; /* Support FUA */
+ bool rotational; /* Fake rotational device */
};
struct nullb {
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index ff45ed766469..dc9e4a14b885 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
unsigned int devidx;
struct queue_limits lim = {
.logical_block_size = dev->blk_size,
- .max_hw_sectors = dev->bounce_size >> 9,
+ .max_hw_sectors = BOUNCE_SIZE >> 9,
.max_segments = -1,
- .max_segment_size = dev->bounce_size,
+ .max_segment_size = BOUNCE_SIZE,
.dma_alignment = dev->blk_size - 1,
.features = BLK_FEAT_WRITE_CACHE |
BLK_FEAT_ROTATIONAL,
@@ -434,8 +434,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
+ error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
if (error)
goto fail_teardown;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ac421dbeeb11..5b393e4a1ddf 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4964,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
- rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c34695d2eea7..82467ecde7ec 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->ops = &rnbd_mq_ops;
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_QUEUE_SHARED;
+ tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 08ce6d96d04c..2ee6e9bd4e28 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -167,7 +167,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
- bio_set_prio(bio, prio);
+ bio->bi_ioprio = prio;
submit_bio(bio);
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
index 9cca05dcf772..ddf3629d8894 100644
--- a/drivers/block/rnull.rs
+++ b/drivers/block/rnull.rs
@@ -32,25 +32,31 @@ module! {
license: "GPL v2",
}
+#[pin_data]
struct NullBlkModule {
- _disk: Pin<KBox<Mutex<GenDisk<NullBlkDevice>>>>,
+ #[pin]
+ _disk: Mutex<GenDisk<NullBlkDevice>>,
}
-impl kernel::Module for NullBlkModule {
- fn init(_module: &'static ThisModule) -> Result<Self> {
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
pr_info!("Rust null_blk loaded\n");
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)?;
+ // Use an immediately-called closure as a stable `try` block
+ let disk = /* try */ (|| {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = KBox::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(4096 << 11)
+ .logical_block_size(4096)?
+ .physical_block_size(4096)?
+ .rotational(false)
+ .build(format_args!("rnullb{}", 0), tagset)
+ })();
- Ok(Self { _disk: disk })
+ try_pin_init!(Self {
+ _disk <- new_mutex!(disk?, "nullb:disk"),
+ })
}
}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 2d38331ee667..88dcae6ec575 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -829,7 +829,7 @@ static int probe_disk(struct vdc_port *port)
}
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
- VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+ VDC_TX_RING_SIZE, 0);
if (err)
return err;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index be4ac58afe41..eda33c5eb5e2 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -818,7 +818,7 @@ static int swim_floppy_init(struct swim_priv *swd)
for (drive = 0; drive < swd->floppy_count; drive++) {
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
- &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+ &swim_mq_ops, 2, 0);
if (err)
goto exit_put_disks;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 90be1017f7bf..9914153b365b 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1208,8 +1208,7 @@ static int swim3_attach(struct macio_dev *mdev,
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
- rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
if (rc)
goto out_unregister;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 934ab9332c80..529085181f35 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2213,7 +2213,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
- ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3efe378f1386..bbaa26b523b8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -13,7 +13,6 @@
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
@@ -1106,9 +1105,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim.features |= BLK_FEAT_WRITE_CACHE;
else
lim.features &= ~BLK_FEAT_WRITE_CACHE;
- blk_mq_freeze_queue(disk->queue);
- i = queue_limits_commit_update(disk->queue, &lim);
- blk_mq_unfreeze_queue(disk->queue);
+ i = queue_limits_commit_update_frozen(disk->queue, &lim);
if (i)
return i;
return count;
@@ -1181,7 +1178,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
- blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ blk_mq_map_hw_queues(&set->map[i],
+ &vblk->vdev->dev, 0);
}
}
@@ -1481,7 +1479,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
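
blk_mq_map_hw_queues() replaces the virtio-specific mapping helper with a bus-generic one that derives the CPU-to-queue mapping from the parent device's IRQ affinity. A sketch of a map_queues callback in that style, using only the call shown in the hunk above (dev would be the transport's struct device):

	static void example_map_queues(struct blk_mq_tag_set *set, struct device *dev)
	{
		int i;

		for (i = 0; i < set->nr_maps; i++) {
			if (i == HCTX_TYPE_POLL)
				blk_mq_map_queues(&set->map[i]);	/* poll queues have no IRQs */
			else
				blk_mq_map_hw_queues(&set->map[i], dev, 0);
		}
	}
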
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 59ce113b882a..edcd08a9dcef 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1131,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
} else
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
- info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
info->tag_set.cmd_size = sizeof(struct blkif_req);
info->tag_set.driver_data = info;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 4b7219be1bb8..8c1c7f4211eb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -354,7 +354,6 @@ static int __init z2_init(void)
tag_set.nr_maps = 1;
tag_set.queue_depth = 16;
tag_set.numa_node = NUMA_NO_NODE;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&tag_set);
if (ret)
goto out_unregister_blkdev;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 45df5eeabc5e..7903a4da40ac 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1468,6 +1468,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index a1153ada74d2..0a60660fc8ce 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -553,6 +553,9 @@ static const char *btbcm_get_board_name(struct device *dev)
/* get rid of any '/' in the compatible string */
board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type)
+ return NULL;
+
strreplace(board_type, '/', '-');
return board_type;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d496cf2c3411..d2540b28bc7a 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/unaligned.h>
@@ -506,13 +507,13 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
bt_dev_info(hdev, "Secure boot is %s",
- version->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(version->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- version->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- version->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- version->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
version->min_fw_build_nn, version->min_fw_build_cw,
2000 + version->min_fw_build_yy);
@@ -927,16 +928,16 @@ int btintel_read_boot_params(struct hci_dev *hdev,
le16_to_cpu(params->dev_revid));
bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(params->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- params->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- params->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- params->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
params->min_fw_build_nn, params->min_fw_build_cw,
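
The ternaries become calls into <linux/string_choices.h>, which maps a boolean onto a fixed pair of strings. The three helpers this series uses, in a minimal sketch:

	#include <linux/string_choices.h>

	static void example_log_states(bool secure, bool ps, bool vote)
	{
		pr_info("Secure boot is %s\n", str_enabled_disabled(secure));
		pr_info("PS Mode: %s\n", str_enable_disable(ps));
		pr_info("Vote serial clock %s\n", str_true_false(vote));
	}
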
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 18f34998a120..e26b07a9387d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/mmc/sdio_func.h>
@@ -88,7 +89,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
else
adapter->psmode = 0;
BT_DBG("PS Mode:%s",
- (adapter->psmode) ? "Enable" : "Disable");
+ str_enable_disable(adapter->psmode));
} else {
BT_DBG("PS Mode command failed");
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 224eafc27dbe..68846c5bd4f7 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -1329,7 +1329,6 @@ int btmtk_usb_setup(struct hci_dev *hdev)
fwname = FIRMWARE_MT7668;
break;
case 0x7922:
- case 0x7961:
case 0x7925:
/* Reset the device to ensure it's in the initial state before
* downloading the firmware.
@@ -1337,7 +1336,8 @@ int btmtk_usb_setup(struct hci_dev *hdev)
if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
btmtk_usb_subsys_reset(hdev, dev_id);
-
+ fallthrough;
+ case 0x7961:
btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
fw_version, fw_flavor);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index a1dfcfe43d3a..bd5464bde174 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1249,7 +1249,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
+static void btmtksdio_reset(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
u32 status;
@@ -1360,7 +1360,7 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->open = btmtksdio_open;
hdev->close = btmtksdio_close;
- hdev->cmd_timeout = btmtksdio_cmd_timeout;
+ hdev->reset = btmtksdio_reset;
hdev->flush = btmtksdio_flush;
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index dfbbac92242a..cdf09d9a9ad2 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+static bool qca_filename_has_extension(const char *filename)
+{
+ const char *suffix = strrchr(filename, '.');
+
+ /* File extensions require a dot, but not as the first or last character */
+ if (!suffix || suffix == filename || *(suffix + 1) == '\0')
+ return false;
+
+ /* Avoid matching directories with names that look like files with extensions */
+ return !strchr(suffix, '/');
+}
+
+static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
+{
+ char fwname[64];
+ const char *suffix;
+
+ /* If the NVM file name has an extension, replace it with .bin */
+ if (qca_filename_has_extension(filename)) {
+ suffix = strrchr(filename, '.');
+ strscpy(fwname, filename, suffix - filename + 1);
+ snprintf(fwname + (suffix - filename),
+ sizeof(fwname) - (suffix - filename), ".bin");
+ /* If nvm file is already the default one, return false to skip the retry. */
+ if (strcmp(fwname, filename) == 0)
+ return false;
+
+ snprintf(filename, max_size, "%s", fwname);
+ return true;
+ }
+ return false;
+}
+
static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
u8 *fw_data, size_t fw_size,
@@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
config->fwname, ret);
return ret;
}
+ }
+ /* If the board-specific file is missing, try loading the default
+ * one, unless that was attempted already.
+ */
+ else if (config->type == TLV_TYPE_NVM &&
+ qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
} else {
bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
config->fwname, ret);
@@ -700,39 +746,43 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
return 0;
}
-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
+ const char *stem, enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
const char *variant;
+ const char *prefix;
- /* hsp gf chip */
- if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
- variant = "g";
- else
- variant = "";
+ /* Set the default values for variant and prefix */
+ variant = "";
+ prefix = "b";
- if (bid == 0x0)
- snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
- else
- snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
-}
+ if (soc_type == QCA_QCA2066)
+ prefix = "";
-static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
- const char *stem, u8 rom_ver, u16 bid)
-{
- if (bid == 0x0)
- snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
- else if (bid & 0xff00)
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%x", stem, rom_ver, bid);
- else
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%02x", stem, rom_ver, bid);
+ if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
+ /* If the chip is manufactured by GlobalFoundries */
+ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+ variant = "g";
+ }
+
+ if (rom_ver != 0) {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
+ variant, prefix, bid);
+ } else {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
+ }
}
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name, const char *rampatch_name)
{
struct qca_fw_config config = {};
int err;
@@ -761,44 +811,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- switch (soc_type) {
- case QCA_WCN3990:
- case QCA_WCN3991:
- case QCA_WCN3998:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN3988:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/apbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA2066:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA6390:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/htbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN6750:
- /* Choose mbn file by default.If mbn file is not found
- * then choose tlv file
- */
- config.type = ELF_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/msbtfw%02x.mbn", rom_ver);
- break;
- case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN7850:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hmtbtfw%02x.tlv", rom_ver);
- break;
- default:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/rampatch_%08x.bin", soc_ver);
+ if (rampatch_name) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name);
+ } else {
+ switch (soc_type) {
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN3988:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA2066:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA6390:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN6750:
+ /* Choose the mbn file by default. If it is not
+ * found, fall back to the tlv file.
+ */
+ config.type = ELF_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msbtfw%02x.mbn", rom_ver);
+ break;
+ case QCA_WCN6855:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN7850:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hmtbtfw%02x.tlv", rom_ver);
+ break;
+ default:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
}
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
@@ -816,8 +870,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/%s", firmware_name);
+ /* The firmware name has an extension, use it directly */
+ if (qca_filename_has_extension(firmware_name)) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
+ } else {
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ firmware_name, soc_type, ver, 0, boardid);
+ }
} else {
switch (soc_type) {
case QCA_WCN3990:
@@ -836,8 +896,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/apnv%02x.bin", rom_ver);
break;
case QCA_QCA2066:
- qca_generate_hsp_nvm_name(config.fwname,
- sizeof(config.fwname), ver, rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname,
+ sizeof(config.fwname), "hpnv", soc_type, ver,
+ rom_ver, boardid);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
@@ -848,13 +909,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/msnv%02x.bin", rom_ver);
break;
case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpnv%02x.bin", rom_ver);
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hpnv", soc_type, ver, rom_ver, boardid);
break;
case QCA_WCN7850:
- qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hmtnv", soc_type, ver, rom_ver, boardid);
break;
-
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index bb5207d7a8c7..9d28c8800225 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -161,7 +161,7 @@ enum qca_btsoc_type {
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name);
+ const char *firmware_name, const char *rampatch_name);
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
@@ -176,7 +176,8 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name,
+ const char *rampatch_name)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 83025f457ca0..d3eba0d4a57d 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1351,12 +1351,14 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
btrtl_set_quirks(hdev, btrtl_dev);
- hci_set_hw_info(hdev,
+ if (btrtl_dev->ic_info) {
+ hci_set_hw_info(hdev,
"RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
btrtl_dev->ic_info->lmp_subver,
btrtl_dev->ic_info->hci_rev,
btrtl_dev->ic_info->hci_ver,
btrtl_dev->ic_info->hci_bus);
+ }
btrtl_free(btrtl_dev);
return ret;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 279fe6c115fa..9aa018d4f6f5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -377,6 +377,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -481,6 +483,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
+
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -600,6 +605,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3576), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
@@ -610,6 +617,8 @@ static const struct usb_device_id quirks_table[] = {
/* MediaTek MT7922 Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3610), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -674,6 +683,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -879,7 +890,6 @@ struct btusb_data {
int (*disconnect)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
- unsigned cmd_timeout_cnt;
struct qca_dump_info qca_dump;
};
@@ -906,15 +916,12 @@ static void btusb_reset(struct hci_dev *hdev)
usb_queue_reset_device(data->intf);
}
-static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
+static void btusb_intel_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
struct btintel_data *intel_data = hci_get_priv(hdev);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (intel_data->acpi_reset_method) {
if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) {
bt_dev_err(hdev, "acpi: last reset failed ? Not resetting again");
@@ -987,7 +994,7 @@ static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev,
}
}
-static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
+static void btusb_rtl_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
@@ -997,9 +1004,6 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (!reset_gpio) {
btusb_reset(hdev);
return;
@@ -1034,19 +1038,16 @@ static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
}
-static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
+static void btusb_qca_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) {
- bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout");
+ bt_dev_info(hdev, "Ramdump in progress, defer reset");
return;
}
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (reset_gpio) {
bt_dev_err(hdev, "Reset qca device via bt_en gpio");
@@ -3645,6 +3646,32 @@ static const struct file_operations force_poll_sync_fops = {
.llseek = default_llseek,
};
+static ssize_t isoc_alt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct btusb_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", data->isoc_altsetting);
+}
+
+static ssize_t isoc_alt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct btusb_data *data = dev_get_drvdata(dev);
+ int alt;
+ int ret;
+
+ if (kstrtoint(buf, 10, &alt))
+ return -EINVAL;
+
+ ret = btusb_switch_alt_setting(data->hdev, alt);
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(isoc_alt);
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3841,7 +3868,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Transport specific configuration */
hdev->send = btusb_send_frame_intel;
- hdev->cmd_timeout = btusb_intel_cmd_timeout;
+ hdev->reset = btusb_intel_reset;
if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT)
btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT);
@@ -3861,7 +3888,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
- hdev->cmd_timeout = btmtk_reset_sync;
+ hdev->reset = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
hdev->send = btusb_send_frame_mtk;
set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
@@ -3893,7 +3920,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ hdev->reset = btusb_qca_reset;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
btusb_check_needs_reset_resume(intf);
}
@@ -3907,7 +3934,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ hdev->reset = btusb_qca_reset;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
hci_set_msft_opcode(hdev, 0xFD70);
}
@@ -3926,7 +3953,7 @@ static int btusb_probe(struct usb_interface *intf,
btrtl_set_driver_name(hdev, btusb_driver.name);
hdev->setup = btusb_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
- hdev->cmd_timeout = btusb_rtl_cmd_timeout;
+ hdev->reset = btusb_rtl_reset;
hdev->hw_error = btusb_rtl_hw_error;
/* Realtek devices need to set remote wakeup on auto-suspend */
@@ -4008,6 +4035,10 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc, data);
if (err < 0)
goto out_free_dev;
+
+ err = device_create_file(&intf->dev, &dev_attr_isoc_alt);
+ if (err)
+ goto out_free_dev;
}
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
@@ -4054,8 +4085,10 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
usb_set_intfdata(data->intf, NULL);
- if (data->isoc)
+ if (data->isoc) {
+ device_remove_file(&intf->dev, &dev_attr_isoc_alt);
usb_set_intfdata(data->isoc, NULL);
+ }
if (data->diag)
usb_set_intfdata(data->diag, NULL);
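
The new isoc_alt attribute lets userspace read and switch the isochronous altsetting through sysfs without any HCI traffic. A hypothetical userspace sketch (the interface path depends on bus topology and is shown only as an example):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int example_set_isoc_alt(const char *attr_path, const char *alt)
	{
		/* e.g. /sys/bus/usb/devices/3-1:1.1/isoc_alt (path illustrative) */
		int fd = open(attr_path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, alt, strlen(alt)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}
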
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 37129e6cb0eb..0ac2168f1dc4 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -31,6 +31,7 @@
#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <linux/string_choices.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
@@ -228,7 +229,7 @@ struct qca_serdev {
u32 init_speed;
u32 oper_speed;
bool bdaddr_property_broken;
- const char *firmware_name;
+ const char *firmware_name[2];
};
static int qca_regulator_enable(struct qca_serdev *qcadev);
@@ -258,7 +259,18 @@ static const char *qca_get_firmware_name(struct hci_uart *hu)
if (hu->serdev) {
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
- return qsd->firmware_name;
+ return qsd->firmware_name[0];
+ } else {
+ return NULL;
+ }
+}
+
+static const char *qca_get_rampatch_name(struct hci_uart *hu)
+{
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ return qsd->firmware_name[1];
} else {
return NULL;
}
@@ -332,8 +344,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
else
__serial_clock_off(hu->tty);
- BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
- vote ? "true" : "false");
+ BT_DBG("Vote serial clock %s(%s)", str_true_false(new_vote),
+ str_true_false(vote));
diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
@@ -1638,7 +1650,7 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
-static void qca_cmd_timeout(struct hci_dev *hdev)
+static void qca_reset(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
@@ -1855,6 +1867,7 @@ static int qca_setup(struct hci_uart *hu)
unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
+ const char *rampatch_name = qca_get_rampatch_name(hu);
int ret;
struct qca_btsoc_version ver;
struct qca_serdev *qcadev;
@@ -1963,12 +1976,12 @@ retry:
/* Setup patch / NVM configurations */
ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver,
- firmware_name);
+ firmware_name, rampatch_name);
if (!ret) {
clear_bit(QCA_IBS_DISABLED, &qca->flags);
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
- hu->hdev->cmd_timeout = qca_cmd_timeout;
+ hu->hdev->reset = qca_reset;
if (hu->serdev) {
if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
hu->hdev->wakeup = qca_wakeup;
@@ -2202,7 +2215,7 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
hu->hdev->hw_error = NULL;
- hu->hdev->cmd_timeout = NULL;
+ hu->hdev->reset = NULL;
del_timer_sync(&qca->wake_retrans_timer);
del_timer_sync(&qca->tx_idle_timer);
@@ -2309,8 +2322,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- device_property_read_string(&serdev->dev, "firmware-name",
- &qcadev->firmware_name);
+ device_property_read_string_array(&serdev->dev, "firmware-name",
+ qcadev->firmware_name, ARRAY_SIZE(qcadev->firmware_name));
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
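
firmware-name is now read as a string array: element 0 keeps the existing NVM-override meaning and element 1, when present, names the rampatch. A sketch of reading such a property, mirroring the probe code above:

	static void example_read_fw_names(struct device *dev)
	{
		const char *names[2] = { NULL, NULL };

		/* missing trailing entries simply stay NULL */
		device_property_read_string_array(dev, "firmware-name",
						  names, ARRAY_SIZE(names));
	}
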
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 64b097e830d4..85aceab5eac6 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -777,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_gdrom_setupcd();
err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (err)
goto probe_fail_free_cd_info;
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 7296127181ec..ee2bdc7ed0da 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client)
ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
"%s%d", "ipmb-",
client->adapter->nr);
+ if (!ipmb_dev->miscdev.name)
+ return -ENOMEM;
+
ipmb_dev->miscdev.fops = &ipmb_fops;
ipmb_dev->miscdev.parent = &client->dev;
ret = misc_register(&ipmb_dev->miscdev);
@@ -355,11 +358,13 @@ static const struct i2c_device_id ipmb_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ipmb_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id acpi_ipmb_id[] = {
{ "IPMB0001", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id);
+#endif
static struct i2c_driver ipmb_driver = {
.driver = {
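
Wrapping the table in CONFIG_ACPI avoids a defined-but-unused warning on !ACPI builds; the usual companion is to reference it through ACPI_PTR(), which evaluates to NULL in that configuration. A sketch of the wiring, assuming the driver struct references the table this way:

	static struct i2c_driver example_driver = {
		.driver = {
			.name = "example-i2c",
			/* NULL when CONFIG_ACPI is not set */
			.acpi_match_table = ACPI_PTR(acpi_ipmb_id),
		},
	};
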
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 332082e02ea5..e6ba35b71f10 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -122,12 +122,9 @@ out:
static int ipmi_release(struct inode *inode, struct file *file)
{
struct ipmi_file_private *priv = file->private_data;
- int rv;
struct ipmi_recv_msg *msg, *next;
- rv = ipmi_destroy_user(priv->user);
- if (rv)
- return rv;
+ ipmi_destroy_user(priv->user);
list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
ipmi_free_recv_msg(msg);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e12b531f5c2f..1e5313748f8b 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1398,13 +1398,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
module_put(owner);
}
-int ipmi_destroy_user(struct ipmi_user *user)
+void ipmi_destroy_user(struct ipmi_user *user)
{
_ipmi_destroy_user(user);
kref_put(&user->refcount, free_user);
-
- return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 941d2dcc8c9d..05f17e3e6207 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -699,8 +699,6 @@ static int __init ipmi_poweroff_init(void)
#ifdef MODULE
static void __exit ipmi_poweroff_cleanup(void)
{
- int rv;
-
#ifdef CONFIG_PROC_FS
unregister_sysctl_table(ipmi_table_header);
#endif
@@ -708,9 +706,7 @@ static void __exit ipmi_poweroff_cleanup(void)
ipmi_smi_watcher_unregister(&smi_watcher);
if (ready) {
- rv = ipmi_destroy_user(ipmi_user);
- if (rv)
- pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
+ ipmi_destroy_user(ipmi_user);
pm_power_off = old_poweroff_func;
}
}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index b83d55685b22..8c0ea637aba0 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -118,7 +118,7 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
if (io.irq)
io.irq_setup = ipmi_std_irq_setup;
- dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ dev_info(&pdev->dev, "%pR regsize %u spacing %u irq %d\n",
&pdev->resource[0], io.regsize, io.regspacing, io.irq);
return ipmi_si_add_smi(&io);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 335eea80054e..f1875b2bebbc 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1064,7 +1064,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
static void ipmi_unregister_watchdog(int ipmi_intf)
{
- int rv;
struct ipmi_user *loc_user = watchdog_user;
if (!loc_user)
@@ -1089,9 +1088,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
mutex_lock(&ipmi_watchdog_mutex);
/* Disconnect from IPMI. */
- rv = ipmi_destroy_user(loc_user);
- if (rv)
- pr_warn("error unlinking from IPMI: %d\n", rv);
+ ipmi_destroy_user(loc_user);
/* If it comes back, restart it properly. */
ipmi_start_timer_on_heartbeat = 1;
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
index a14fafc583d4..310f17dd9511 100644
--- a/drivers/char/ipmi/ssif_bmc.c
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -292,7 +292,6 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
ssif_bmc->nbytes_processed = 0;
ssif_bmc->remain_len = 0;
ssif_bmc->busy = false;
- memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
wake_up_all(&ssif_bmc->wait_queue);
}
@@ -744,9 +743,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
ssif_bmc->aborting = true;
}
} else if (ssif_bmc->state == SSIF_RES_SENDING) {
- if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
+ if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) {
+ memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
/* Invalidate response buffer to denote it is sent */
complete_response(ssif_bmc);
+ }
ssif_bmc->state = SSIF_READY;
}
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 69533d0bfb51..cf02ec646f46 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
return n == 0;
}
+static void tpm_bios_log_free(void *data)
+{
+ kvfree(data);
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ log->bios_event_log = kvmalloc(len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
goto err;
}
+ ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
+ if (ret) {
+ log->bios_event_log = NULL;
+ goto err;
+ }
+
return format;
err:
- devm_kfree(&chip->dev, log->bios_event_log);
+ tpm_bios_log_free(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
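
The log buffer moves from devm_kmalloc() to kvmalloc() while keeping device-lifetime cleanup by registering the free routine as a devres action. The same pattern with the _or_reset variant, which also frees the buffer if registering the action fails, as a minimal sketch:

	static void example_kvfree_action(void *p)
	{
		kvfree(p);
	}

	static int example_alloc_log(struct device *dev, void **buf, size_t len)
	{
		*buf = kvmalloc(len, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		/* frees *buf at driver detach, or immediately if registration fails */
		return devm_add_action_or_reset(dev, example_kvfree_action, *buf);
	}
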
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
index 65d422a588e1..9d178afc73bd 100644
--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -292,7 +292,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
vco = vco_pre * f;
}
- delta = abs(target_rate - vco);
+ delta = abs(target_vco_rate - vco);
if (delta < best_delta) {
best_delta = delta;
best_r = r;
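
The one-line fix makes the search compare like with like: candidate VCO rates must be measured against the target VCO rate, not the post-divider target rate. Illustratively, if target_rate = 100 MHz scales to target_vco_rate = 800 MHz through the output divider, a perfect candidate vco = 800 MHz scored delta = |100 MHz - 800 MHz| = 700 MHz under the old expression, while the correct delta is 0.
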
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 8e3684ba2c74..9128a06b860d 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -24,4 +24,5 @@ obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
+obj-$(CONFIG_SOC_SAMA7D65) += sama7d65.o
obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 15c46489ba85..7a544e429d34 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -20,7 +20,7 @@
#define PMC_MCR_CSS_SHIFT (16)
-#define MASTER_MAX_ID 4
+#define MASTER_MAX_ID 9
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index fda041102224..cefd9948e103 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -23,7 +23,7 @@
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define PLL_MAX_ID 7
+#define PLL_MAX_ID 9
struct sam9x60_pll_core {
struct regmap *regmap;
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 5aa9c1f1c886..acf780a81589 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -151,6 +151,7 @@ static struct syscore_ops pmc_syscore_ops = {
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
+ { .compatible = "microchip,sama7d65-pmc", },
{ /* sentinel */ }
};
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
new file mode 100644
index 000000000000..a5d40df8b2f2
--- /dev/null
+++ b/drivers/clk/at91/sama7d65.c
@@ -0,0 +1,1375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAMA7D65 PMC code.
+ *
+ * Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Ryan Wanner <ryan.wanner@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mck0_lock);
+static DEFINE_SPINLOCK(pmc_mckX_lock);
+
+#define PMC_INDEX_MAX 25
+
+/*
+ * PLL clocks identifiers
+ * @PLL_ID_CPU: CPU PLL identifier
+ * @PLL_ID_SYS: System PLL identifier
+ * @PLL_ID_DDR: DDR PLL identifier
+ * @PLL_ID_GPU: Graphics subsystem PLL identifier
+ * @PLL_ID_BAUD: Baud PLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_ETH: Ethernet PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_USB: USB PLL identifier
+ */
+enum pll_ids {
+ PLL_ID_CPU,
+ PLL_ID_SYS,
+ PLL_ID_DDR,
+ PLL_ID_GPU,
+ PLL_ID_BAUD,
+ PLL_ID_AUDIO,
+ PLL_ID_ETH,
+ PLL_ID_LVDS,
+ PLL_ID_USB,
+ PLL_ID_MAX
+};
+
+/*
+ * PLL component identifier
+ * @PLL_COMPID_FRAC: Fractional PLL component identifier
+ * @PLL_COMPID_DIV0: 1st PLL divider component identifier
+ * @PLL_COMPID_DIV1: 2nd PLL divider component identifier
+ */
+enum pll_component_id {
+ PLL_COMPID_FRAC,
+ PLL_COMPID_DIV0,
+ PLL_COMPID_DIV1,
+ PLL_COMPID_MAX
+};
+
+/*
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIVPMC dividers. */
+static const struct clk_pll_layout pll_layout_divpmc = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_layout_divio = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * CPU PLL output range.
+ * Notice: The upper limit has been setup to 1000000002 due to hardware
+ * block which cannot output exactly 1GHz.
+ */
+static const struct clk_range cpu_pll_outputs[] = {
+ { .min = 2343750, .max = 1000000002 },
+};
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/*
+ * Min: fCOREPLLCK = 600 MHz, PMC_PLL_CTRL0.DIVPMC = 255
+ * Max: fCOREPLLCK = 800 MHz, PMC_PLL_CTRL0.DIVPMC = 0
+ */
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 16406250, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 480000000, .max = 480000000 },
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+/* CPU PLL characteristics. */
+static const struct clk_pll_characteristics cpu_pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(cpu_pll_outputs),
+ .output = cpu_pll_outputs,
+ .core_output = core_outputs,
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+ .core_output = core_outputs,
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .upll = true,
+};
+
+/*
+ * SAMA7D65 PLL possible parents
+ * @SAMA7D65_PLL_PARENT_MAINCK: MAINCK is a PLL parent
+ * @SAMA7D65_PLL_PARENT_MAIN_XTAL: MAIN XTAL is a PLL parent
+ * @SAMA7D65_PLL_PARENT_FRACCK: Frac PLL is a PLL parent (for PLL dividers)
+ */
+enum sama7d65_pll_parent {
+ SAMA7D65_PLL_PARENT_MAINCK,
+ SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ SAMA7D65_PLL_PARENT_FRACCK
+};
+
+/*
+ * PLL clocks description
+ * @n: clock name
+ * @l: clock layout
+ * @c: clock characteristics
+ * @hw: pointer to clk_hw
+ * @t: clock type
+ * @f: clock flags
+ * @p: clock parent
+ * @eid: export index in sama7d65->chws[] array
+ * @safe_div: intermediate divider that needs to be set on PRE_RATE_CHANGE
+ * notification
+ */
+static struct sama7d65_pll {
+ const char *n;
+ const struct clk_pll_layout *l;
+ const struct clk_pll_characteristics *c;
+ struct clk_hw *hw;
+ unsigned long f;
+ enum sama7d65_pll_parent p;
+ u8 t;
+ u8 eid;
+ u8 safe_div;
+} sama7d65_plls[][PLL_COMPID_MAX] = {
+ [PLL_ID_CPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "cpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds cpupll_divpmcck which feeds CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "cpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL,
+ /*
+ * Safe div=15 should be safe even for switching b/w 1GHz and
+ * 90MHz (frac pll might go up to 1.2GHz).
+ */
+ .safe_div = 15,
+ },
+ },
+
+ [PLL_ID_SYS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "syspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds syspll_divpmcck which may feed critical parts
+ * of the systems like timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "syspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /*
+ * This may feed critical parts of the systems like timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL,
+ },
+ },
+
+ [PLL_ID_DDR] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ddrpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds ddrpll_divpmcck which feeds DDR. It should not
+ * be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ddrpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds DDR. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+ },
+
+ [PLL_ID_GPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "gpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "gpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
+ },
+
+ [PLL_ID_BAUD] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "baudpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "baudpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_BAUDPLL,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "audiopll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "audiopll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL,
+ },
+
+ [PLL_COMPID_DIV1] = {
+ .n = "audiopll_diviock",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divio,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL,
+ },
+ },
+
+ [PLL_ID_ETH] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ethpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ethpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_ETHPLL,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "lvdspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "lvdspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_LVDSPLL,
+ },
+ },
+
+ [PLL_ID_USB] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "usbpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "usbpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ },
+ },
+};
+
+/* Used to create an array entry identifying a PLL by its components. */
+#define PLL_IDS_TO_ARR_ENTRY(_id, _comp) { PLL_ID_##_id, PLL_COMPID_##_comp }
+
+/*
+ * Master clock (MCK[0..9]) description
+ * @n: clock name
+ * @ep: extra parents names array (entry formed by PLL components
+ *	identifiers (see enum pll_component_id))
+ * @hw: pointer to clk_hw
+ * @ep_chg_id: index in parents array that specifies the changeable
+ *	parent
+ * @ep_count: extra parents count
+ * @ep_mux_table: mux table for extra parents
+ * @id: clock id
+ * @eid: export index in sama7d65->chws[] array
+ * @c: true if clock is critical and cannot be disabled
+ */
+static struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } ep[4];
+ struct clk_hw *hw;
+ int ep_chg_id;
+ u8 ep_count;
+ u8 ep_mux_table[4];
+ u8 id;
+ u8 eid;
+ u8 c;
+} sama7d65_mckx[] = {
+ { .n = "mck0", }, /* Dummy entry for MCK0 to store hw in probe. */
+ { .n = "mck1",
+ .id = 1,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK1,
+ .c = 1, },
+
+ { .n = "mck2",
+ .id = 2,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck3",
+ .id = 3,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK3,
+ .c = 1, },
+
+ { .n = "mck4",
+ .id = 4,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck5",
+ .id = 5,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK5,
+ .c = 1, },
+
+ { .n = "mck6",
+ .id = 6,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1,
+ .c = 1, },
+
+ { .n = "mck7",
+ .id = 7,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck8",
+ .id = 8,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck9",
+ .id = 9,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+};
+
+/*
+ * System clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ u8 id;
+} sama7d65_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8, },
+ { .n = "pck1", .p = "prog1", .id = 9, },
+ { .n = "pck2", .p = "prog2", .id = 10, },
+ { .n = "pck3", .p = "prog3", .id = 11, },
+ { .n = "pck4", .p = "prog4", .id = 12, },
+ { .n = "pck5", .p = "prog5", .id = 13, },
+ { .n = "pck6", .p = "prog6", .id = 14, },
+ { .n = "pck7", .p = "prog7", .id = 15, },
+};
+
+/* Mux table for programmable clocks. */
+static u32 sama7d65_prog_mux_table[] = { 0, 1, 2, 5, 7, 8, 9, 10, 12 };
+
+/*
+ * Peripheral clock parent hw identifier (used to index in sama7d65_mckx[])
+ * @PCK_PARENT_HW_MCK0: pck parent hw identifier is MCK0
+ * @PCK_PARENT_HW_MCK1: pck parent hw identifier is MCK1
+ * @PCK_PARENT_HW_MCK2: pck parent hw identifier is MCK2
+ * @PCK_PARENT_HW_MCK3: pck parent hw identifier is MCK3
+ * @PCK_PARENT_HW_MCK4: pck parent hw identifier is MCK4
+ * @PCK_PARENT_HW_MCK5: pck parent hw identifier is MCK5
+ * @PCK_PARENT_HW_MCK6: pck parent hw identifier is MCK6
+ * @PCK_PARENT_HW_MCK7: pck parent hw identifier is MCK7
+ * @PCK_PARENT_HW_MCK8: pck parent hw identifier is MCK8
+ * @PCK_PARENT_HW_MCK9: pck parent hw identifier is MCK9
+ * @PCK_PARENT_HW_MAX: max identifier
+ */
+enum sama7d65_pck_parent_hw_id {
+ PCK_PARENT_HW_MCK0,
+ PCK_PARENT_HW_MCK1,
+ PCK_PARENT_HW_MCK2,
+ PCK_PARENT_HW_MCK3,
+ PCK_PARENT_HW_MCK4,
+ PCK_PARENT_HW_MCK5,
+ PCK_PARENT_HW_MCK6,
+ PCK_PARENT_HW_MCK7,
+ PCK_PARENT_HW_MCK8,
+ PCK_PARENT_HW_MCK9,
+ PCK_PARENT_HW_MAX
+};
+
+/*
+ * Peripheral clock description
+ * @n: clock name
+ * @p: clock parent hw id
+ * @r: clock range values
+ * @id: clock id
+ * @chgp: index in parent array of the changeable parent
+ */
+static struct {
+ const char *n;
+ enum sama7d65_pck_parent_hw_id p;
+ struct clk_range r;
+ u8 chgp;
+ u8 id;
+} sama7d65_periphck[] = {
+ { .n = "pioA_clk", .p = PCK_PARENT_HW_MCK0, .id = 10, },
+ { .n = "securam_clk", .p = PCK_PARENT_HW_MCK0, .id = 17, },
+ { .n = "sfr_clk", .p = PCK_PARENT_HW_MCK7, .id = 18, },
+ { .n = "hsmc_clk", .p = PCK_PARENT_HW_MCK5, .id = 20, },
+ { .n = "xdmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 21, },
+ { .n = "xdmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 22, },
+ { .n = "xdmac2_clk", .p = PCK_PARENT_HW_MCK1, .id = 23, },
+ { .n = "acc_clk", .p = PCK_PARENT_HW_MCK7, .id = 24, },
+ { .n = "aes_clk", .p = PCK_PARENT_HW_MCK6, .id = 26, },
+ { .n = "tzaesbasc_clk", .p = PCK_PARENT_HW_MCK8, .id = 27, },
+ { .n = "asrc_clk", .p = PCK_PARENT_HW_MCK9, .id = 29, .r = { .max = 200000000, }, },
+ { .n = "cpkcc_clk", .p = PCK_PARENT_HW_MCK0, .id = 30, },
+ { .n = "eic_clk", .p = PCK_PARENT_HW_MCK7, .id = 33, },
+ { .n = "flex0_clk", .p = PCK_PARENT_HW_MCK7, .id = 34, },
+ { .n = "flex1_clk", .p = PCK_PARENT_HW_MCK7, .id = 35, },
+ { .n = "flex2_clk", .p = PCK_PARENT_HW_MCK7, .id = 36, },
+ { .n = "flex3_clk", .p = PCK_PARENT_HW_MCK7, .id = 37, },
+ { .n = "flex4_clk", .p = PCK_PARENT_HW_MCK8, .id = 38, },
+ { .n = "flex5_clk", .p = PCK_PARENT_HW_MCK8, .id = 39, },
+ { .n = "flex6_clk", .p = PCK_PARENT_HW_MCK8, .id = 40, },
+ { .n = "flex7_clk", .p = PCK_PARENT_HW_MCK8, .id = 41, },
+ { .n = "flex8_clk", .p = PCK_PARENT_HW_MCK9, .id = 42, },
+ { .n = "flex9_clk", .p = PCK_PARENT_HW_MCK9, .id = 43, },
+ { .n = "flex10_clk", .p = PCK_PARENT_HW_MCK9, .id = 44, },
+ { .n = "gmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 46, },
+ { .n = "gmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 47, },
+ { .n = "gmac0_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 49, },
+ { .n = "gmac1_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 50, },
+ { .n = "icm_clk", .p = PCK_PARENT_HW_MCK5, .id = 53, },
+ { .n = "i2smcc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 54, .r = { .max = 200000000, }, },
+ { .n = "i2smcc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 55, .r = { .max = 200000000, }, },
+ { .n = "lcd_clk", .p = PCK_PARENT_HW_MCK3, .id = 56, },
+ { .n = "matrix_clk", .p = PCK_PARENT_HW_MCK5, .id = 57, },
+ { .n = "mcan0_clk", .p = PCK_PARENT_HW_MCK5, .id = 58, .r = { .max = 200000000, }, },
+ { .n = "mcan1_clk", .p = PCK_PARENT_HW_MCK5, .id = 59, .r = { .max = 200000000, }, },
+ { .n = "mcan2_clk", .p = PCK_PARENT_HW_MCK5, .id = 60, .r = { .max = 200000000, }, },
+ { .n = "mcan3_clk", .p = PCK_PARENT_HW_MCK5, .id = 61, .r = { .max = 200000000, }, },
+ { .n = "mcan4_clk", .p = PCK_PARENT_HW_MCK5, .id = 62, .r = { .max = 200000000, }, },
+ { .n = "pdmc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 64, .r = { .max = 200000000, }, },
+ { .n = "pdmc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 65, .r = { .max = 200000000, }, },
+ { .n = "pit64b0_clk", .p = PCK_PARENT_HW_MCK7, .id = 66, },
+ { .n = "pit64b1_clk", .p = PCK_PARENT_HW_MCK7, .id = 67, },
+ { .n = "pit64b2_clk", .p = PCK_PARENT_HW_MCK7, .id = 68, },
+ { .n = "pit64b3_clk", .p = PCK_PARENT_HW_MCK8, .id = 69, },
+ { .n = "pit64b4_clk", .p = PCK_PARENT_HW_MCK8, .id = 70, },
+ { .n = "pit64b5_clk", .p = PCK_PARENT_HW_MCK8, .id = 71, },
+ { .n = "pwm_clk", .p = PCK_PARENT_HW_MCK7, .id = 72, },
+ { .n = "qspi0_clk", .p = PCK_PARENT_HW_MCK5, .id = 73, },
+ { .n = "qspi1_clk", .p = PCK_PARENT_HW_MCK5, .id = 74, },
+ { .n = "sdmmc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 75, },
+ { .n = "sdmmc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 76, },
+ { .n = "sdmmc2_clk", .p = PCK_PARENT_HW_MCK1, .id = 77, },
+ { .n = "sha_clk", .p = PCK_PARENT_HW_MCK6, .id = 78, },
+ { .n = "spdifrx_clk", .p = PCK_PARENT_HW_MCK9, .id = 79, .r = { .max = 200000000, }, },
+ { .n = "spdiftx_clk", .p = PCK_PARENT_HW_MCK9, .id = 80, .r = { .max = 200000000, }, },
+ { .n = "ssc0_clk", .p = PCK_PARENT_HW_MCK7, .id = 81, .r = { .max = 200000000, }, },
+ { .n = "ssc1_clk", .p = PCK_PARENT_HW_MCK8, .id = 82, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch0_clk", .p = PCK_PARENT_HW_MCK8, .id = 83, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch1_clk", .p = PCK_PARENT_HW_MCK8, .id = 84, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch2_clk", .p = PCK_PARENT_HW_MCK8, .id = 85, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch0_clk", .p = PCK_PARENT_HW_MCK5, .id = 86, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch1_clk", .p = PCK_PARENT_HW_MCK5, .id = 87, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch2_clk", .p = PCK_PARENT_HW_MCK5, .id = 88, .r = { .max = 200000000, }, },
+ { .n = "tcpca_clk", .p = PCK_PARENT_HW_MCK5, .id = 89, },
+ { .n = "tcpcb_clk", .p = PCK_PARENT_HW_MCK5, .id = 90, },
+ { .n = "tdes_clk", .p = PCK_PARENT_HW_MCK6, .id = 91, },
+ { .n = "trng_clk", .p = PCK_PARENT_HW_MCK6, .id = 92, },
+ { .n = "udphsa_clk", .p = PCK_PARENT_HW_MCK5, .id = 99, },
+ { .n = "udphsb_clk", .p = PCK_PARENT_HW_MCK5, .id = 100, },
+ { .n = "uhphs_clk", .p = PCK_PARENT_HW_MCK5, .id = 101, },
+ { .n = "dsi_clk", .p = PCK_PARENT_HW_MCK3, .id = 103, },
+ { .n = "lvdsc_clk", .p = PCK_PARENT_HW_MCK3, .id = 104, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents (entry formed by PLL components identifiers
+ * (see enum pll_component_id))
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: id in parent array of changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sama7d65_gck[] = {
+ { .n = "adc_gclk",
+ .id = 25,
+ .r = { .max = 100000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 8, 9, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "asrc_gclk",
+ .id = 29,
+ .r = { .max = 200000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex0_gclk",
+ .id = 34,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex1_gclk",
+ .id = 35,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex2_gclk",
+ .id = 36,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex3_gclk",
+ .id = 37,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex4_gclk",
+ .id = 38,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex5_gclk",
+ .id = 39,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex6_gclk",
+ .id = 40,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex7_gclk",
+ .id = 41,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex8_gclk",
+ .id = 42,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex9_gclk",
+ .id = 43,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex10_gclk",
+ .id = 44,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac0_gclk",
+ .id = 46,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac1_gclk",
+ .id = 47,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac0_tsu_gclk",
+ .id = 49,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac1_tsu_gclk",
+ .id = 50,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc0_gclk",
+ .id = 54,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc1_gclk",
+ .id = 55,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "lcdc_gclk",
+ .id = 56,
+ .r = { .max = 90000000 },
+ .pp_count = 0,
+ .pp_chg_id = INT_MIN,
+ },
+
+ { .n = "mcan0_gclk",
+ .id = 58,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan1_gclk",
+ .id = 59,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan2_gclk",
+ .id = 60,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan3_gclk",
+ .id = 61,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan4_gclk",
+ .id = 62,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "pdmc0_gclk",
+ .id = 64,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9 },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pdmc1_gclk",
+ .id = 65,
+ .r = { .max = 80000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b0_gclk",
+ .id = 66,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b1_gclk",
+ .id = 67,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b2_gclk",
+ .id = 68,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b3_gclk",
+ .id = 69,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b4_gclk",
+ .id = 70,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b5_gclk",
+ .id = 71,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi0_gclk",
+ .id = 73,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi1_gclk",
+ .id = 74,
+ .r = { .max = 266000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "sdmmc0_gclk",
+ .id = 75,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc1_gclk",
+ .id = 76,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc2_gclk",
+ .id = 77,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10 },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "spdifrx_gclk",
+ .id = 79,
+ .r = { .max = 150000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "spdiftx_gclk",
+ .id = 80,
+ .r = { .max = 25000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb0_ch0_gclk",
+ .id = 83,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb1_ch0_gclk",
+ .id = 86,
+ .r = { .max = 67000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "DSI_gclk",
+ .id = 103,
+ .r = { .max = 27000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .pp_mux_table = { 5, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "I3CC_gclk",
+ .id = 105,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+};
+
+/* MCK0 characteristics. */
+static const struct clk_master_characteristics mck0_characteristics = {
+ .output = { .min = 32768, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3, 5 },
+ .have_div3_pres = 1,
+};
+
+/* MCK0 layout. */
+static const struct clk_master_layout mck0_layout = {
+ .mask = 0x773,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout sama7d65_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static void __init sama7d65_pmc_setup(struct device_node *np)
+{
+ const char *main_xtal_name = "main_xtal";
+ struct pmc_data *sama7d65_pmc;
+ const char *parent_names[11];
+ void **alloc_mem = NULL;
+ int alloc_mem_size = 0;
+ struct regmap *regmap;
+ struct clk_hw *hw, *main_rc_hw, *main_osc_hw, *main_xtal_hw;
+ struct clk_hw *td_slck_hw, *md_slck_hw;
+ static struct clk_parent_data parent_data;
+ struct clk_hw *parent_hws[10];
+ bool bypass;
+ int i, j;
+
+ td_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "td_slck"));
+ md_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "md_slck"));
+ main_xtal_hw = __clk_get_hw(of_clk_get_by_name(np, main_xtal_name));
+
+ if (!td_slck_hw || !md_slck_hw || !main_xtal_hw)
+ return;
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama7d65_pmc = pmc_data_allocate(PMC_INDEX_MAX,
+ nck(sama7d65_systemck),
+ nck(sama7d65_periphck),
+ nck(sama7d65_gck), 8);
+ if (!sama7d65_pmc)
+ return;
+
+ alloc_mem = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sama7d65_mckx) + ARRAY_SIZE(sama7d65_gck)),
+ GFP_KERNEL);
+ if (!alloc_mem)
+ goto err_free;
+
+ main_rc_hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(main_rc_hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ parent_data.name = main_xtal_name;
+ parent_data.fw_name = main_xtal_name;
+ main_osc_hw = at91_clk_register_main_osc(regmap, "main_osc", NULL,
+ &parent_data, bypass);
+ if (IS_ERR(main_osc_hw))
+ goto err_free;
+
+ parent_hws[0] = main_rc_hw;
+ parent_hws[1] = main_osc_hw;
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", NULL, parent_hws, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sama7d65_plls[i][j].n)
+ continue;
+
+ switch (sama7d65_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ switch (sama7d65_plls[i][j].p) {
+ case SAMA7D65_PLL_PARENT_MAINCK:
+ parent_hw = sama7d65_pmc->chws[PMC_MAIN];
+ break;
+ case SAMA7D65_PLL_PARENT_MAIN_XTAL:
+ parent_hw = main_xtal_hw;
+ break;
+ default:
+ /* Should not happen. */
+ parent_hw = NULL;
+ break;
+ }
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, parent_hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
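+ /*
+ * Divider components always take the frac component of the
+ * same PLL row as parent (registered just above in this
+ * loop), hence sama7d65_plls[i][0].hw.
+ */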
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, sama7d65_plls[i][0].hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f,
+ sama7d65_plls[i][j].safe_div);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_plls[i][j].hw = hw;
+ if (sama7d65_plls[i][j].eid)
+ sama7d65_pmc->chws[sama7d65_plls[i][j].eid] = hw;
+ }
+ }
+
+ hw = at91_clk_register_master_div(regmap, "mck0", NULL,
+ sama7d65_plls[PLL_ID_CPU][1].hw,
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MCK] = hw;
+ sama7d65_mckx[PCK_PARENT_HW_MCK0].hw = hw;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ for (i = PCK_PARENT_HW_MCK1; i < ARRAY_SIZE(sama7d65_mckx); i++) {
+ u8 num_parents = 3 + sama7d65_mckx[i].ep_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7d65_mckx[i].ep_mux_table,
+ sama7d65_mckx[i].ep_count);
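+ /*
+ * Example: for mck2 above (ep_mux_table = { 5, 6 }) this builds
+ * mux_table = { 0, 1, 2, 5, 6 }: entries 0..2 select md_slck,
+ * td_slck and mainck, while the remaining entries map the extra
+ * PLL parents to their PMC register field values.
+ */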
+ for (j = 0; j < sama7d65_mckx[i].ep_count; j++) {
+ u8 pll_id = sama7d65_mckx[i].ep[j].pll_id;
+ u8 pll_compid = sama7d65_mckx[i].ep[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7d65_mckx[i].ep_count);
+
+ hw = at91_clk_sama7g5_register_master(regmap, sama7d65_mckx[i].n,
+ num_parents, NULL, parent_hws,
+ mux_table, &pmc_mckX_lock,
+ sama7d65_mckx[i].id,
+ sama7d65_mckx[i].c,
+ sama7d65_mckx[i].ep_chg_id);
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_mckx[i].hw = hw;
+ if (sama7d65_mckx[i].eid)
+ sama7d65_pmc->chws[sama7d65_mckx[i].eid] = hw;
+ }
+
+ parent_names[0] = "syspll_divpmcck";
+ parent_names[1] = "usbpll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_plls[PLL_ID_SYS][PLL_COMPID_DIV0].hw;
+ parent_hws[4] = sama7d65_plls[PLL_ID_DDR][PLL_COMPID_DIV0].hw;
+ parent_hws[5] = sama7d65_plls[PLL_ID_GPU][PLL_COMPID_DIV0].hw;
+ parent_hws[6] = sama7d65_plls[PLL_ID_BAUD][PLL_COMPID_DIV0].hw;
+ parent_hws[7] = sama7d65_plls[PLL_ID_AUDIO][PLL_COMPID_DIV0].hw;
+ parent_hws[8] = sama7d65_plls[PLL_ID_ETH][PLL_COMPID_DIV0].hw;
+
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name, NULL, parent_hws,
+ 9, i,
+ &programmable_layout,
+ sama7d65_prog_mux_table);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama7d65_systemck[i].n,
+ sama7d65_systemck[i].p, NULL,
+ sama7d65_systemck[i].id, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->shws[sama7d65_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_periphck[i].n,
+ NULL,
+ sama7d65_mckx[sama7d65_periphck[i].p].hw,
+ sama7d65_periphck[i].id,
+ &sama7d65_periphck[i].r,
+ sama7d65_periphck[i].chgp ? 0 :
+ INT_MIN, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->phws[sama7d65_periphck[i].id] = hw;
+ }
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_pmc->chws[PMC_MCK1];
+ for (i = 0; i < ARRAY_SIZE(sama7d65_gck); i++) {
+ u8 num_parents = 4 + sama7d65_gck[i].pp_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sama7d65_gck[i].pp_mux_table,
+ sama7d65_gck[i].pp_count);
+ for (j = 0; j < sama7d65_gck[i].pp_count; j++) {
+ u8 pll_id = sama7d65_gck[i].pp[j].pll_id;
+ u8 pll_compid = sama7d65_gck[i].pp[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[4], tmp_parent_hws,
+ sama7d65_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_gck[i].n, NULL,
+ parent_hws, mux_table,
+ num_parents,
+ sama7d65_gck[i].id,
+ &sama7d65_gck[i].r,
+ sama7d65_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->ghws[sama7d65_gck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7d65_pmc);
+ kfree(alloc_mem);
+
+ return;
+
+err_free:
+ if (alloc_mem) {
+ for (i = 0; i < alloc_mem_size; i++)
+ kfree(alloc_mem[i]);
+ kfree(alloc_mem);
+ }
+
+ kfree(sama7d65_pmc);
+}
+
+/* Some clks are used for a clocksource */
+CLK_OF_DECLARE(sama7d65_pmc, "microchip,sama7d65-pmc", sama7d65_pmc_setup);
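A minimal consumer-side sketch for orientation (hypothetical driver: foo_probe() and the "pclk" lookup name are assumptions, not part of this patch). A peripheral whose DT node references the "microchip,sama7d65-pmc" provider picks up its clock through the standard clk API:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		/* "pclk" is resolved via the node's clocks/clock-names
		 * properties against the of_clk_hw_pmc_get() provider
		 * registered by sama7d65_pmc_setup() above. */
		struct clk *clk = devm_clk_get_enabled(&pdev->dev, "pclk");

		return PTR_ERR_OR_ZERO(clk);
	}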
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 7741d8f3dbee..021d1b412af4 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -12,6 +12,8 @@
#include <linux/of_address.h>
#include <linux/io.h>
+#include <dt-bindings/clock/at91.h>
+
#define SLOW_CLOCK_FREQ 32768
#define SLOWCK_SW_CYCLES 5
#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
@@ -470,7 +472,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
{
void __iomem *regbase = of_iomap(np, 0);
struct clk_hw_onecell_data *clk_data;
- struct clk_hw *slow_rc, *slow_osc;
+ struct clk_hw *slow_rc, *slow_osc, *hw;
const char *xtal_name;
const struct clk_hw *parent_hws[2];
static struct clk_parent_data parent_data = {
@@ -506,19 +508,19 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
/* MD_SLCK and TD_SLCK. */
clk_data->num = 2;
- clk_data->hws[0] = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck",
- slow_rc,
- 0, 32768);
- if (IS_ERR(clk_data->hws[0]))
+ hw = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck", slow_rc,
+ 0, 32768);
+ if (IS_ERR(hw))
goto clk_data_free;
+ clk_data->hws[SCKC_MD_SLCK] = hw;
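+ /*
+ * SCKC_MD_SLCK and SCKC_TD_SLCK come from dt-bindings/clock/at91.h,
+ * keeping the cell indices used by consumers in sync with the
+ * registration order here.
+ */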
parent_hws[0] = slow_rc;
parent_hws[1] = slow_osc;
- clk_data->hws[1] = at91_clk_register_sam9x5_slow(regbase, "td_slck",
- parent_hws, 2,
- &at91sam9x60_bits);
- if (IS_ERR(clk_data->hws[1]))
+ hw = at91_clk_register_sam9x5_slow(regbase, "td_slck", parent_hws,
+ 2, &at91sam9x60_bits);
+ if (IS_ERR(hw))
goto unregister_md_slck;
+ clk_data->hws[SCKC_TD_SLCK] = hw;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
if (WARN_ON(ret))
@@ -527,9 +529,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
return;
unregister_td_slck:
- at91_clk_unregister_sam9x5_slow(clk_data->hws[1]);
+ at91_clk_unregister_sam9x5_slow(clk_data->hws[SCKC_TD_SLCK]);
unregister_md_slck:
- clk_hw_unregister(clk_data->hws[0]);
+ clk_hw_unregister(clk_data->hws[SCKC_MD_SLCK]);
clk_data_free:
kfree(clk_data);
unregister_slow_osc:
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index ec5749e301ba..2b0ea882f1e4 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/string_choices.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -502,7 +503,7 @@ static int clk_gate(struct ccu_data *ccu, const char *name,
return 0;
pr_err("%s: failed to %s gate for %s\n", __func__,
- enable ? "enable" : "disable", name);
+ str_enable_disable(enable), name);
return -EIO;
}
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index a18a8768feb4..0e1fe3759530 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -34,6 +34,7 @@ static char *rpi_firmware_clk_names[] = {
[RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
[RPI_FIRMWARE_VEC_CLK_ID] = "vec",
+ [RPI_FIRMWARE_DISP_CLK_ID] = "disp",
};
#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
@@ -56,6 +57,12 @@ struct raspberrypi_clk_data {
struct raspberrypi_clk *rpi;
};
+static inline
+const struct raspberrypi_clk_data *clk_hw_to_data(const struct clk_hw *hw)
+{
+ return container_of(hw, struct raspberrypi_clk_data, hw);
+}
+
struct raspberrypi_clk_variant {
bool export;
char *clkdev;
@@ -111,18 +118,31 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_ISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_DISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
};
@@ -153,7 +173,6 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
struct raspberrypi_firmware_prop msg = {
.id = cpu_to_le32(data->id),
.val = cpu_to_le32(*val),
- .disable_turbo = cpu_to_le32(1),
};
int ret;
@@ -168,8 +187,7 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
@@ -186,8 +204,7 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
@@ -203,8 +220,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 _rate = rate;
int ret;
@@ -221,8 +237,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk_variant *variant = data->variant;
/*
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 495c0d607c7d..6a763bc9ac1a 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -75,6 +75,7 @@ struct en_rst_data {
};
struct en_clk_soc_data {
+ u32 num_clocks;
const struct clk_ops pcie_ops;
int (*hw_init)(struct platform_device *pdev,
struct clk_hw_onecell_data *clk_data);
@@ -90,6 +91,7 @@ static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 }
static const u32 bus7581_base[] = { 600000000, 540000000 };
static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
static const u32 crypto_base[] = { 540000000, 480000000 };
+static const u32 emmc7581_base[] = { 200000000, 150000000 };
static const struct en_clk_desc en7523_base_clks[] = {
{
@@ -280,6 +282,15 @@ static const struct en_clk_desc en7581_base_clks[] = {
.base_shift = 0,
.base_values = crypto_base,
.n_base_values = ARRAY_SIZE(crypto_base),
+ }, {
+ .id = EN7581_CLK_EMMC,
+ .name = "emmc",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 12,
+ .base_values = emmc7581_base,
+ .n_base_values = ARRAY_SIZE(emmc7581_base),
}
};
@@ -504,8 +515,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
u32 rate;
int i;
- clk_data->num = EN7523_NUM_CLOCKS;
-
for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
const struct en_clk_desc *desc = &en7523_base_clks[i];
u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
@@ -587,8 +596,6 @@ static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_dat
hw = en7523_register_pcie_clk(dev, base);
clk_data->hws[EN7523_CLK_PCIE] = hw;
-
- clk_data->num = EN7523_NUM_CLOCKS;
}
static int en7523_reset_update(struct reset_controller_dev *rcdev,
@@ -702,13 +709,15 @@ static int en7523_clk_probe(struct platform_device *pdev)
struct clk_hw_onecell_data *clk_data;
int r;
+ soc_data = device_get_match_data(&pdev->dev);
+
clk_data = devm_kzalloc(&pdev->dev,
- struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ struct_size(clk_data, hws, soc_data->num_clocks),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
- soc_data = device_get_match_data(&pdev->dev);
+ clk_data->num = soc_data->num_clocks;
r = soc_data->hw_init(pdev, clk_data);
if (r)
return r;
@@ -717,6 +726,7 @@ static int en7523_clk_probe(struct platform_device *pdev)
}
static const struct en_clk_soc_data en7523_data = {
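+ /* +1 accounts for the separately registered PCIe clock */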
+ .num_clocks = ARRAY_SIZE(en7523_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7523_pci_is_enabled,
.prepare = en7523_pci_prepare,
@@ -726,6 +736,8 @@ static const struct en_clk_soc_data en7523_data = {
};
static const struct en_clk_soc_data en7581_data = {
+ /* We increment num_clocks by 1 to account for the additional PCIe clock */
+ .num_clocks = ARRAY_SIZE(en7581_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7581_pci_is_enabled,
.enable = en7581_pci_enable,
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
index f888aed79b11..4bd8d6ecf6a2 100644
--- a/drivers/clk/clk-ep93xx.c
+++ b/drivers/clk/clk-ep93xx.c
@@ -586,9 +586,9 @@ static unsigned long calc_pll_rate(u64 rate, u32 config_word)
static int ep93xx_plls_init(struct ep93xx_clk_priv *priv)
{
- const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
- const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
- const char pclk_divisors[] = { 1, 2, 4, 8 };
+ static const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+ static const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+ static const char pclk_divisors[] = { 1, 2, 4, 8 };
struct clk_parent_data xtali = { .index = 0 };
unsigned int clk_f_div, clk_h_div, clk_p_div;
unsigned long clk_pll1_rate, clk_pll2_rate;
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index c997e7491996..2bcf422f0b04 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -375,7 +375,7 @@ static unsigned long lmk04832_vco_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
- const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
+ static const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
unsigned int pll2_n, p, pll2_r;
unsigned int pll2_misc;
unsigned long vco_rate;
@@ -637,7 +637,7 @@ static int lmk04832_register_vco(struct lmk04832 *lmk)
static int lmk04832_clkout_set_ddly(struct lmk04832 *lmk, int id)
{
- const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
+ static const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
unsigned int sclkx_y_ddly = 10;
unsigned int dclkx_y_ddly;
unsigned int dclkx_y_div;
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 7082b4309c6f..27e632edd484 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -294,7 +294,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
return -EINVAL;
for (p = data; p->name; p++)
- clks_num++;
+ clks_num = max(clks_num, p->id + 1);
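+ /*
+ * Example: a table whose only entries carry id 0 and id 5 needs
+ * clks_num = 6, not 2, because clk_data.hws[] is indexed by id.
+ */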
clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
GFP_KERNEL);
@@ -309,6 +309,9 @@ static int loongson2_clk_probe(struct platform_device *pdev)
clp->clk_data.num = clks_num;
clp->dev = dev;
+ /* Avoid returning NULL for unused id */
+ memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), clks_num);
+
for (i = 0; i < clks_num; i++) {
p = &data[i];
switch (p->type) {
@@ -335,8 +338,8 @@ static int loongson2_clk_probe(struct platform_device *pdev)
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 06245681dac7..fc0aeb4247f2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/reboot.h>
/*
@@ -116,9 +117,9 @@ static void __init nomadik_src_init(void)
val = readl(src_base + SRC_XTALCR);
pr_info("SXTALO is %s\n",
- (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+ str_disabled_enabled(val & SRC_XTALCR_SXTALDIS));
pr_info("MXTAL is %s\n",
- (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
+ str_enabled_disabled(val & SRC_XTALCR_MXTALSTAT));
if (of_property_read_bool(np, "disable-sxtalo")) {
/* The machine uses an external oscillator circuit */
val |= SRC_XTALCR_SXTALDIS;
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 07c13ebe327d..f476883bc93b 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -5,6 +5,7 @@
* Inspired by clk-asm9260.c .
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -34,11 +35,20 @@
#define STM32F4_RCC_APB2ENR 0x44
#define STM32F4_RCC_BDCR 0x70
#define STM32F4_RCC_CSR 0x74
+#define STM32F4_RCC_SSCGR 0x80
#define STM32F4_RCC_PLLI2SCFGR 0x84
#define STM32F4_RCC_PLLSAICFGR 0x88
#define STM32F4_RCC_DCKCFGR 0x8c
#define STM32F7_RCC_DCKCFGR2 0x90
+#define STM32F4_RCC_PLLCFGR_N_MASK GENMASK(14, 6)
+
+#define STM32F4_RCC_SSCGR_SSCGEN BIT(31)
+#define STM32F4_RCC_SSCGR_SPREADSEL BIT(30)
+#define STM32F4_RCC_SSCGR_RESERVED_MASK GENMASK(29, 28)
+#define STM32F4_RCC_SSCGR_INCSTEP_MASK GENMASK(27, 13)
+#define STM32F4_RCC_SSCGR_MODPER_MASK GENMASK(12, 0)
+
#define NONE -1
#define NO_IDX NONE
#define NO_MUX NONE
@@ -364,6 +374,16 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
+enum stm32f4_pll_ssc_mod_type {
+ STM32F4_PLL_SSC_CENTER_SPREAD,
+ STM32F4_PLL_SSC_DOWN_SPREAD,
+};
+
+static const char * const stm32f4_ssc_mod_methods[] __initconst = {
+ [STM32F4_PLL_SSC_DOWN_SPREAD] = "down-spread",
+ [STM32F4_PLL_SSC_CENTER_SPREAD] = "center-spread",
+};
+
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
@@ -509,6 +529,12 @@ static const struct clk_div_table pll_divr_table[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
};
+struct stm32f4_pll_ssc {
+ unsigned int mod_freq;
+ unsigned int mod_depth;
+ enum stm32f4_pll_ssc_mod_type mod_type;
+};
+
struct stm32f4_pll {
spinlock_t *lock;
struct clk_gate gate;
@@ -516,6 +542,8 @@ struct stm32f4_pll {
u8 bit_rdy_idx;
u8 status;
u8 n_start;
+ bool ssc_enable;
+ struct stm32f4_pll_ssc ssc_conf;
};
#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate)
@@ -538,6 +566,7 @@ struct stm32f4_vco_data {
u8 offset;
u8 bit_idx;
u8 bit_rdy_idx;
+ bool sscg;
};
static const struct stm32f4_vco_data vco_data[] = {
@@ -632,9 +661,11 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ unsigned long val;
unsigned long n;
- n = (readl(base + pll->offset) >> 6) & 0x1ff;
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
return parent_rate * n;
}
@@ -656,6 +687,32 @@ static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return *prate * n;
}
+static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int ndiv)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct stm32f4_pll_ssc *ssc = &pll->ssc_conf;
+ u32 modeper, incstep;
+ u32 sscgr;
+
+ sscgr = readl(base + STM32F4_RCC_SSCGR);
+ /* reserved field must be kept at reset value */
+ sscgr &= STM32F4_RCC_SSCGR_RESERVED_MASK;
+
+ modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * ssc->mod_freq);
+ incstep = DIV_ROUND_CLOSEST(((1 << 15) - 1) * ssc->mod_depth * ndiv,
+ 5 * 10000 * modeper);
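+ /*
+ * Worked example (assumed figures): parent_rate = 1 MHz,
+ * mod_freq = 10 kHz, mod_depth = 200 permyriad (2 %), ndiv = 336:
+ * modeper = 1000000 / (4 * 10000) = 25 and
+ * incstep = round(32767 * 200 * 336 / (5 * 10000 * 25)) = 1762.
+ */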
+ sscgr |= STM32F4_RCC_SSCGR_SSCGEN |
+ FIELD_PREP(STM32F4_RCC_SSCGR_INCSTEP_MASK, incstep) |
+ FIELD_PREP(STM32F4_RCC_SSCGR_MODPER_MASK, modeper);
+
+ if (ssc->mod_type)
+ sscgr |= STM32F4_RCC_SSCGR_SPREADSEL;
+
+ writel(sscgr, base + STM32F4_RCC_SSCGR);
+}
+
static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -673,9 +730,13 @@ static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
n = rate / parent_rate;
- val = readl(base + pll->offset) & ~(0x1ff << 6);
+ val = readl(base + pll->offset) & ~STM32F4_RCC_PLLCFGR_N_MASK;
+ val |= FIELD_PREP(STM32F4_RCC_PLLCFGR_N_MASK, n);
+
+ writel(val, base + pll->offset);
- writel(val | ((n & 0x1ff) << 6), base + pll->offset);
+ if (pll->ssc_enable)
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
if (pll_state)
stm32f4_pll_enable(hw);
@@ -782,6 +843,84 @@ static struct clk_hw *clk_register_pll_div(const char *name,
return hw;
}
+static int __init stm32f4_pll_init_ssc(struct clk_hw *hw,
+ const struct stm32f4_pll_ssc *conf)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+ int pll_state;
+ unsigned long n, val;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("%s: failed to get clock parent\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ pll->ssc_enable = true;
+ memcpy(&pll->ssc_conf, conf, sizeof(pll->ssc_conf));
+
+ pll_state = stm32f4_pll_is_enabled(hw);
+
+ if (pll_state)
+ stm32f4_pll_disable(hw);
+
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
+
+ pr_debug("%s: pll: %s, parent: %s, parent-rate: %lu, n: %lu\n",
+ __func__, clk_hw_get_name(hw), clk_hw_get_name(parent),
+ parent_rate, n);
+
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
+
+ if (pll_state)
+ stm32f4_pll_enable(hw);
+
+ return 0;
+}
+
+static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np,
+ struct stm32f4_pll_ssc *conf)
+{
+ int ret;
+ const char *s;
+
+ if (!conf)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "st,ssc-modfreq-hz", &conf->mod_freq);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "st,ssc-moddepth-permyriad",
+ &conf->mod_depth);
+ if (ret) {
+ pr_err("%pOF: missing st,ssc-moddepth-permyriad\n", np);
+ return ret;
+ }
+
+ ret = fwnode_property_match_property_string(of_fwnode_handle(np),
+ "st,ssc-modmethod",
+ stm32f4_ssc_mod_methods,
+ ARRAY_SIZE(stm32f4_ssc_mod_methods));
+ if (ret < 0) {
+ pr_err("%pOF: failed to get st,ssc-modmethod\n", np);
+ return ret;
+ }
+
+ conf->mod_type = ret;
+ s = stm32f4_ssc_mod_methods[ret];
+
+ pr_debug("%pOF: SSCG settings: mod_freq: %d, mod_depth: %d mod_method: %s [%d]\n",
+ np, conf->mod_freq, conf->mod_depth, s, conf->mod_type);
+
+ return 0;
+}
+
static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc,
const struct stm32f4_pll_data *data, spinlock_t *lock)
{
@@ -1689,7 +1828,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
unsigned long pllm;
- struct clk_hw *pll_src_hw;
+ struct clk_hw *pll_src_hw, *pll_vco_hw;
+ struct stm32f4_pll_ssc ssc_conf;
base = of_iomap(np, 0);
if (!base) {
@@ -1748,8 +1888,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
0, 1, pllm);
- stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
- &stm32f4_clk_lock);
+ pll_vco_hw = stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
+ &stm32f4_clk_lock);
clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in",
&data->pll_data[1], &stm32f4_clk_lock);
@@ -1894,6 +2034,9 @@ static void __init stm32f4_rcc_init(struct device_node *np)
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
+ if (!stm32f4_pll_ssc_parse_dt(np, &ssc_conf))
+ stm32f4_pll_init_ssc(pll_vco_hw, &ssc_conf);
+
return;
fail:
kfree(clks);
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 76d7ea1964c3..9fe27dace111 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -78,9 +78,6 @@
#define VC3_PLL1_VCO_MIN 300000000UL
#define VC3_PLL1_VCO_MAX 600000000UL
-#define VC3_PLL2_VCO_MIN 400000000UL
-#define VC3_PLL2_VCO_MAX 1200000000UL
-
#define VC3_PLL3_VCO_MIN 300000000UL
#define VC3_PLL3_VCO_MAX 800000000UL
@@ -147,9 +144,13 @@ struct vc3_pfd_data {
u8 mdiv2_bitmsk;
};
+struct vc3_vco {
+ unsigned long min;
+ unsigned long max;
+};
+
struct vc3_pll_data {
- unsigned long vco_min;
- unsigned long vco_max;
+ struct vc3_vco vco;
u8 num;
u8 int_div_msb_offs;
u8 int_div_lsb_offs;
@@ -166,12 +167,17 @@ struct vc3_div_data {
struct vc3_hw_data {
struct clk_hw hw;
struct regmap *regmap;
- const void *data;
+ void *data;
u32 div_int;
u32 div_frc;
};
+struct vc3_hw_cfg {
+ struct vc3_vco pll2_vco;
+ u32 se2_clk_sel_msk;
+};
+
static const struct clk_div_table div1_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 4, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 6, },
@@ -386,10 +392,10 @@ static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco_min)
- rate = pll->vco_min;
- if (rate > pll->vco_max)
- rate = pll->vco_max;
+ if (rate < pll->vco.min)
+ rate = pll->vco.min;
+ if (rate > pll->vco.max)
+ rate = pll->vco.max;
vc3->div_int = rate / *parent_rate;
@@ -680,8 +686,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL1,
.int_div_msb_offs = VC3_PLL1_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL1_VCO_N_DIVIDER,
- .vco_min = VC3_PLL1_VCO_MIN,
- .vco_max = VC3_PLL1_VCO_MAX
+ .vco = {
+ .min = VC3_PLL1_VCO_MIN,
+ .max = VC3_PLL1_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll1",
@@ -698,8 +706,6 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL2,
.int_div_msb_offs = VC3_PLL2_FB_INT_DIV_MSB,
.int_div_lsb_offs = VC3_PLL2_FB_INT_DIV_LSB,
- .vco_min = VC3_PLL2_VCO_MIN,
- .vco_max = VC3_PLL2_VCO_MAX
},
.hw.init = &(struct clk_init_data) {
.name = "pll2",
@@ -716,8 +722,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL3,
.int_div_msb_offs = VC3_PLL3_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL3_N_DIVIDER,
- .vco_min = VC3_PLL3_VCO_MIN,
- .vco_max = VC3_PLL3_VCO_MAX
+ .vco = {
+ .min = VC3_PLL3_VCO_MIN,
+ .max = VC3_PLL3_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll3",
@@ -901,7 +909,6 @@ static struct vc3_hw_data clk_mux[] = {
[VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data) {
.name = "se2_mux",
@@ -982,6 +989,7 @@ static int vc3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 settings[NUM_CONFIG_REGISTERS];
+ const struct vc3_hw_cfg *data;
struct regmap *regmap;
const char *name;
int ret, i;
@@ -1029,9 +1037,16 @@ static int vc3_probe(struct i2c_client *client)
clk_pfd[i].hw.init->name);
}
+ data = i2c_get_match_data(client);
+
/* Register pll's */
for (i = 0; i < ARRAY_SIZE(clk_pll); i++) {
clk_pll[i].regmap = regmap;
+ if (i == VC3_PLL2) {
+ struct vc3_pll_data *pll_data = clk_pll[i].data;
+
+ pll_data->vco = data->pll2_vco;
+ }
ret = devm_clk_hw_register(dev, &clk_pll[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1059,6 +1074,11 @@ static int vc3_probe(struct i2c_client *client)
/* Register clk muxes */
for (i = 0; i < ARRAY_SIZE(clk_mux); i++) {
clk_mux[i].regmap = regmap;
+ if (i == VC3_SE2_MUX) {
+ struct vc3_clk_data *clk_data = clk_mux[i].data;
+
+ clk_data->bitmsk = data->se2_clk_sel_msk;
+ }
ret = devm_clk_hw_register(dev, &clk_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1108,8 +1128,19 @@ static int vc3_probe(struct i2c_client *client)
return ret;
}
+static const struct vc3_hw_cfg vc3_5p = {
+ .pll2_vco = { .min = 400000000UL, .max = 1200000000UL },
+ .se2_clk_sel_msk = BIT(6),
+};
+
+static const struct vc3_hw_cfg vc3_5l = {
+ .pll2_vco = { .min = 30000000UL, .max = 130000000UL },
+ .se2_clk_sel_msk = BIT(0),
+};
+
static const struct of_device_id dev_ids[] = {
- { .compatible = "renesas,5p35023" },
+ { .compatible = "renesas,5p35023", .data = &vc3_5p },
+ { .compatible = "renesas,5l35023", .data = &vc3_5l },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dev_ids);
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 0c3d0cee98c8..96946a8e2854 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
@@ -520,8 +521,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
- data & pclk->param.reg_clk_mask ? "enabled" :
- "disabled");
+ str_enabled_disabled(data & pclk->param.reg_clk_mask));
} else {
return 1;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9b45fa005030..cf7720b9172f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -5385,8 +5385,10 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (of_property_present(clkspec.np, "clock-indices") && !found)
+ if (of_property_present(clkspec.np, "clock-indices") && !found) {
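+ /* drop the reference taken when resolving the "clocks" phandle */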
+ of_node_put(clkspec.np);
return NULL;
+ }
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 5bbbb3a66477..82727b1fc67a 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -19,7 +19,6 @@
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/platform_data/clk-davinci-pll.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -840,27 +839,6 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return 0;
}
-static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
-{
- struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
-
- /*
- * Platform data is optional, so allocate a new struct if one was not
- * provided. For device tree, this will always be the case.
- */
- if (!pdata)
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- /* for device tree, we need to fill in the struct */
- if (dev->of_node)
- pdata->cfgchip =
- syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
-
- return pdata;
-}
-
/* needed in early boot for clocksource/clockevent */
#ifdef CONFIG_ARCH_DAVINCI_DA850
CLK_OF_DECLARE(da850_pll0, "ti,da850-pll0", of_da850_pll0_init);
@@ -890,8 +868,8 @@ typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base,
static int davinci_pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct davinci_pll_platform_data *pdata;
davinci_pll_init pll_init = NULL;
+ struct regmap *cfgchip;
void __iomem *base;
pll_init = device_get_match_data(dev);
@@ -903,17 +881,13 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
- pdata = davinci_pll_get_pdata(dev);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- return pll_init(dev, base, pdata->cfgchip);
+ return pll_init(dev, base, cfgchip);
}
static struct platform_driver davinci_pll_driver = {
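Note that syscon_regmap_lookup_by_compatible() reports failure with an ERR_PTR, not NULL. Callers that can operate without the syscon typically normalize the result; a sketch of that defensive pattern (an assumption for illustration, not necessarily what the davinci pll_init implementations do internally):

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Sketch: treat a missing cfgchip syscon as optional. */
static struct regmap *get_cfgchip_optional(void)
{
	struct regmap *cfgchip;

	cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
	if (IS_ERR(cfgchip))
		cfgchip = NULL;	/* callers must then handle a NULL regmap */

	return cfgchip;
}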
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 516dbd170c8a..fb18f507f121 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -399,8 +399,9 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
"dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
- "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
- "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+ "arm_pll_out", "sys_pll1_out", "sys_pll2_out",
+ "sys_pll3_out", "dummy", "dummy", "osc_24m",
+ "dummy", "osc_32k"};
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index 58a516dd385b..c5f358a75f30 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -15,7 +15,7 @@
#include "clk.h"
-#define IMX93_CLK_END 207
+#define IMX93_CLK_END 208
#define PLAT_IMX93 BIT(0)
#define PLAT_IMX91 BIT(1)
@@ -38,6 +38,7 @@ static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_mub;
static u32 share_count_pdm;
+static u32 share_count_spdif;
static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
static const char *parent_names[MAX_SEL][4] = {
@@ -70,8 +71,8 @@ static const struct imx93_clk_root {
{ IMX93_CLK_WAKEUP_AXI, "wakeup_axi_root", 0x0380, FAST_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_SWO_TRACE, "swo_trace_root", 0x0400, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
- { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_LPTMR1, "lptmr1_root", 0x0700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_LPTMR2, "lptmr2_root", 0x0780, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_TPM2, "tpm2_root", 0x0880, TPM_SEL, },
@@ -177,10 +178,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_WDOG5_GATE, "wdog5", "osc_24m", 0x8400, },
{ IMX93_CLK_SEMA1_GATE, "sema1", "bus_aon_root", 0x8440, },
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
- { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub },
- { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
{ IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi1", "flexspi1_root", 0x8640, },
@@ -188,8 +189,8 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_GPIO2_GATE, "gpio2", "bus_wakeup_root", 0x88c0, },
{ IMX93_CLK_GPIO3_GATE, "gpio3", "bus_wakeup_root", 0x8900, },
{ IMX93_CLK_GPIO4_GATE, "gpio4", "bus_wakeup_root", 0x8940, },
- { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, },
- { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, },
+ { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, 0, NULL, PLAT_IMX93},
+ { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, 0, NULL, PLAT_IMX93},
{ IMX93_CLK_LPIT1_GATE, "lpit1", "bus_aon_root", 0x8a00, },
{ IMX93_CLK_LPIT2_GATE, "lpit2", "bus_wakeup_root", 0x8a40, },
{ IMX93_CLK_LPTMR1_GATE, "lptmr1", "lptmr1_root", 0x8a80, },
@@ -238,10 +239,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_SAI3_IPG, "sai3_ipg_clk", "bus_wakeup_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_MIPI_CSI_GATE, "mipi_csi", "media_apb_root", 0x9580, },
- { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, },
- { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, },
+ { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, 0, NULL, PLAT_IMX93 },
+ { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
- { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
+ { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
{ IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
@@ -252,12 +253,13 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_MQS1_GATE, "mqs1", "sai1_root", 0x9b00, },
{ IMX93_CLK_MQS2_GATE, "mqs2", "sai3_root", 0x9b40, },
{ IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, },
- { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, },
+ { IMX93_CLK_SPDIF_IPG, "spdif_ipg_clk", "bus_wakeup_root", 0x9c00, 0, &share_count_spdif},
+ { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, 0, &share_count_spdif},
{ IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, },
{ IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX93, },
{ IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX93, },
- { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, },
- { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, },
+ { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, },
+ { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, },
/* Critical because clk accessed during CPU idle */
{ IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, CLK_IS_CRITICAL},
{ IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, },
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index d63564dbb12c..f290981ea13b 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -56,7 +56,9 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
PLL_1416X_RATE(700000000U, 350, 3, 2),
PLL_1416X_RATE(640000000U, 320, 3, 2),
PLL_1416X_RATE(600000000U, 300, 3, 2),
+ PLL_1416X_RATE(416000000U, 208, 3, 2),
PLL_1416X_RATE(320000000U, 160, 3, 2),
+ PLL_1416X_RATE(208000000U, 208, 3, 3),
};
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
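The new 1416x entries follow the documented integer-PLL relation fout = fin * m / (p << s): with a 24 MHz reference, (208, 3, 2) yields 24 MHz * 208 / 12 = 416 MHz and (208, 3, 3) yields 24 MHz * 208 / 24 = 208 MHz. A standalone checker (the formula is the pll1416x one; the helper itself is illustrative):

#include <stdio.h>

/* pll1416x relation: fout = fin * m / (p << s). */
static unsigned long long pll1416x_rate(unsigned long long fin,
					unsigned int m, unsigned int p,
					unsigned int s)
{
	return fin * m / (p << s);
}

int main(void)
{
	printf("%llu\n", pll1416x_rate(24000000ULL, 208, 3, 2)); /* 416000000 */
	printf("%llu\n", pll1416x_rate(24000000ULL, 208, 3, 3)); /* 208000000 */
	return 0;
}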
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 425c69cfb105..e103121cf58e 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -55,10 +55,16 @@ static const struct mtk_gate audio_clks[] = {
GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
/* AUDIO0 */
GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
+ GATE_DUMMY(CLK_AUD_LRCK_DETECT, "audio_lrck_detect_dummy"),
+ GATE_DUMMY(CLK_AUD_I2S, "audio_i2s_dummy"),
+ GATE_DUMMY(CLK_AUD_APLL_TUNER, "audio_apll_tuner_dummy"),
GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
+ GATE_DUMMY(CLK_AUD_TML, "audio_tml_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_EXT, "audio_ahb_idle_ext_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_INT, "audio_ahb_idle_int_dummy"),
/* AUDIO1 */
GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
@@ -76,10 +82,12 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_DUMMY(CLK_AUD_HDMIRX, "audio_hdmirx_dummy"),
GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
+ GATE_DUMMY(CLK_AUD_AFE_PCMIF, "audio_afe_pcmif_dummy"),
GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
@@ -100,6 +108,8 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
/* AUDIO3 */
+ GATE_DUMMY(CLK_AUD_DMIC1, "audio_dmic1_dummy"),
+ GATE_DUMMY(CLK_AUD_DMIC2, "audio_dmic2_dummy"),
GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 5da3eabffd3e..f11c7a4fa37b 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate bdp_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 875594bc9dcb..c158e54c4652 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "img_dummy"),
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index bc68fa718878..474d87d62e83 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs disp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 94db86f8d0a4..5299d92f3aba 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
diff --git a/drivers/clk/mmp/clk-pxa1908-apbc.c b/drivers/clk/mmp/clk-pxa1908-apbc.c
index b93d08466198..3fd7b5e644f3 100644
--- a/drivers/clk/mmp/clk-pxa1908-apbc.c
+++ b/drivers/clk/mmp/clk-pxa1908-apbc.c
@@ -96,8 +96,8 @@ static int pxa1908_apbc_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
- if (IS_ERR(pxa_unit))
- return PTR_ERR(pxa_unit);
+ if (!pxa_unit)
+ return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
diff --git a/drivers/clk/mmp/clk-pxa1908-apbcp.c b/drivers/clk/mmp/clk-pxa1908-apbcp.c
index 08f3845cbb1b..f638d7e89b47 100644
--- a/drivers/clk/mmp/clk-pxa1908-apbcp.c
+++ b/drivers/clk/mmp/clk-pxa1908-apbcp.c
@@ -48,8 +48,8 @@ static int pxa1908_apbcp_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
- if (IS_ERR(pxa_unit))
- return PTR_ERR(pxa_unit);
+ if (!pxa_unit)
+ return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
diff --git a/drivers/clk/mmp/clk-pxa1908-mpmu.c b/drivers/clk/mmp/clk-pxa1908-mpmu.c
index e3337bacaadd..90b4b2488574 100644
--- a/drivers/clk/mmp/clk-pxa1908-mpmu.c
+++ b/drivers/clk/mmp/clk-pxa1908-mpmu.c
@@ -78,8 +78,8 @@ static int pxa1908_mpmu_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
- if (IS_ERR(pxa_unit))
- return PTR_ERR(pxa_unit);
+ if (!pxa_unit)
+ return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
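All three pxa1908 fixes above correct the same bug: devm_kzalloc() signals failure by returning NULL, never an ERR_PTR, so the old IS_ERR() check could never fire and a failed allocation would be dereferenced later. The correct pairing of checks, as a compact sketch (pxa1908_clk_unit is the driver's own type):

#include <linux/device.h>
#include <linux/slab.h>

/* devm_kzalloc() returns NULL on failure; IS_ERR() is only for
 * ERR_PTR-encoded returns such as devm_platform_ioremap_resource().
 */
static int alloc_example(struct device *dev)
{
	struct pxa1908_clk_unit *pxa_unit;

	pxa_unit = devm_kzalloc(dev, sizeof(*pxa_unit), GFP_KERNEL);
	if (!pxa_unit)
		return -ENOMEM;

	return 0;
}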
diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
index edaa2433a472..eaf5d2c5e593 100644
--- a/drivers/clk/mmp/pwr-island.c
+++ b/drivers/clk/mmp/pwr-island.c
@@ -106,10 +106,10 @@ struct generic_pm_domain *mmp_pm_domain_register(const char *name,
pm_domain->flags = flags;
pm_domain->lock = lock;
- pm_genpd_init(&pm_domain->genpd, NULL, true);
pm_domain->genpd.name = name;
pm_domain->genpd.power_on = mmp_pm_domain_power_on;
pm_domain->genpd.power_off = mmp_pm_domain_power_off;
+ pm_genpd_init(&pm_domain->genpd, NULL, true);
return &pm_domain->genpd;
}
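The reorder matters because pm_genpd_init() consumes fields of the generic_pm_domain it is handed, so the name and the power_on/power_off callbacks must be populated before registration. A condensed view of the required order (restating the hunk above for emphasis; the reading of which fields init consumes is an assumption based on typical genpd usage):

/* Populate the generic_pm_domain before registering it. */
pm_domain->genpd.name = name;
pm_domain->genpd.power_on = mmp_pm_domain_power_on;
pm_domain->genpd.power_off = mmp_pm_domain_power_off;
pm_genpd_init(&pm_domain->genpd, NULL, true);	/* init last */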
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index ef89d686cbc4..69bbf62ba3cd 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -64,6 +64,15 @@ config CLK_X1E80100_TCSRCC
Support for the TCSR clock controller on X1E80100 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config CLK_X1P42100_GPUCC
+ tristate "X1P42100 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_X1E80100_GCC
+ help
+ Support for the graphics clock controller on X1P42100 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config CLK_QCM2290_GPUCC
tristate "QCM2290 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -190,6 +199,15 @@ config IPQ_APSS_6018
Say Y if you want to support CPU frequency scaling on
ipq based devices.
+config IPQ_CMN_PLL
+ tristate "IPQ CMN PLL Clock Controller"
+ help
+ Support for the CMN PLL clock controller on the IPQ platform. The
+ CMN PLL consumes the AHB/SYS clocks from GCC and supplies the
+ output clocks to the networking hardware and GCC blocks.
+ Say Y or M if you want to support the CMN PLL clock on IPQ-based
+ devices.
+
config IPQ_GCC_4019
tristate "IPQ4019 Global Clock Controller"
help
@@ -495,6 +513,15 @@ config QCS_GCC_8300
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config QCS_GCC_615
+ tristate "QCS615 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on QCS615 devices.
+ Say Y if you want to use multimedia devices or peripheral
+ devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+
config SC_CAMCC_7180
tristate "SC7180 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1022,6 +1049,17 @@ config SM_DISPCC_8550
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_8750
+ tristate "SM8750 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_8750
+ select QCOM_GDSC
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc.
+ SM8750 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_GCC_4450
tristate "SM4450 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1079,6 +1117,7 @@ config SM_GCC_7150
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
help
Support for the global clock controller on SM8150 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -1130,6 +1169,15 @@ config SM_GCC_8650
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8750
+ tristate "SM8750 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_4450
tristate "SM4450 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1230,6 +1278,15 @@ config SM_GPUCC_8650
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_LPASSCC_6115
+ tristate "SM6115 Low Power Audio Subsystem (LPASS) Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_6115
+ help
+ Support for the LPASS clock controller on SM6115 devices.
+ Say Y if you want to toggle LPASS-adjacent resets within
+ this clock controller to reset the LPASS subsystem.
+
config SM_TCSRCC_8550
tristate "SM8550 TCSR Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1246,6 +1303,14 @@ config SM_TCSRCC_8650
Support for the TCSR clock controller on SM8650 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config SM_TCSRCC_8750
+ tristate "SM8750 TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UFS/USB/PCIe.
+
config SA_VIDEOCC_8775P
tristate "SA8775P Video Clock Controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index b09dbdc210eb..0db2f98bcb3e 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -26,9 +26,11 @@ obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GPUCC) += gpucc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
+obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
+obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
@@ -71,6 +73,7 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o
obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
@@ -131,6 +134,7 @@ obj-$(CONFIG_SM_DISPCC_7150) += dispcc-sm7150.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
+obj-$(CONFIG_SM_DISPCC_8750) += dispcc-sm8750.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -143,6 +147,7 @@ obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
+obj-$(CONFIG_SM_GCC_8750) += gcc-sm8750.o
obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
@@ -154,8 +159,10 @@ obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
obj-$(CONFIG_SM_GPUCC_8650) += gpucc-sm8650.o
+obj-$(CONFIG_SM_LPASSCC_6115) += lpasscc-sm6115.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_TCSRCC_8650) += tcsrcc-sm8650.o
+obj-$(CONFIG_SM_TCSRCC_8750) += tcsrcc-sm8750.o
obj-$(CONFIG_SM_VIDEOCC_7150) += videocc-sm7150.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index e8632db2c542..d6c1aea7e9e1 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -73,20 +73,19 @@ static const struct alpha_pll_config ipq5018_pll_config = {
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
.test_ctl_hi_val = 0x00400003,
};
+/* 1.080 GHz configuration */
static const struct alpha_pll_config ipq5332_pll_config = {
.l = 0x2d,
.config_ctl_val = 0x4001075b,
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
index 85e76c7712ad..b73524ae64b1 100644
--- a/drivers/clk/qcom/camcc-x1e80100.c
+++ b/drivers/clk/qcom/camcc-x1e80100.c
@@ -2212,6 +2212,8 @@ static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
},
};
+static struct gdsc cam_cc_titan_top_gdsc;
+
static struct gdsc cam_cc_bps_gdsc = {
.gdscr = 0x10004,
.en_rest_wait_val = 0x2,
@@ -2221,6 +2223,7 @@ static struct gdsc cam_cc_bps_gdsc = {
.name = "cam_cc_bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2233,6 +2236,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
.name = "cam_cc_ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2245,6 +2249,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
.name = "cam_cc_ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2257,6 +2262,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
.name = "cam_cc_ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2269,6 +2275,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
.name = "cam_cc_sfe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index b8351f8c0b84..9a65d14acf71 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -58,6 +58,7 @@
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
#define PLL_TEST_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U2])
+#define PLL_TEST_CTL_U3(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U3])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
@@ -197,6 +198,37 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U1] = 0x34,
[PLL_OFF_TEST_CTL_U2] = 0x38,
},
+ [CLK_ALPHA_PLL_TYPE_PONGO_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_USER_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U2] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ [PLL_OFF_TEST_CTL_U1] = 0x34,
+ [PLL_OFF_TEST_CTL_U2] = 0x38,
+ [PLL_OFF_TEST_CTL_U3] = 0x3c,
+ },
+ [CLK_ALPHA_PLL_TYPE_TAYCAN_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ },
[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO] = {
[PLL_OFF_OPMODE] = 0x04,
[PLL_OFF_STATUS] = 0x0c,
@@ -323,6 +355,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_EVO_PLL_CAL_L_VAL_SHIFT 16
#define LUCID_OLE_PLL_RINGOSC_CAL_L_VAL_SHIFT 24
+/* PONGO ELU PLL specific setting and offsets */
+#define PONGO_PLL_OUT_MASK GENMASK(1, 0)
+#define PONGO_PLL_L_VAL_MASK GENMASK(11, 0)
+#define PONGO_XO_PRESENT BIT(10)
+#define PONGO_CLOCK_SELECT BIT(12)
+
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
#define ZONDA_STAY_IN_CFA BIT(16)
@@ -352,7 +390,8 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
if (ret)
return ret;
- for (count = 200; count > 0; count--) {
+ /* Pongo PLLs using a 32 kHz reference can take upwards of 1500 us to lock. */
+ for (count = 1500; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@@ -432,6 +471,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
mask |= config->pre_div_mask;
mask |= config->post_div_mask;
mask |= config->vco_mask;
+ mask |= config->alpha_en_mask;
+ mask |= config->alpha_mode_mask;
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
@@ -2494,6 +2535,144 @@ const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
+static int alpha_pll_pongo_elu_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Enable the PLL initially for a one-time calibration against the XO. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_XO_PRESENT, PONGO_XO_PRESENT);
+
+ /* Set regmap for wait_for_pll() */
+ pll->clkr.regmap = regmap;
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret) {
+ /* Reverse calibration - disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ return ret;
+ }
+
+ /* Disable PLL after one-time calibration. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Select internally generated clock. */
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_CLOCK_SELECT,
+ PONGO_CLOCK_SELECT);
+
+ return 0;
+}
+
+static int alpha_pll_pongo_elu_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Check if PLL is already enabled */
+ if (trion_pll_is_enabled(pll, regmap))
+ return 0;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+}
+
+static void alpha_pll_pongo_elu_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+static unsigned long alpha_pll_pongo_elu_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 l;
+
+ if (regmap_read(regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ l &= PONGO_PLL_L_VAL_MASK;
+
+ return alpha_pll_calc_rate(parent_rate, l, 0, pll_alpha_width(pll));
+}
+
+const struct clk_ops clk_alpha_pll_pongo_elu_ops = {
+ .prepare = alpha_pll_pongo_elu_prepare,
+ .enable = alpha_pll_pongo_elu_enable,
+ .disable = alpha_pll_pongo_elu_disable,
+ .recalc_rate = alpha_pll_pongo_elu_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_pongo_elu_ops);
+
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll,
+ struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), PONGO_PLL_OUT_MASK,
+ PONGO_PLL_OUT_MASK);
+
+ if (trion_pll_is_enabled(pll, regmap))
+ return;
+
+ if (regmap_read(regmap, PLL_L_VAL(pll), &val))
+ return;
+ val &= PONGO_PLL_L_VAL_MASK;
+ if (val)
+ return;
+
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U2(pll), config->config_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val | PONGO_PLL_OUT_MASK);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U2(pll), config->test_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U3(pll), config->test_ctl_hi3_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+}
+EXPORT_SYMBOL_GPL(clk_pongo_elu_pll_configure);
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index c6d1b8429f95..79aca8525262 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -27,6 +27,8 @@ enum {
CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
+ CLK_ALPHA_PLL_TYPE_PONGO_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
@@ -52,6 +54,7 @@ enum {
PLL_OFF_TEST_CTL_U,
PLL_OFF_TEST_CTL_U1,
PLL_OFF_TEST_CTL_U2,
+ PLL_OFF_TEST_CTL_U3,
PLL_OFF_STATE,
PLL_OFF_STATUS,
PLL_OFF_OPMODE,
@@ -137,6 +140,7 @@ struct alpha_pll_config {
u32 test_ctl_hi_mask;
u32 test_ctl_hi1_val;
u32 test_ctl_hi2_val;
+ u32 test_ctl_hi3_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
@@ -185,13 +189,17 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_zonda_ole_ops clk_alpha_pll_zonda_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
+#define clk_alpha_pll_taycan_elu_ops clk_alpha_pll_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_elu_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
#define clk_alpha_pll_postdiv_lucid_ole_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_elu_ops clk_alpha_pll_postdiv_lucid_evo_ops
+extern const struct clk_ops clk_alpha_pll_pongo_elu_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
@@ -218,6 +226,11 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+#define clk_taycan_elu_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 88845baa7f84..987141c91fe0 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -597,6 +597,7 @@ struct frac_entry {
};
static const struct frac_entry pixel_table[] = {
+ { 1, 1 },
{ 1, 2 },
{ 1, 3 },
{ 3, 16 },
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 80f1f4fcd52a..4fbdf4880d03 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -189,6 +189,7 @@ struct clk_rcg2_gfx3d {
container_of(to_clk_rcg2(_hw), struct clk_rcg2_gfx3d, rcg)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_gp_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_rcg2_fm_ops;
extern const struct clk_ops clk_rcg2_mux_closest_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index bf6406f5279a..8001fd9faf9d 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -8,11 +8,13 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/gcd.h>
#include <linux/minmax.h>
#include <linux/slab.h>
@@ -32,6 +34,7 @@
#define CFG_REG 0x4
#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_DIV_LENGTH 8
#define CFG_SRC_SEL_SHIFT 8
#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT 12
@@ -148,12 +151,32 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
return update_config(rcg);
}
-/*
- * Calculate m/n:d rate
+/**
+ * convert_to_reg_val() - Convert divisor values to hardware values.
+ *
+ * @f: Frequency table with pure m/n/pre_div parameters.
+ */
+static void convert_to_reg_val(struct freq_tbl *f)
+{
+ f->pre_div *= 2;
+ f->pre_div -= 1;
+}
+
+/**
+ * calc_rate() - Calculate rate based on m/n:d values
+ *
+ * @rate: Parent rate.
+ * @m: Multiplier.
+ * @n: Divisor.
+ * @mode: Use zero to ignore m/n calculation.
+ * @hid_div: Pre divisor register value. Pre divisor value
+ * relates to hid_div as pre_div = (hid_div + 1) / 2.
+ *
+ * Return calculated rate according to formula:
*
* parent_rate m
* rate = ----------- x ---
- * hid_div n
+ * pre_div n
*/
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
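With the register encoding pre_div = (hid_div + 1) / 2 described in the kerneldoc above, the rate relation can be checked standalone. A small mirror of calc_rate() with one worked case (illustrative; the kernel version uses do_div() for the 64-bit division):

#include <stdio.h>

/* Mirrors calc_rate(): rate = parent / pre_div * m / n, where the
 * register value hid_div encodes pre_div as (hid_div + 1) / 2.
 */
static unsigned long long calc_rate_demo(unsigned long long rate,
					 unsigned int m, unsigned int n,
					 unsigned int mode,
					 unsigned int hid_div)
{
	if (hid_div)
		rate = rate * 2 / (hid_div + 1);
	if (mode)
		rate = rate * m / n;
	return rate;
}

int main(void)
{
	/* 19.2 MHz parent, pre_div 2 (hid_div 3), m/n = 2/5 -> 3.84 MHz */
	printf("%llu\n", calc_rate_demo(19200000ULL, 2, 5, 1, 3));
	return 0;
}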
@@ -393,16 +416,110 @@ static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
-static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
- u32 *_cfg)
+/**
+ * clk_rcg2_split_div() - Split a multiplier that fits in neither n nor pre_div.
+ *
+ * @multiplier: Multiplier to split between n and pre_div.
+ * @pre_div: Pointer to pre divisor value.
+ * @n: Pointer to n divisor value.
+ * @pre_div_max: Pre divisor maximum value.
+ */
+static inline void clk_rcg2_split_div(int multiplier, unsigned int *pre_div,
+ u16 *n, unsigned int pre_div_max)
+{
+ *n = mult_frac(multiplier * *n, *pre_div, pre_div_max);
+ *pre_div = pre_div_max;
+}
+
+static void clk_rcg2_calc_mnd(u64 parent_rate, u64 rate, struct freq_tbl *f,
+ unsigned int mnd_max, unsigned int pre_div_max)
+{
+ int i = 2;
+ unsigned int pre_div = 1;
+ unsigned long rates_gcd, scaled_parent_rate;
+ u16 m, n = 1, n_candidate = 1, n_max;
+
+ rates_gcd = gcd(parent_rate, rate);
+ m = div64_u64(rate, rates_gcd);
+ scaled_parent_rate = div64_u64(parent_rate, rates_gcd);
+ while (scaled_parent_rate > (mnd_max + m) * pre_div_max) {
+ /* We are exceeding the divisors' range; try a lower scale. */
+ if (m > 1) {
+ m--;
+ scaled_parent_rate = mult_frac(scaled_parent_rate, m, (m + 1));
+ } else {
+ /* Cannot lower the scale; just set the max divisor values. */
+ f->n = mnd_max + m;
+ f->pre_div = pre_div_max;
+ f->m = m;
+ return;
+ }
+ }
+
+ n_max = m + mnd_max;
+
+ while (scaled_parent_rate > 1) {
+ while (scaled_parent_rate % i == 0) {
+ n_candidate *= i;
+ if (n_candidate < n_max)
+ n = n_candidate;
+ else if (pre_div * i < pre_div_max)
+ pre_div *= i;
+ else
+ clk_rcg2_split_div(i, &pre_div, &n, pre_div_max);
+
+ scaled_parent_rate /= i;
+ }
+ i++;
+ }
+
+ f->m = m;
+ f->n = n;
+ f->pre_div = pre_div > 1 ? pre_div : 0;
+}
+
+static int clk_rcg2_determine_gp_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct clk_hw *parent;
+ u64 parent_rate;
+
+ parent = clk_hw_get_parent(hw);
+ parent_rate = clk_get_rate(parent->clk);
+ if (!parent_rate)
+ return -EINVAL;
+
+ clk_rcg2_calc_mnd(parent_rate, req->rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ req->rate = calc_rate(parent_rate, f->m, f->n, f->n, f->pre_div);
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_parent(struct clk_rcg2 *rcg, u8 src, u32 *_cfg)
{
- u32 cfg, mask, d_val, not2d_val, n_minus_m;
struct clk_hw *hw = &rcg->clkr.hw;
- int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ int index = qcom_find_src_index(hw, rcg->parent_map, src);
if (index < 0)
return index;
+ *_cfg &= ~CFG_SRC_SEL_MASK;
+ *_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_mnd(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ u32 cfg, mask, d_val, not2d_val, n_minus_m;
+ int ret;
+
if (rcg->mnd_width && f->n) {
mask = BIT(rcg->mnd_width) - 1;
ret = regmap_update_bits(rcg->clkr.regmap,
@@ -431,9 +548,8 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
}
mask = BIT(rcg->hid_width) - 1;
- mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ mask |= CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
- cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
if (rcg->hw_clk_ctrl)
@@ -445,6 +561,22 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
return 0;
}
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure_parent(rcg, f->src, _cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, _cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg;
@@ -465,6 +597,26 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
return update_config(rcg);
}
+static int clk_rcg2_configure_gp(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, &cfg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
enum freq_policy policy)
{
@@ -518,6 +670,22 @@ static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg2_set_rate(hw, rate, CEIL);
}
+static int clk_rcg2_set_gp_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int ret;
+
+ clk_rcg2_calc_mnd(parent_rate, rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ ret = clk_rcg2_configure_gp(rcg, f);
+
+ return ret;
+}
+
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -645,6 +813,18 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+const struct clk_ops clk_rcg2_gp_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_gp_rate,
+ .set_rate = clk_rcg2_set_gp_rate,
+ .get_duty_cycle = clk_rcg2_get_duty_cycle,
+ .set_duty_cycle = clk_rcg2_set_duty_cycle,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_gp_ops);
+
const struct clk_ops clk_rcg2_floor_ops = {
.is_enabled = clk_rcg2_is_enabled,
.get_parent = clk_rcg2_get_parent,
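The m/n/pre_div search in clk_rcg2_calc_mnd() above first reduces the requested and parent rates by their gcd, then distributes the scaled parent rate between n and pre_div. A standalone sketch of the same idea for one concrete case (simplified: no mnd_max backoff loop, and the scaled value happens to fit in n directly):

#include <stdio.h>

static unsigned long long gcd_ull(unsigned long long a, unsigned long long b)
{
	while (b) {
		unsigned long long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* Derive m/n for a 19.2 MHz parent and a 4.8 MHz request. */
	unsigned long long parent = 19200000, rate = 4800000;
	unsigned long long g = gcd_ull(parent, rate);
	unsigned long long m = rate / g;	/* 1 */
	unsigned long long n = parent / g;	/* 4; kernel may split into pre_div */

	printf("m=%llu n=%llu -> %llu Hz\n", m, n, parent * m / n);
	return 0;
}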
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index 9da034f8f2ff..ccc112c21667 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -224,10 +225,10 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -237,17 +238,14 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_clk_lock);
}
static int clk_rpm_xo_prepare(struct clk_hw *hw)
@@ -324,12 +322,12 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
unsigned long active_rate, sleep_rate;
unsigned long this_rate = 0, this_sleep_rate = 0;
unsigned long peer_rate = 0, peer_sleep_rate = 0;
- int ret = 0;
+ int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -341,19 +339,16 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
active_rate = max(this_rate, peer_rate);
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_clk_lock);
-
- return ret;
+ return 0;
}
static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
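The conversions above rely on <linux/cleanup.h>: guard(mutex)(&lock) acquires the mutex and attaches its release to scope exit, so every return path unlocks automatically and the goto/out labels disappear. A minimal sketch of the pattern:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

/* Sketch: scope-based locking; the mutex is dropped on any return. */
static int guarded_update(int *val, int new)
{
	guard(mutex)(&example_lock);

	if (*val == new)
		return 0;	/* unlocks here */

	*val = new;
	return 1;		/* and here */
}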
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index eefc322ce367..c7675930fde1 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
@@ -206,7 +207,7 @@ static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
c->state = c->valid_state_mask;
WARN(1, "clk: %s failed to %s\n", c->res_name,
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
return ret;
}
@@ -329,7 +330,7 @@ static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
{
struct clk_rpmh *c = to_clk_rpmh(hw);
- return c->aggr_state * c->unit;
+ return (unsigned long)c->aggr_state * c->unit;
}
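The cast above widens the multiplication: aggr_state and unit are both 32-bit, so their product can exceed UINT_MAX and silently wrap before being returned. A standalone demonstration with hypothetical values (correct on LP64 targets, where unsigned long is 64-bit):

#include <stdio.h>

int main(void)
{
	unsigned int aggr_state = 19200, unit = 1000000; /* hypothetical */

	/* 32-bit multiply wraps: 19200000000 mod 2^32 */
	printf("%u\n", aggr_state * unit);
	/* Widening one operand makes the multiply 64-bit: */
	printf("%lu\n", (unsigned long)aggr_state * unit);
	return 0;
}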
static const struct clk_ops clk_rpmh_bcm_ops = {
@@ -368,6 +369,8 @@ DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1);
DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1);
DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1);
+DEFINE_CLK_RPMH_VRM(rf_clk3, _a2, "rfclka3", 2);
+
DEFINE_CLK_RPMH_VRM(clk1, _a1, "clka1", 1);
DEFINE_CLK_RPMH_VRM(clk2, _a1, "clka2", 1);
DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1);
@@ -807,6 +810,45 @@ static const struct clk_rpmh_desc clk_rpmh_x1e80100 = {
.num_clks = ARRAY_SIZE(x1e80100_rpmh_clocks),
};
+static struct clk_hw *qcs615_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_qcs615 = {
+ .clks = qcs615_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(qcs615_rpmh_clocks),
+};
+
+static struct clk_hw *sm8750_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw,
+ [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a2.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a2_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
+ .clks = sm8750_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -890,10 +932,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
{ .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p},
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
+ { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
@@ -909,7 +953,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
{ .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550},
{ .compatible = "qcom,sm8650-rpmh-clk", .data = &clk_rpmh_sm8650},
- { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
+ { .compatible = "qcom,sm8750-rpmh-clk", .data = &clk_rpmh_sm8750},
{ .compatible = "qcom,x1e80100-rpmh-clk", .data = &clk_rpmh_x1e80100},
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 45c5255bcd11..29ef08a9d50b 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -309,10 +310,10 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -322,17 +323,14 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_smd_clk_lock);
}
static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -345,10 +343,10 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long peer_rate = 0, peer_sleep_rate = 0;
int ret = 0;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -360,19 +358,16 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
active_rate = max(this_rate, peer_rate);
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_smd_clk_lock);
-
- return ret;
+ return 0;
}
static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -700,6 +695,60 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8936 = {
.num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
};
+static struct clk_smd_rpm *msm8937_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8937 = {
+ .clks = msm8937_clks,
+ .num_clks = ARRAY_SIZE(msm8937_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
+static struct clk_smd_rpm *msm8940_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8940 = {
+ .clks = msm8940_clks,
+ .num_clks = ARRAY_SIZE(msm8940_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
static struct clk_smd_rpm *msm8974_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
[RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
@@ -1216,6 +1265,8 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8917", .data = &rpm_clk_msm8917 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
+ { .compatible = "qcom,rpmcc-msm8937", .data = &rpm_clk_msm8937 },
+ { .compatible = "qcom,rpmcc-msm8940", .data = &rpm_clk_msm8940 },
{ .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 },
diff --git a/drivers/clk/qcom/clk-spmi-pmic-div.c b/drivers/clk/qcom/clk-spmi-pmic-div.c
index f394031eb0e5..41a0a4f3b4fb 100644
--- a/drivers/clk/qcom/clk-spmi-pmic-div.c
+++ b/drivers/clk/qcom/clk-spmi-pmic-div.c
@@ -3,6 +3,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -140,30 +141,26 @@ static int clk_spmi_pmic_div_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned int div_factor = div_to_div_factor(parent_rate / rate);
- unsigned long flags;
bool enabled;
int ret;
- spin_lock_irqsave(&clkdiv->lock, flags);
+ guard(spinlock_irqsave)(&clkdiv->lock);
+
enabled = is_spmi_pmic_clkdiv_enabled(clkdiv);
if (enabled) {
ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
if (ret)
- goto unlock;
+ return ret;
}
ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1,
DIV_CTL1_DIV_FACTOR_MASK, div_factor);
if (ret)
- goto unlock;
+ return ret;
if (enabled)
ret = __spmi_pmic_clkdiv_set_enable_state(clkdiv, true,
div_factor);
-
-unlock:
- spin_unlock_irqrestore(&clkdiv->lock, flags);
-
return ret;
}
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index 449ffea2295d..d7bb1399e102 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -40,8 +40,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
index 939887f82ecc..2b236d52b29f 100644
--- a/drivers/clk/qcom/dispcc-sm6115.c
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -48,8 +48,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index 50facb36701a..2bc6b5f99f57 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -187,13 +187,12 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
.cmd_rcgr = 0x1144,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
new file mode 100644
index 000000000000..0358dff91da5
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -0,0 +1,1963 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/clock/qcom,sm8750-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DISP_CC_PLL2_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco pongo_elu_vco[] = {
+ { 38400000, 38400000, 0 },
+};
+
+static const struct pll_vco taycan_elu_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll2_config = {
+ .l = 0x493,
+ .alpha = 0x0,
+ .config_ctl_val = 0x60000f68,
+ .config_ctl_hi_val = 0x0001c808,
+ .config_ctl_hi1_val = 0x00000000,
+ .config_ctl_hi2_val = 0x040082f4,
+ .test_ctl_val = 0x00008000,
+ .test_ctl_hi_val = 0x0080c496,
+ .test_ctl_hi1_val = 0x40100180,
+ .test_ctl_hi2_val = 0x441001bc,
+ .test_ctl_hi3_val = 0x002003d8,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00e50302,
+};
+
+static struct clk_alpha_pll disp_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = pongo_elu_vco,
+ .num_vco = ARRAY_SIZE(pongo_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_PONGO_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_SLEEP_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_pongo_elu_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .hw = &disp_cc_pll2.clkr.hw },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_11[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_11[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(207000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+		/*
+		 * TODO: Downstream does not manage this clock directly, but
+		 * places votes via a new hardware block called "cesta".
+		 * It is not yet clear whether that approach should be taken
+		 * instead of manual control.
+		 */
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_osc_clk_src[] = {
+ F(38400000, P_DISP_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_10,
+ .freq_tbl = ftbl_disp_cc_osc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_11,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_11,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm8750_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_PLL2] = &disp_cc_pll2.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sm8750_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8750_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11014,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm8750_desc = {
+ .config = &disp_cc_sm8750_regmap_config,
+ .clks = disp_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8750_clocks),
+ .resets = disp_cc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8750_resets),
+ .gdscs = disp_cc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8750_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8750_match_table);
+
+static int disp_cc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8750_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }
+
+ clk_taycan_elu_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_taycan_elu_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ clk_pongo_elu_pll_configure(&disp_cc_pll2, regmap, &disp_cc_pll2_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe07c); /* DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xe05c); /* DISP_CC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc00c); /* DISP_CC_MDSS_RSCC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc008); /* DISP_CC_MDSS_RSCC_VSYNC_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8750_desc, regmap);
+ if (ret)
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8750_driver = {
+ .probe = disp_cc_sm8750_probe,
+ .driver = {
+ .name = "disp_cc-sm8750",
+ .of_match_table = disp_cc_sm8750_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm8750_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8750 Driver");
+MODULE_LICENSE("GPL");
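
A note on the probe sequence above: qcom_branch_set_clk_en() is used for branches that must stay always-on but are deliberately not exposed as Linux clocks, and it amounts to setting the enable bit of the branch CBCR register at the given offset. Roughly (a sketch of the helper's effect, not code from this file):

/* Approximately what qcom_branch_set_clk_en(regmap, cbcr) does: */
static void demo_branch_set_clk_en(struct regmap *regmap, u32 cbcr)
{
	regmap_update_bits(regmap, cbcr, BIT(0), BIT(0));	/* CLK_ENABLE */
}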
diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c
index 88a7d5b2e751..d5b218b76e29 100644
--- a/drivers/clk/qcom/gcc-ipq5424.c
+++ b/drivers/clk/qcom/gcc-ipq5424.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/interconnect-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -12,6 +13,7 @@
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,ipq5424-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
#include <dt-bindings/reset/qcom,ipq5424-gcc.h>
#include "clk-alpha-pll.h"
@@ -325,6 +327,24 @@ static struct clk_rcg2 gcc_xo_clk_src = {
},
};
+static struct clk_branch gcc_xo_clk = {
+ .halt_reg = 0x34018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_fixed_factor gcc_xo_div4_clk_src = {
.mult = 1,
.div = 4,
@@ -1097,24 +1117,6 @@ static struct clk_branch gcc_adss_pwm_clk = {
},
};
-static struct clk_branch gcc_apss_dbg_clk = {
- .halt_reg = 0x2402c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x2402c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_apss_dbg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_cnoc_pcie0_1lane_s_clk = {
.halt_reg = 0x31088,
.halt_check = BRANCH_HALT,
@@ -2785,7 +2787,6 @@ static struct clk_branch gcc_pcie3_rchng_clk = {
static struct clk_regmap *gcc_ipq5424_clocks[] = {
[GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
[GCC_ADSS_PWM_CLK_SRC] = &gcc_adss_pwm_clk_src.clkr,
- [GCC_APSS_DBG_CLK] = &gcc_apss_dbg_clk.clkr,
[GCC_CNOC_PCIE0_1LANE_S_CLK] = &gcc_cnoc_pcie0_1lane_s_clk.clkr,
[GCC_CNOC_PCIE1_1LANE_S_CLK] = &gcc_cnoc_pcie1_1lane_s_clk.clkr,
[GCC_CNOC_PCIE2_2LANE_S_CLK] = &gcc_cnoc_pcie2_2lane_s_clk.clkr,
@@ -2920,6 +2921,7 @@ static struct clk_regmap *gcc_ipq5424_clocks[] = {
[GCC_QPIC_CLK_SRC] = &gcc_qpic_clk_src.clkr,
[GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
[GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+ [GCC_XO_CLK] = &gcc_xo_clk.clkr,
[GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
[GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
[GPLL0] = &gpll0.clkr,
@@ -3230,6 +3232,20 @@ static const struct qcom_reset_map gcc_ipq5424_resets[] = {
[GCC_QUSB2_1_PHY_BCR] = { 0x3C030, 0 },
};
+#define IPQ_APPS_ID 5424 /* unique base ID for the icc provider's node IDs */
+
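+/*
+ * Each entry pairs an interconnect master/slave path with the GCC branch
+ * clock that gates it, so the common qcom_cc probe can register a
+ * clock-based interconnect provider for these paths.
+ */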
+static const struct qcom_icc_hws_data icc_ipq5424_hws[] = {
+ { MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE0, SLAVE_CNOC_PCIE0, GCC_CNOC_PCIE0_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE1, SLAVE_CNOC_PCIE1, GCC_CNOC_PCIE1_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE2, SLAVE_ANOC_PCIE2, GCC_ANOC_PCIE2_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE2, SLAVE_CNOC_PCIE2, GCC_CNOC_PCIE2_2LANE_S_CLK },
+ { MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE3, SLAVE_CNOC_PCIE3, GCC_CNOC_PCIE3_2LANE_S_CLK },
+ { MASTER_CNOC_USB, SLAVE_CNOC_USB, GCC_CNOC_USB_CLK },
+};
+
static const struct of_device_id gcc_ipq5424_match_table[] = {
{ .compatible = "qcom,ipq5424-gcc" },
{ }
@@ -3260,6 +3276,8 @@ static const struct qcom_cc_desc gcc_ipq5424_desc = {
.num_resets = ARRAY_SIZE(gcc_ipq5424_resets),
.clk_hws = gcc_ipq5424_hws,
.num_clk_hws = ARRAY_SIZE(gcc_ipq5424_hws),
+ .icc_hws = icc_ipq5424_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_hws),
};
static int gcc_ipq5424_probe(struct platform_device *pdev)
@@ -3272,6 +3290,7 @@ static struct platform_driver gcc_ipq5424_driver = {
.driver = {
.name = "qcom,gcc-ipq5424",
.of_match_table = gcc_ipq5424_match_table,
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index ab0f7fc665a9..d861191b0c85 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -4194,10 +4194,9 @@ static const struct alpha_pll_config ubi32_pll_config = {
.test_ctl_hi_val = 0x4000,
};
+/* 1200 MHz configuration */
static const struct alpha_pll_config nss_crypto_pll_config = {
.l = 0x32,
- .alpha = 0x0,
- .alpha_hi = 0x0,
.config_ctl_val = 0x4001055b,
.main_output_mask = BIT(0),
.pre_div_val = 0x0,
@@ -4206,7 +4205,6 @@ static const struct alpha_pll_config nss_crypto_pll_config = {
.post_div_mask = GENMASK(11, 8),
.vco_mask = GENMASK(21, 20),
.vco_val = 0x0,
- .alpha_en_mask = BIT(24),
};
static struct clk_hw *gcc_ipq6018_hws[] = {
diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c
index 6e6068b168e6..07f1b78d737a 100644
--- a/drivers/clk/qcom/gcc-mdm9607.c
+++ b/drivers/clk/qcom/gcc-mdm9607.c
@@ -535,7 +535,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
};
static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
- .cmd_rcgr = 0x6044,
+ .cmd_rcgr = 0x7044,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
diff --git a/drivers/clk/qcom/gcc-qcs615.c b/drivers/clk/qcom/gcc-qcs615.c
new file mode 100644
index 000000000000..9695446bc2a3
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcs615.c
@@ -0,0 +1,3034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_AUX2_DIV,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL3_OUT_MAIN_DIV,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL0, used in place of the PLL's normal postdiv */
+static struct clk_fixed_factor gpll0_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL3, used in place of the PLL's normal postdiv */
+static struct clk_fixed_factor gpll3_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll3_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x13000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll6_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x1a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x1b000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x1b000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll8_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN_DIV, 4 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3_out_aux2_div.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
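+/*
+ * Frequency tables: each F(rate, src, pre_div, m, n) row names the parent
+ * (resolved through the RCG's parent_map) together with the pre-divider
+ * and M/N counter values used to derive the target rate from it.
+ */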
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_rgmii_clk_src = {
+ .cmd_rcgr = 0x601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(60000000, P_GPLL0_OUT_AUX2_DIV, 5, 0, 0),
+ F(133250000, P_GPLL3_OUT_MAIN_DIV, 4, 0, 0),
+ F(266500000, P_GPLL3_OUT_MAIN_DIV, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2_DIV, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2_DIV, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2_DIV, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2_DIV, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2_DIV, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2_DIV, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2_DIV, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2_DIV, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
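+/*
+ * All QUPv3 serial-engine RCGs below share the wrap0_s0 frequency table;
+ * each SE keeps its own clk_init_data so it registers as a separate clock.
+ */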
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x173a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x174d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x17608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x12028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(202000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x7707c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_sec_master_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_sec_master_clk_src = {
+ .cmd_rcgr = 0xa601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_sec_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa6034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb2_sec_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb2_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa6060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2_DIV, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2_DIV, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_vsensor_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
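+/*
+ * Branch clocks below: enable_mask gates the branch, and halt_reg is
+ * polled according to halt_check (BRANCH_HALT polls the local halt bit,
+ * BRANCH_HALT_VOTED accounts for the shared voting scheme, and
+ * BRANCH_HALT_SKIP/BRANCH_HALT_DELAY do not poll).
+ */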
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_sec_axi_clk = {
+ .halt_reg = 0xa6084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_east_clk = {
+ .halt_reg = 0x6a008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_west_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_sec_axi_clk = {
+ .halt_reg = 0xa609c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa609c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
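+/*
+ * CLK_IS_CRITICAL: the CPU subsystem's AHB interface clock must never be
+ * gated, so the framework keeps it enabled unconditionally.
+ */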
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_ptp_clk = {
+ .halt_reg = 0x6034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_rgmii_clk = {
+ .halt_reg = 0x6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_slv_ahb_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x173a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x174d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x17604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x8c030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx3_usb2_clkref_clk = {
+ .halt_reg = 0x8c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx3_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x12008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4819c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77044,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77078,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77040,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_master_clk = {
+ .halt_reg = 0xa6010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_mock_utmi_clk = {
+ .halt_reg = 0xa6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_sleep_clk = {
+ .halt_reg = 0xa6014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_prim_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_clkref_clk = {
+ .halt_reg = 0x8c018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_aux_clk = {
+ .halt_reg = 0xa6050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_com_aux_clk = {
+ .halt_reg = 0xa6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_pipe_clk = {
+ .halt_reg = 0xa6058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa6058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_hw *gcc_qcs615_hws[] = {
+ [GPLL0_OUT_AUX2_DIV] = &gpll0_out_aux2_div.hw,
+ [GPLL3_OUT_AUX2_DIV] = &gpll3_out_aux2_div.hw,
+};
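+
+/*
+ * gpll0_out_aux2_div and gpll3_out_aux2_div expose a bare clk_hw rather
+ * than a clk_regmap, so they are registered through the clk_hws list here
+ * instead of through gcc_qcs615_clocks below.
+ */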
+
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb20_sec_gdsc = {
+ .gdscr = 0xa6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb20_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_qcs615_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_SEC_AXI_CLK] = &gcc_aggre_usb2_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AHB2PHY_EAST_CLK] = &gcc_ahb2phy_east_clk.clkr,
+ [GCC_AHB2PHY_WEST_CLK] = &gcc_ahb2phy_west_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB2_SEC_AXI_CLK] = &gcc_cfg_noc_usb2_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr,
+ [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr,
+ [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr,
+ [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_RX3_USB2_CLKREF_CLK] = &gcc_rx3_usb2_clkref_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_SEC_MASTER_CLK] = &gcc_usb20_sec_master_clk.clkr,
+ [GCC_USB20_SEC_MASTER_CLK_SRC] = &gcc_usb20_sec_master_clk_src.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK] = &gcc_usb20_sec_mock_utmi_clk.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb20_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB20_SEC_SLEEP_CLK] = &gcc_usb20_sec_sleep_clk.clkr,
+ [GCC_USB2_PRIM_CLKREF_CLK] = &gcc_usb2_prim_clkref_clk.clkr,
+ [GCC_USB2_SEC_CLKREF_CLK] = &gcc_usb2_sec_clkref_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK] = &gcc_usb2_sec_phy_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK_SRC] = &gcc_usb2_sec_phy_aux_clk_src.clkr,
+ [GCC_USB2_SEC_PHY_COM_AUX_CLK] = &gcc_usb2_sec_phy_com_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_PIPE_CLK] = &gcc_usb2_sec_phy_pipe_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+};
+
+static struct gdsc *gcc_qcs615_gdscs[] = {
+ [EMAC_GDSC] = &emac_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB20_SEC_GDSC] = &usb20_sec_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_qcs615_resets[] = {
+ [GCC_EMAC_BCR] = { 0x6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xd000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xd004 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x50018 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB20_SEC_BCR] = { 0xa6000 },
+ [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x50000 },
+ [GCC_SDCC1_BCR] = { 0x12000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
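+
+/*
+ * The QUP RCGs listed here support hardware Dynamic Frequency Switching.
+ * qcom_cc_register_rcg_dfs() in probe checks whether firmware left DFS
+ * enabled on each RCG; if so, it drops the static frequency table and
+ * switches the clock to the DFS ops, under which rates are read back from
+ * the DFS performance-level registers.
+ */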
+
+static const struct regmap_config gcc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa609c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs615_desc = {
+ .config = &gcc_qcs615_regmap_config,
+ .clk_hws = gcc_qcs615_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_qcs615_hws),
+ .clks = gcc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcs615_clocks),
+ .resets = gcc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcs615_resets),
+ .gdscs = gcc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcs615_gdscs),
+};
+
+static const struct of_device_id gcc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs615_match_table);
+
+static int gcc_qcs615_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcs615_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ /*
+ * Disable the GPLL0 active input to MM blocks and GPU
+ * via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x0b084, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x9b000, BIT(0), BIT(0));
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xb008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb044); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb048); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb040); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x48004); /* GCC_CPUSS_GNOC_CLK */
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcs615_desc, regmap);
+}
+
+static struct platform_driver gcc_qcs615_driver = {
+ .probe = gcc_qcs615_probe,
+ .driver = {
+ .name = "gcc-qcs615",
+ .of_match_table = gcc_qcs615_match_table,
+ },
+};
+
+static int __init gcc_qcs615_init(void)
+{
+ return platform_driver_register(&gcc_qcs615_driver);
+}
+subsys_initcall(gcc_qcs615_init);
+
+static void __exit gcc_qcs615_exit(void)
+{
+ platform_driver_unregister(&gcc_qcs615_driver);
+}
+module_exit(gcc_qcs615_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index dc3aa7014c3e..6d0f9cede5cf 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -284,11 +284,6 @@ static struct clk_rcg2 gcc_sdm670_cpuss_rbcpr_clk_src = {
};
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
- F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
};
@@ -302,7 +297,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -316,7 +311,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -330,7 +325,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -454,7 +449,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@@ -470,7 +465,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -486,7 +481,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -502,7 +497,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -518,7 +513,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -534,7 +529,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -550,7 +545,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.name = "gcc_qupv3_wrap0_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -566,7 +561,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.name = "gcc_qupv3_wrap0_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -582,7 +577,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -598,7 +593,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -614,7 +609,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -630,7 +625,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -646,7 +641,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -662,7 +657,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -678,7 +673,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.name = "gcc_qupv3_wrap1_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -694,7 +689,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.name = "gcc_qupv3_wrap1_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index a811fad2aa27..74346dc02606 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -182,6 +182,14 @@ static const struct clk_parent_data gcc_parent_data_2_ao[] = {
{ .hw = &gpll0_out_odd.clkr.hw },
};
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
static const struct parent_map gcc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -701,13 +709,12 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
.cmd_rcgr = 0x3a0b0,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -764,13 +771,12 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.cmd_rcgr = 0x1a034,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index 5abaeddd6afc..862a9bf73bcb 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -3003,7 +3003,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3014,7 +3014,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3025,7 +3025,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3036,7 +3036,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fd9d6544bdd5..9dd5c48f33be 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3437,7 +3437,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3448,7 +3448,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3459,7 +3459,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3470,7 +3470,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
new file mode 100644
index 000000000000..b36d70976095
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -0,0 +1,3274 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
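+
+/*
+ * The DT_* indices above refer to entries of the "clocks" property in the
+ * GCC device tree node; the clk_parent_data .index fields below resolve
+ * against that order.
+ */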
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_elu_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b080,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
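+
+/*
+ * clk_regmap_phy_mux_ops model the PCIe PIPE clock mux: the mux is parked
+ * on the safe on-chip source while the PHY is disabled and is switched to
+ * the PHY's PIPE clock output when the clock is enabled.
+ */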
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770ec,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x39070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
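+
+/*
+ * Each F(rate, src, pre_div, m, n) row requests rate = src_rate / pre_div
+ * * m / n, with the M/N counter bypassed when m and n are zero. Assuming
+ * the usual 600 MHz GPLL0 (300 MHz on its _OUT_EVEN tap), the rows above
+ * work out to 300 / 6 = 50 MHz, 600 / 6 = 100 MHz and 600 / 3 = 200 MHz.
+ */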
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b084,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
+ .cmd_rcgr = 0x17008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
+ .cmd_rcgr = 0x17040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
+ .cmd_rcgr = 0x1705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
+ .cmd_rcgr = 0x17078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
+ .cmd_rcgr = 0x17094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
+ .cmd_rcgr = 0x170b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
+ .cmd_rcgr = 0x170cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
+ .cmd_rcgr = 0x170e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
+ .cmd_rcgr = 0x17104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* Check this frequency table. */
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x188c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
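+
+/*
+ * The init data is kept in a named variable rather than a compound
+ * literal so that the DFS registration path (assuming these RCGs are
+ * listed via DEFINE_RCG_DFS, as in the other GCC drivers) can rewrite
+ * the ops at probe time when firmware has DFS enabled.
+ */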
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x182a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x183dc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18518,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18654,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18790,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src[] = {
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_ibi_ctrl_0_clk_src = {
+ .cmd_rcgr = 0x1e9f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e28c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e504,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e640,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_s6_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(128000000, P_GCC_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x1e77c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0x1e8b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1401c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
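+
+/*
+ * Note: the SDCC RCGs use clk_rcg2_floor_ops instead of the shared ops
+ * used above. Floor ops round a requested rate down to the nearest
+ * supported frequency, the safe behaviour for SD/eMMC where the card
+ * interface must never be overclocked.
+ */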
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7708c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
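+
+/*
+ * Note: fractional pre-dividers such as 4.5 and 2.5 are valid here
+ * because the F() macro stores 2 * h - 1 in the register field, so
+ * half-integer dividers encode exactly: 600 MHz / 4.5 gives the
+ * 133333333 entry and 600 MHz / 2.5 the 240000000 one.
+ */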
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x39030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x1828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x39060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
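+
+/*
+ * Note (assumption): these two clocks are plain read-only register
+ * dividers on the RCG outputs above; clk_regmap_div_ro_ops never
+ * writes the 4-bit field, which is presumably programmed by firmware,
+ * and with the common qcom regmap-divider encoding a stored value v
+ * means divide-by-(v + 1).
+ */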
+
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
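+
+/*
+ * Note: branch clocks are simple gates. When enable_reg points into
+ * the 0x52000 voting region rather than at the branch's own register,
+ * the enable bit is one vote among several masters; the _VOTED and
+ * _SKIP halt_check variants account for that, e.g. the halt bit is not
+ * trusted on disable because another master's vote can keep the clock
+ * running.
+ */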
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71150,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71150,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1007c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9f008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9f018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_core_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s0_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s1_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s2_clk = {
+ .halt_reg = 0x1703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s3_clk = {
+ .halt_reg = 0x17058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s4_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s5_clk = {
+ .halt_reg = 0x17090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s6_clk = {
+ .halt_reg = 0x170ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s7_clk = {
+ .halt_reg = 0x170c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s8_clk = {
+ .halt_reg = 0x170e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s8_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s9_clk = {
+ .halt_reg = 0x17100,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s9_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x2315c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x188bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x18290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x183cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18508,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18644,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18780,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x232b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x232a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_2_clk = {
+ .halt_reg = 0x1e9ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_3_clk = {
+ .halt_reg = 0x1e9f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e27c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e3b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e630,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x1e76c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0x1e8a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23140,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23144,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_2_ahb_clk = {
+ .halt_reg = 0x1e9e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_3_ahb_clk = {
+ .halt_reg = 0x1e9e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_3_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x23298,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23298,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x2329c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2329c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x7707c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7707c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770d8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
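+
+/*
+ * Note: the UFS rx/tx symbol clocks use BRANCH_HALT_DELAY, so no halt
+ * bit is polled at all and the driver just waits briefly. That is the
+ * usual choice for clocks sourced from an external PHY, whose status
+ * the GCC block cannot observe reliably.
+ */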
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3902c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3902c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3906c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x3906c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
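+
+/*
+ * Note: GDSCs (globally distributed switch controllers) gate power
+ * domains rather than clocks. gdscr is the control/status register,
+ * the *_wait_val fields set hardware settle times, and the VOTABLE
+ * flag with collapse_ctrl/collapse_mask routes the on/off request
+ * through the shared vote register at 0x5214c instead of a direct
+ * write.
+ */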
+
+static struct gdsc gcc_pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(2),
+ .pd = {
+ .name = "gcc_pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_phy_gdsc = {
+ .gdscr = 0x50018,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_sm8750_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_I2C_CORE_CLK] = &gcc_qupv3_i2c_core_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK] = &gcc_qupv3_i2c_s0_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK_SRC] = &gcc_qupv3_i2c_s0_clk_src.clkr,
+ [GCC_QUPV3_I2C_S1_CLK] = &gcc_qupv3_i2c_s1_clk.clkr,
+ [GCC_QUPV3_I2C_S1_CLK_SRC] = &gcc_qupv3_i2c_s1_clk_src.clkr,
+ [GCC_QUPV3_I2C_S2_CLK] = &gcc_qupv3_i2c_s2_clk.clkr,
+ [GCC_QUPV3_I2C_S2_CLK_SRC] = &gcc_qupv3_i2c_s2_clk_src.clkr,
+ [GCC_QUPV3_I2C_S3_CLK] = &gcc_qupv3_i2c_s3_clk.clkr,
+ [GCC_QUPV3_I2C_S3_CLK_SRC] = &gcc_qupv3_i2c_s3_clk_src.clkr,
+ [GCC_QUPV3_I2C_S4_CLK] = &gcc_qupv3_i2c_s4_clk.clkr,
+ [GCC_QUPV3_I2C_S4_CLK_SRC] = &gcc_qupv3_i2c_s4_clk_src.clkr,
+ [GCC_QUPV3_I2C_S5_CLK] = &gcc_qupv3_i2c_s5_clk.clkr,
+ [GCC_QUPV3_I2C_S5_CLK_SRC] = &gcc_qupv3_i2c_s5_clk_src.clkr,
+ [GCC_QUPV3_I2C_S6_CLK] = &gcc_qupv3_i2c_s6_clk.clkr,
+ [GCC_QUPV3_I2C_S6_CLK_SRC] = &gcc_qupv3_i2c_s6_clk_src.clkr,
+ [GCC_QUPV3_I2C_S7_CLK] = &gcc_qupv3_i2c_s7_clk.clkr,
+ [GCC_QUPV3_I2C_S7_CLK_SRC] = &gcc_qupv3_i2c_s7_clk_src.clkr,
+ [GCC_QUPV3_I2C_S8_CLK] = &gcc_qupv3_i2c_s8_clk.clkr,
+ [GCC_QUPV3_I2C_S8_CLK_SRC] = &gcc_qupv3_i2c_s8_clk_src.clkr,
+ [GCC_QUPV3_I2C_S9_CLK] = &gcc_qupv3_i2c_s9_clk.clkr,
+ [GCC_QUPV3_I2C_S9_CLK_SRC] = &gcc_qupv3_i2c_s9_clk_src.clkr,
+ [GCC_QUPV3_I2C_S_AHB_CLK] = &gcc_qupv3_i2c_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC] = &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_2_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_2_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_3_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_sm8750_gdscs[] = {
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_0_PHY_GDSC] = &gcc_pcie_0_phy_gdsc,
+ [GCC_UFS_MEM_PHY_GDSC] = &gcc_ufs_mem_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB3_PHY_GDSC] = &gcc_usb3_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sm8750_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9f000 },
+ [GCC_EVA_AXI0_CLK_ARES] = { 0x9f008, 2 },
+ [GCC_EVA_AXI0C_CLK_ARES] = { 0x9f018, 2 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0x11000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUPV3_WRAPPER_I2C_BCR] = { 0x17000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32028, 2 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static const struct regmap_config gcc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8750_desc = {
+ .config = &gcc_sm8750_regmap_config,
+ .clks = gcc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8750_clocks),
+ .resets = gcc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8750_resets),
+ .gdscs = gcc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8750_gdscs),
+};
+
+static const struct of_device_id gcc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8750_match_table);
+
+static int gcc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8750_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep clocks always enabled:
+ * gcc_cam_bist_mclk_ahb_clk
+ * gcc_camera_ahb_clk
+ * gcc_camera_xo_clk
+ * gcc_disp_ahb_clk
+ * gcc_eva_ahb_clk
+ * gcc_eva_xo_clk
+ * gcc_gpu_cfg_ahb_clk
+ * gcc_video_ahb_clk
+ * gcc_video_xo_clk
+ * gcc_pcie_rscc_cfg_ahb_clk
+ * gcc_pcie_rscc_xo_clk
+ */
+ qcom_branch_set_clk_en(regmap, 0xa0004);
+ qcom_branch_set_clk_en(regmap, 0x26004);
+ qcom_branch_set_clk_en(regmap, 0x26034);
+ qcom_branch_set_clk_en(regmap, 0x27004);
+ qcom_branch_set_clk_en(regmap, 0x9f004);
+ qcom_branch_set_clk_en(regmap, 0x9f01c);
+ qcom_branch_set_clk_en(regmap, 0x71004);
+ qcom_branch_set_clk_en(regmap, 0x32004);
+ qcom_branch_set_clk_en(regmap, 0x32038);
+ regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
+ regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
+
+ /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8750_driver = {
+ .probe = gcc_sm8750_probe,
+ .driver = {
+ .name = "gcc-sm8750",
+ .of_match_table = gcc_sm8750_match_table,
+ },
+};
+
+static int __init gcc_sm8750_init(void)
+{
+ return platform_driver_register(&gcc_sm8750_driver);
+}
+subsys_initcall(gcc_sm8750_init);
+
+static void __exit gcc_sm8750_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8750_driver);
+}
+module_exit(gcc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8750 Driver");
+MODULE_LICENSE("GPL");
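The probe above keeps a set of housekeeping clocks permanently enabled through qcom_branch_set_clk_en(), plus two raw regmap_update_bits() writes on the vote register at 0x52010 (presumably the PCIe RSCC clocks from the comment list). As a rough sketch, assuming the helper simply sets bit 0 (CLK_ENABLE) of the given branch control register, the operation amounts to the read-modify-write below; the register value is simulated and illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the write performed by qcom_branch_set_clk_en(regmap, cbcr):
 * set bit 0 (CLK_ENABLE) of a branch control register while leaving the
 * other bits untouched. The register content is simulated here; in the
 * driver this is a regmap read-modify-write against MMIO.
 */
static uint32_t cbcr_set_clk_en(uint32_t cbcr)
{
	return cbcr | UINT32_C(1) << 0;		/* BIT(0) == CLK_ENABLE */
}

int main(void)
{
	uint32_t cbcr = UINT32_C(0x80000000);	/* CLK_OFF status set: gated */

	printf("before %#010x, after %#010x\n",
	       (unsigned int)cbcr, (unsigned int)cbcr_set_clk_en(cbcr));
	return 0;
}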
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 8ea25aa25dff..7288af845434 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6083,7 +6083,7 @@ static struct gdsc gcc_usb20_prim_gdsc = {
.pd = {
.name = "gcc_usb20_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gpucc-x1p42100.c b/drivers/clk/qcom/gpucc-x1p42100.c
new file mode 100644
index 000000000000..dba783339613
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-x1p42100.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 560.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1d,
+ .alpha = 0x2aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 440.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x16,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x9480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9480,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x947c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x947c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x928c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_cc_gx_gdsc = {
+ .gdscr = 0x905c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gpu_cc_x1p42100_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *gpu_cc_x1p42100_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cc_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_cc_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_x1p42100_resets[] = {
+ [GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x9104 },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_GX_BCR] = { 0x9058 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static const struct regmap_config gpu_cc_x1p42100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9988,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc gpu_cc_x1p42100_desc = {
+ .config = &gpu_cc_x1p42100_regmap_config,
+ .clks = gpu_cc_x1p42100_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_x1p42100_clocks),
+ .resets = gpu_cc_x1p42100_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_x1p42100_resets),
+ .gdscs = gpu_cc_x1p42100_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_x1p42100_gdscs),
+};
+
+static const struct of_device_id gpu_cc_x1p42100_match_table[] = {
+ { .compatible = "qcom,x1p42100-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_x1p42100_match_table);
+
+static int gpu_cc_x1p42100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_x1p42100_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &gpu_cc_x1p42100_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver gpu_cc_x1p42100_driver = {
+ .probe = gpu_cc_x1p42100_probe,
+ .driver = {
+ .name = "gpucc-x1p42100",
+ .of_match_table = gpu_cc_x1p42100_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_x1p42100_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC X1P42100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
new file mode 100644
index 000000000000..432d4c4b7aa6
--- /dev/null
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/*
+ * The CMN PLL block expects its reference clock from the on-board Wi-Fi
+ * block and supplies fixed-rate output clocks to the networking hardware
+ * blocks and to GCC. The networking-related blocks include the PPE
+ * (packet process engine), the externally connected PHY or switch
+ * devices, and the PCS.
+ *
+ * On the IPQ9574 SoC, three 50 MHz clocks and one 25 MHz clock are output
+ * from the CMN PLL to the Ethernet PHY (or switch), and one 353 MHz clock
+ * to the PPE. The other fixed-rate output clocks are supplied to GCC
+ * (24 MHz as XO and 32 kHz as sleep clock), and to the PCS at 31.25 MHz.
+ *
+ * +---------+
+ * | GCC |
+ * +--+---+--+
+ * AHB CLK| |SYS CLK
+ * V V
+ * +-------+---+------+
+ * | +-------------> eth0-50mhz
+ * REF CLK | IPQ9574 |
+ * -------->+ +-------------> eth1-50mhz
+ * | CMN PLL block |
+ * | +-------------> eth2-50mhz
+ * | |
+ * +----+----+----+---+-------------> eth-25mhz
+ * | | |
+ * V V V
+ * GCC PCS NSS/PPE
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+
+#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
+#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
+
+#define CMN_PLL_LOCKED 0x64
+#define CMN_PLL_CLKS_LOCKED BIT(8)
+
+#define CMN_PLL_POWER_ON_AND_RESET 0x780
+#define CMN_ANA_EN_SW_RSTN BIT(6)
+
+#define CMN_PLL_REFCLK_CONFIG 0x784
+#define CMN_PLL_REFCLK_EXTERNAL BIT(9)
+#define CMN_PLL_REFCLK_DIV GENMASK(8, 4)
+#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0)
+
+#define CMN_PLL_CTRL 0x78c
+#define CMN_PLL_CTRL_LOCK_DETECT_EN BIT(15)
+
+#define CMN_PLL_DIVIDER_CTRL 0x794
+#define CMN_PLL_DIVIDER_CTRL_FACTOR GENMASK(9, 0)
+
+/**
+ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
+ * @id: Clock specifier to be supplied
+ * @name: Clock name to be registered
+ * @rate: Clock rate
+ */
+struct cmn_pll_fixed_output_clk {
+ unsigned int id;
+ const char *name;
+ unsigned long rate;
+};
+
+/**
+ * struct clk_cmn_pll - CMN PLL hardware specific data
+ * @regmap: hardware regmap.
+ * @hw: handle between common and hardware-specific interfaces
+ */
+struct clk_cmn_pll {
+ struct regmap *regmap;
+ struct clk_hw hw;
+};
+
+#define CLK_PLL_OUTPUT(_id, _name, _rate) { \
+ .id = _id, \
+ .name = _name, \
+ .rate = _rate, \
+}
+
+#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
+
+static const struct regmap_config ipq_cmn_pll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fc,
+ .fast_io = true,
+};
+
+static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
+ CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
+ CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
+ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
+ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+};
+
+/*
+ * The CMN PLL has a single parent clock, which supports several
+ * possible parent clock rates. Each parent clock rate is reflected
+ * by a specific reference index value in the hardware.
+ */
+static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
+{
+ int index = -EINVAL;
+
+ switch (parent_rate) {
+ case 25000000:
+ index = 3;
+ break;
+ case 31250000:
+ index = 4;
+ break;
+ case 40000000:
+ index = 6;
+ break;
+ case 48000000:
+ case 96000000:
+		/*
+		 * Parent clock rates 48 MHz and 96 MHz share the same
+		 * reference clock index. 96 MHz additionally requires the
+		 * source clock divider to be programmed as 2.
+		 */
+ index = 7;
+ break;
+ case 50000000:
+ index = 8;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ u32 val, factor;
+
+ /*
+ * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
+ * by HW according to the parent clock rate.
+ */
+ regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
+ factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
+
+ return parent_rate * 2 * factor;
+}
+
+static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int ret;
+
+ /* Validate the rate of the single parent clock. */
+ ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
+
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * This function initializes the CMN PLL to enable the fixed-rate
+ * output clocks. The PLL is expected to be configured only once.
+ */
+static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ int ret, index;
+ u32 val;
+
+ /*
+ * Configure the reference input clock selection as per the given
+ * parent clock. The output clock rates are always of fixed value.
+ */
+ index = ipq_cmn_pll_find_freq_index(parent_rate);
+ if (index < 0)
+ return index;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_INDEX,
+ FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
+ if (ret)
+ return ret;
+
+	/*
+	 * Update the source clock rate selection and program the source
+	 * clock divider as 2 when the parent clock rate is 96 MHz.
+	 */
+ if (parent_rate == 96000000) {
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
+ CMN_PLL_REFCLK_SRC_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PLL locked detect. */
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_CTRL,
+ CMN_PLL_CTRL_LOCK_DETECT_EN);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset the CMN PLL block to ensure the updated configurations
+ * take effect.
+ */
+ ret = regmap_clear_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1200);
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+ /* Stability check of CMN PLL output clocks. */
+ return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
+ (val & CMN_PLL_CLKS_LOCKED),
+ 100, 100 * USEC_PER_MSEC);
+}
+
+static const struct clk_ops clk_cmn_pll_ops = {
+ .recalc_rate = clk_cmn_pll_recalc_rate,
+ .determine_rate = clk_cmn_pll_determine_rate,
+ .set_rate = clk_cmn_pll_set_rate,
+};
+
+static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
+{
+ struct clk_parent_data pdata = { .index = 0 };
+ struct device *dev = &pdev->dev;
+ struct clk_init_data init = {};
+ struct clk_cmn_pll *cmn_pll;
+ struct regmap *regmap;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
+ if (!cmn_pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "cmn_pll";
+ init.parent_data = &pdata;
+ init.num_parents = 1;
+ init.ops = &clk_cmn_pll_ops;
+
+ cmn_pll->hw.init = &init;
+ cmn_pll->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &cmn_pll->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &cmn_pll->hw;
+}
+
+static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
+{
+ const struct cmn_pll_fixed_output_clk *fixed_clk;
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *cmn_pll_hw;
+ unsigned int num_clks;
+ struct clk_hw *hw;
+ int ret, i;
+
+ fixed_clk = ipq9574_output_clks;
+ num_clks = ARRAY_SIZE(ipq9574_output_clks);
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ /*
+ * Register the CMN PLL clock, which is the parent clock of
+ * the fixed rate output clocks.
+ */
+ cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
+ if (IS_ERR(cmn_pll_hw))
+ return PTR_ERR(cmn_pll_hw);
+
+ /* Register the fixed rate output clocks. */
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
+ cmn_pll_hw, 0,
+ fixed_clk[i].rate);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto unregister_fixed_clk;
+ }
+
+ hw_data->hws[fixed_clk[i].id] = hw;
+ }
+
+	/*
+	 * Provide the CMN PLL clock. The clock rate of the CMN PLL is
+	 * configured to 12 GHz by the DT property assigned-clock-rates-u64.
+	 */
+ hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
+ hw_data->num = num_clks + 1;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
+ if (ret)
+ goto unregister_fixed_clk;
+
+ platform_set_drvdata(pdev, hw_data);
+
+ return 0;
+
+unregister_fixed_clk:
+ while (i > 0)
+ clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
+
+ return ret;
+}
+
+static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+	/*
+	 * To access the CMN PLL registers, the GCC AHB & SYS clocks of
+	 * the CMN PLL block must be enabled.
+	 */
+ ret = pm_clk_add(dev, "ahb");
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to add AHB clock\n");
+
+ ret = pm_clk_add(dev, "sys");
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to add SYS clock\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ /* Register CMN PLL clock and fixed rate output clocks. */
+ ret = ipq_cmn_pll_register_clks(pdev);
+ pm_runtime_put(dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+				     "Failed to register CMN PLL clocks\n");
+
+ return 0;
+}
+
+static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
+ int i;
+
+ /*
+ * The clock with index CMN_PLL_CLK is unregistered by
+ * device management.
+ */
+ for (i = 0; i < hw_data->num; i++) {
+ if (i != CMN_PLL_CLK)
+ clk_hw_unregister(hw_data->hws[i]);
+ }
+}
+
+static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
+ { .compatible = "qcom,ipq9574-cmn-pll", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
+
+static struct platform_driver ipq_cmn_pll_clk_driver = {
+ .probe = ipq_cmn_pll_clk_probe,
+ .remove = ipq_cmn_pll_clk_remove,
+ .driver = {
+ .name = "ipq_cmn_pll",
+ .of_match_table = ipq_cmn_pll_clk_ids,
+ .pm = &ipq_cmn_pll_pm_ops,
+ },
+};
+module_platform_driver(ipq_cmn_pll_clk_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
+MODULE_LICENSE("GPL");
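To make the clk_cmn_pll_recalc_rate() arithmetic above concrete: the PLL rate is parent_rate * 2 * FACTOR, where FACTOR is read back from CMN_PLL_DIVIDER_CTRL. The standalone sketch below walks through that formula with a hypothetical factor of 125, chosen only so that a 48 MHz reference reproduces the 12 GHz rate cited in the driver comments.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Worked example of the CMN PLL rate formula from clk_cmn_pll_recalc_rate():
 * rate = parent_rate * 2 * FACTOR. FACTOR normally comes from the
 * CMN_PLL_DIVIDER_CTRL register; 125 is a hypothetical value, chosen so a
 * 48 MHz reference yields the 12 GHz configured through the devicetree
 * property assigned-clock-rates-u64.
 */
int main(void)
{
	uint64_t parent_rate = 48000000;	/* 48 MHz reference */
	uint64_t factor = 125;			/* hypothetical register value */

	printf("PLL rate: %" PRIu64 " Hz\n", parent_rate * 2 * factor);
	return 0;
}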
diff --git a/drivers/clk/qcom/lpasscc-sm6115.c b/drivers/clk/qcom/lpasscc-sm6115.c
new file mode 100644
index 000000000000..8ffdab71b948
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sm6115.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, 2023 Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6115-lpasscc.h>
+
+#include "common.h"
+#include "reset.h"
+
+static const struct qcom_reset_map lpass_audiocc_sm6115_resets[] = {
+ [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpass_audiocc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-audio-csr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpass_audiocc_sm6115_reset_desc = {
+ .config = &lpass_audiocc_sm6115_regmap_config,
+ .resets = lpass_audiocc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpass_audiocc_sm6115_resets),
+};
+
+static const struct qcom_reset_map lpasscc_sm6115_resets[] = {
+ [LPASS_SWR_TX_CONFIG_CGCR] = { .reg = 0x100, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpasscc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-tcsr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpasscc_sm6115_reset_desc = {
+ .config = &lpasscc_sm6115_regmap_config,
+ .resets = lpasscc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpasscc_sm6115_resets),
+};
+
+static const struct of_device_id lpasscc_sm6115_match_table[] = {
+ {
+ .compatible = "qcom,sm6115-lpassaudiocc",
+ .data = &lpass_audiocc_sm6115_reset_desc,
+ }, {
+ .compatible = "qcom,sm6115-lpasscc",
+ .data = &lpasscc_sm6115_reset_desc,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpasscc_sm6115_match_table);
+
+static int lpasscc_sm6115_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc = of_device_get_match_data(&pdev->dev);
+
+ return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static struct platform_driver lpasscc_sm6115_driver = {
+ .probe = lpasscc_sm6115_probe,
+ .driver = {
+ .name = "lpasscc-sm6115",
+ .of_match_table = lpasscc_sm6115_match_table,
+ },
+};
+
+module_platform_driver(lpasscc_sm6115_driver);
+
+MODULE_DESCRIPTION("QTI LPASSCC SM6115 Driver");
+MODULE_LICENSE("GPL");
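Each reset map entry above carries a register offset, a bit position, and a 500 us delay; as I read the common qcom reset helper, a triggered reset asserts the named CGCR bit, waits the programmed delay, then deasserts it. A minimal user-space sketch of that pulse, with a simulated register standing in for MMIO and a print standing in for udelay():

#include <stdint.h>
#include <stdio.h>

/* Simulated MMIO word standing in for a CGCR register. */
static uint32_t cgcr;

static void reg_rmw(uint32_t mask, uint32_t val)
{
	cgcr = (cgcr & ~mask) | (val & mask);
}

/*
 * Sketch of the pulse a { .reg, .bit, .udelay } qcom_reset_map entry
 * describes: assert the reset bit, wait the programmed delay, deassert.
 * udelay() is replaced with a print since this runs in user space.
 */
static void reset_pulse(unsigned int bit, unsigned int udelay_us)
{
	reg_rmw(1u << bit, 1u << bit);		/* assert */
	printf("udelay(%u)\n", udelay_us);	/* placeholder for udelay() */
	reg_rmw(1u << bit, 0);			/* deassert */
}

int main(void)
{
	reset_pulse(1, 500);	/* LPASS_AUDIO_SWR_RX_CGCR: bit 1, 500 us */
	printf("cgcr = %#x\n", (unsigned int)cgcr);
	return 0;
}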
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 3f41249c5ae4..20d1c43f35d9 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -37,6 +37,7 @@ enum {
P_DSI2_PLL_DSICLK,
P_DSI1_PLL_BYTECLK,
P_DSI2_PLL_BYTECLK,
+ P_LVDS_PLL,
};
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
@@ -143,6 +144,20 @@ static const struct clk_parent_data mmcc_pxo_dsi2_dsi1[] = {
{ .fw_name = "dsi1pll", .name = "dsi1pll" },
};
+static const struct parent_map mmcc_pxo_dsi2_dsi1_lvds_map[] = {
+ { P_PXO, 0 },
+ { P_DSI2_PLL_DSICLK, 1 },
+ { P_LVDS_PLL, 2 },
+ { P_DSI1_PLL_DSICLK, 3 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi2_dsi1_lvds[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi2pll", .name = "dsi2pll" },
+ { .fw_name = "lvdspll", .name = "mpd4_lvds_pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
+};
+
static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
{ P_PXO, 0 },
{ P_DSI1_PLL_BYTECLK, 1 },
@@ -2439,26 +2454,42 @@ static struct clk_rcg dsi2_pixel_src = {
},
.s = {
.src_sel_shift = 0,
- .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ .parent_map = mmcc_pxo_dsi2_dsi1_lvds_map,
},
.clkr = {
.enable_reg = 0x0094,
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_pixel_src",
- .parent_data = mmcc_pxo_dsi2_dsi1,
- .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
+ .parent_data = mmcc_pxo_dsi2_dsi1_lvds,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1_lvds),
.ops = &clk_rcg_pixel_ops,
},
},
};
+static struct clk_branch dsi2_pixel_lvds_src = {
+ .clkr = {
+ .enable_reg = 0x0094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_pixel_lvds_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_simple_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch dsi2_pixel_clk = {
.halt_reg = 0x01d0,
.halt_bit = 19,
.clkr = {
.enable_reg = 0x0094,
- .enable_mask = BIT(0),
+ .enable_mask = 0,
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk2_clk",
.parent_hws = (const struct clk_hw*[]){
@@ -2471,6 +2502,24 @@ static struct clk_branch dsi2_pixel_clk = {
},
};
+static struct clk_branch lvds_clk = {
+ .halt_reg = 0x024c,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0264,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_lvds_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_lvds_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gfx2d0_ahb_clk = {
.hwcg_reg = 0x0038,
.hwcg_bit = 28,
@@ -2799,6 +2848,8 @@ static struct clk_regmap *mmcc_msm8960_clks[] = {
[CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
[CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
[PLL2] = &pll2.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_msm8960_resets[] = {
@@ -2983,6 +3034,8 @@ static struct clk_regmap *mmcc_apq8064_clks[] = {
[VCAP_CLK] = &vcap_clk.clkr,
[VCAP_NPL_CLK] = &vcap_npl_clk.clkr,
[PLL15] = &pll15.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_apq8064_resets[] = {
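As the hunks above suggest, BIT(0) of register 0x0094 now gates the new dsi2_pixel_lvds_src rather than dsi2_pixel_clk itself, so the MDP pixel output and the LVDS branch both hang off the shared dsi2_pixel_src RCG. A sketch of the resulting topology, reconstructed from the diff rather than quoted from the patch:

/*
 * pxo / dsi2pll / lvdspll / dsi1pll
 *                |
 *         dsi2_pixel_src (RCG, enable 0x0094 BIT(2))
 *          /                  \
 *   dsi2_pixel_clk      dsi2_pixel_lvds_src (gate, 0x0094 BIT(0))
 *   ("mdp_pclk2_clk")         |
 *                       lvds_clk ("mdp_lvds_clk", 0x0264 BIT(1))
 */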
diff --git a/drivers/clk/qcom/tcsrcc-sm8750.c b/drivers/clk/qcom/tcsrcc-sm8750.c
new file mode 100644
index 000000000000..242e320986ef
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-sm8750.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-tcsr.h>
+
+#include "clk-branch.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_pcie_0_clkref_en = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_clkref_en = {
+ .halt_reg = 0x1000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_ufs_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_clkref_en = {
+ .halt_reg = 0x2000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_clkref_en = {
+ .halt_reg = 0x3000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x3000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_sm8750_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_UFS_CLKREF_EN] = &tcsr_ufs_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_sm8750_desc = {
+ .config = &tcsr_cc_sm8750_regmap_config,
+ .clks = tcsr_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sm8750_clocks),
+};
+
+static const struct of_device_id tcsr_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_sm8750_match_table);
+
+static int tcsr_cc_sm8750_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_sm8750_desc);
+}
+
+static struct platform_driver tcsr_cc_sm8750_driver = {
+ .probe = tcsr_cc_sm8750_probe,
+ .driver = {
+ .name = "tcsr_cc-sm8750",
+ .of_match_table = tcsr_cc_sm8750_match_table,
+ },
+};
+
+static int __init tcsr_cc_sm8750_init(void)
+{
+ return platform_driver_register(&tcsr_cc_sm8750_driver);
+}
+subsys_initcall(tcsr_cc_sm8750_init);
+
+static void __exit tcsr_cc_sm8750_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_sm8750_driver);
+}
+module_exit(tcsr_cc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI TCSR_CC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
index 97b8ca0f9181..19d433034884 100644
--- a/drivers/clk/ralink/clk-mtmips.c
+++ b/drivers/clk/ralink/clk-mtmips.c
@@ -266,7 +266,6 @@ err_clk_unreg:
}
static struct mtmips_clk_fixed rt3883_fixed_clocks[] = {
- CLK_FIXED("xtal", NULL, 40000000),
CLK_FIXED("periph", "xtal", 40000000)
};
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index ff01f5f0ed20..5a4bc3f94d49 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -40,6 +40,7 @@ config CLK_RENESAS
select CLK_R9A07G054 if ARCH_R9A07G054
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
+ select CLK_R9A09G047 if ARCH_R9A09G047
select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
@@ -194,6 +195,10 @@ config CLK_R9A09G011
bool "RZ/V2M clock support" if COMPILE_TEST
select CLK_RZG2L
+config CLK_R9A09G047
+ bool "RZ/G3E clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
@@ -234,7 +239,7 @@ config CLK_RZG2L
select RESET_CONTROLLER
config CLK_RZV2H
- bool "RZ/V2H(P) family clock support" if COMPILE_TEST
+ bool "RZ/{G3E,V2H(P)} family clock support" if COMPILE_TEST
select RESET_CONTROLLER
config CLK_RENESAS_VBATTB
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 82efaa835ac7..2d6e746939c4 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
+obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 55c8dd032fc3..d45571096b96 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -238,6 +238,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
+ DEF_MOD("vspx0", 1028, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("vspx1", 1029, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx0", 1100, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx1", 1101, R8A779G0_CLK_S0D1_VIO),
DEF_MOD("tsn", 2723, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index 9067e407cbc6..607fa815b6c1 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -177,6 +177,9 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("canfd0", 328, R8A779H0_CLK_SASYNCPERD2),
DEF_MOD("csi40", 331, R8A779H0_CLK_CSI),
DEF_MOD("csi41", 400, R8A779H0_CLK_CSI),
+ DEF_MOD("dis0", 411, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("dsitxlink0", 415, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("fcpvd0", 508, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif1", 515, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif2", 516, R8A779H0_CLK_SASYNCPERD1),
@@ -225,6 +228,7 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("vin15", 811, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin16", 812, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin17", 813, R8A779H0_CLK_S0D4_VIO),
+ DEF_MOD("vspd0", 830, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("wdt1:wdt0", 907, R8A779H0_CLK_R),
DEF_MOD("cmt0", 910, R8A779H0_CLK_R),
DEF_MOD("cmt1", 911, R8A779H0_CLK_R),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index c1348e2d450c..dcda19318b2a 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -20,15 +20,24 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
#define R9A06G032_SYSCTRL_USB 0x00
-#define R9A06G032_SYSCTRL_USB_H2MODE (1<<1)
+#define R9A06G032_SYSCTRL_USB_H2MODE BIT(1)
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
+#define R9A06G032_SYSCTRL_RSTEN 0x120
+#define R9A06G032_SYSCTRL_RSTEN_MRESET_EN BIT(0)
+#define R9A06G032_SYSCTRL_RSTCTRL 0x198
+/* These work for both reset registers */
+#define R9A06G032_SYSCTRL_SWRST BIT(6)
+#define R9A06G032_SYSCTRL_WDA7RST_1 BIT(2)
+#define R9A06G032_SYSCTRL_WDA7RST_0 BIT(1)
+
/**
* struct regbit - describe one bit in a register
* @reg: offset of register relative to base address,
@@ -1270,6 +1279,12 @@ static void r9a06g032_clocks_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+static int r9a06g032_restart_handler(struct sys_off_data *data)
+{
+ writel(R9A06G032_SYSCTRL_SWRST, sysctrl_priv->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ return NOTIFY_DONE;
+}
+
static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks)
{
struct device_node *usbf_np;
@@ -1324,6 +1339,18 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
r9a06g032_init_h2mode(clocks);
+ /* Clear potentially pending resets */
+ writel(R9A06G032_SYSCTRL_WDA7RST_0 | R9A06G032_SYSCTRL_WDA7RST_1,
+ clocks->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ /* Allow software reset */
+ writel(R9A06G032_SYSCTRL_SWRST | R9A06G032_SYSCTRL_RSTEN_MRESET_EN,
+ clocks->reg + R9A06G032_SYSCTRL_RSTEN);
+
+ error = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH,
+ r9a06g032_restart_handler, NULL);
+ if (error)
+ dev_warn(dev, "couldn't register restart handler (%d)\n", error);
+
for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
const char *parent_name = d->source ?
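The probe changes above follow the generic sys-off pattern: acknowledge any pending watchdog resets, unmask the software-reset source in RSTEN, then register a restart callback. A minimal sketch of that registration pattern, with hypothetical device names and register offsets (MY_RSTCTRL, MY_SWRST and my_base are illustrative, not taken from this driver):

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/reboot.h>

    #define MY_RSTCTRL	0x198		/* hypothetical reset control register */
    #define MY_SWRST	BIT(6)		/* hypothetical software-reset bit */

    static void __iomem *my_base;	/* assumed mapped earlier in probe */

    static int my_restart_handler(struct sys_off_data *data)
    {
    	writel(MY_SWRST, my_base + MY_RSTCTRL);	/* SoC resets immediately */
    	return NOTIFY_DONE;
    }

    static int my_probe(struct platform_device *pdev)
    {
    	int ret;

    	ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
    					    SYS_OFF_PRIO_HIGH,
    					    my_restart_handler, NULL);
    	if (ret)
    		dev_warn(&pdev->dev, "couldn't register restart handler (%d)\n", ret);
    	return 0;
    }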
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index b2ae8cdc4723..0e7e3bf05b52 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -187,6 +187,7 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
DEF_FIXED("OSC2", R9A08G045_OSCCLK2, CLK_EXTAL, 1, 3),
DEF_FIXED("HP", R9A08G045_CLK_HP, CLK_PLL6, 1, 2),
+ DEF_FIXED("TSU", R9A08G045_CLK_TSU, CLK_PLL2_DIV2, 1, 8),
};
static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
@@ -209,6 +210,14 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
+ DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0),
+ DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1),
+ DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2),
+ DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3),
+ DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4),
+ DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5),
+ DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6),
+ DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7),
DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
@@ -224,7 +233,14 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2),
DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3),
DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0),
+ DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1),
+ DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2),
+ DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3),
+ DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4),
+ DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5),
DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
+ DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0),
+ DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1),
DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0),
};
@@ -238,6 +254,10 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+ DEF_RST(R9A08G045_SSI0_RST_M2_REG, 0x870, 0),
+ DEF_RST(R9A08G045_SSI1_RST_M2_REG, 0x870, 1),
+ DEF_RST(R9A08G045_SSI2_RST_M2_REG, 0x870, 2),
+ DEF_RST(R9A08G045_SSI3_RST_M2_REG, 0x870, 3),
DEF_RST(R9A08G045_USB_U2H0_HRESETN, 0x878, 0),
DEF_RST(R9A08G045_USB_U2H1_HRESETN, 0x878, 1),
DEF_RST(R9A08G045_USB_U2P_EXL_SYSRST, 0x878, 2),
@@ -249,9 +269,16 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_I2C2_MRST, 0x880, 2),
DEF_RST(R9A08G045_I2C3_MRST, 0x880, 3),
DEF_RST(R9A08G045_SCIF0_RST_SYSTEM_N, 0x884, 0),
+ DEF_RST(R9A08G045_SCIF1_RST_SYSTEM_N, 0x884, 1),
+ DEF_RST(R9A08G045_SCIF2_RST_SYSTEM_N, 0x884, 2),
+ DEF_RST(R9A08G045_SCIF3_RST_SYSTEM_N, 0x884, 3),
+ DEF_RST(R9A08G045_SCIF4_RST_SYSTEM_N, 0x884, 4),
+ DEF_RST(R9A08G045_SCIF5_RST_SYSTEM_N, 0x884, 5),
DEF_RST(R9A08G045_GPIO_RSTN, 0x898, 0),
DEF_RST(R9A08G045_GPIO_PORT_RESETN, 0x898, 1),
DEF_RST(R9A08G045_GPIO_SPARE_RESETN, 0x898, 2),
+ DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
+ DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -286,6 +313,14 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), 0),
DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), 0),
+ DEF_PD("ssi0", R9A08G045_PD_SSI0,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(10)), 0),
+ DEF_PD("ssi1", R9A08G045_PD_SSI1,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(11)), 0),
+ DEF_PD("ssi2", R9A08G045_PD_SSI2,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(12)), 0),
+ DEF_PD("ssi3", R9A08G045_PD_SSI3,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(13)), 0),
DEF_PD("usb0", R9A08G045_PD_USB0,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)), 0),
DEF_PD("usb1", R9A08G045_PD_USB1,
@@ -306,6 +341,18 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)), 0),
DEF_PD("scif0", R9A08G045_PD_SCIF0,
DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), 0),
+ DEF_PD("scif1", R9A08G045_PD_SCIF1,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(2)), 0),
+ DEF_PD("scif2", R9A08G045_PD_SCIF2,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(3)), 0),
+ DEF_PD("scif3", R9A08G045_PD_SCIF3,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(4)), 0),
+ DEF_PD("scif4", R9A08G045_PD_SCIF4,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(5)), 0),
+ DEF_PD("scif5", R9A08G045_PD_SCIF5,
+ DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(4)), 0),
+ DEF_PD("adc", R9A08G045_PD_ADC,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(14)), 0),
DEF_PD("vbat", R9A08G045_PD_VBAT,
DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)),
GENPD_FLAG_ALWAYS_ON),
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
new file mode 100644
index 000000000000..536d922bed70
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G3E CPG driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g047-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV16,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G047_CA55_0_CORECLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G047_CA55_0_CORECLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G047_CA55_0_CORECLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+};
+
+static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+};
+
+const struct rzv2h_cpg_info r9a09g047_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g047_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g047_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g047_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g047_mod_clks),
+ .num_hw_mod_clks = 28 * 16,
+
+ /* Resets */
+ .resets = r9a09g047_resets,
+ .num_resets = ARRAY_SIZE(r9a09g047_resets),
+
+ .num_mstop_bits = 208,
+};
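The dtable_1_8 and dtable_2_64 arrays in the new file above are ordinary clk_div_table maps from register field values to divider ratios, terminated by a zero-div sentinel; the common divider code derives rate = parent_rate / div. A sketch of the lookup it performs (ddiv_lookup is an illustrative helper, not part of the driver):

    #include <linux/clk-provider.h>

    /* Return the divider for a register field value, or 0 if the value
     * is not listed (tables end with a .div == 0 sentinel).
     */
    static unsigned int ddiv_lookup(const struct clk_div_table *table,
    				unsigned int val)
    {
    	for (; table->div; table++)
    		if (table->val == val)
    			return table->div;
    	return 0;
    }

    /* e.g. with dtable_2_64, val 4 selects /64: rate = parent_rate / 64 */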
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index 7c4507fd34e6..3705e18f66ad 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -28,6 +28,7 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLVDO,
/* Internal Core Clocks */
CLK_PLLCM33_DIV16,
@@ -35,7 +36,13 @@ enum clk_ids {
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_CRU1,
+ CLK_PLLVDO_CRU2,
+ CLK_PLLVDO_CRU3,
/* Module Clocks */
MOD_CLK_BASE,
@@ -49,6 +56,12 @@ static const struct clk_div_table dtable_1_8[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -69,6 +82,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
@@ -78,7 +92,14 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -94,49 +115,117 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
- DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5),
- DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3),
- DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4),
- DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5),
- DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6),
- DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7),
- DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8),
- DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9),
- DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10),
- DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11),
- DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12),
- DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13),
- DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14),
- DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15),
- DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16),
- DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17),
- DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18),
- DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15),
- DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19),
- DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20),
- DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21),
- DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22),
- DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23),
- DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24),
- DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25),
- DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26),
- DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27),
- DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3),
- DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4),
- DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5),
- DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6),
- DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7),
- DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8),
- DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9),
- DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10),
- DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11),
- DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12),
- DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13),
- DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14),
+ DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
+ BUS_MSTOP_NONE),
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3,
+ BUS_MSTOP(5, BIT(10))),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4,
+ BUS_MSTOP(5, BIT(11))),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5,
+ BUS_MSTOP(2, BIT(13))),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6,
+ BUS_MSTOP(2, BIT(14))),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7,
+ BUS_MSTOP(11, BIT(13))),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8,
+ BUS_MSTOP(11, BIT(14))),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9,
+ BUS_MSTOP(11, BIT(15))),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10,
+ BUS_MSTOP(12, BIT(0))),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_1_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 5, 6, 21,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD_NO_PM("cru_1_vclk", CLK_PLLVDO_CRU1, 13, 6, 6, 22,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_1_pclk", CLK_PLLDTY_DIV16, 13, 7, 6, 23,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_2_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 8, 6, 24,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD_NO_PM("cru_2_vclk", CLK_PLLVDO_CRU2, 13, 9, 6, 25,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_2_pclk", CLK_PLLDTY_DIV16, 13, 10, 6, 26,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_3_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 11, 6, 27,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD_NO_PM("cru_3_vclk", CLK_PLLVDO_CRU3, 13, 12, 6, 28,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
+ BUS_MSTOP(9, BIT(7))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
@@ -162,6 +251,18 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(12, 8, 5, 25), /* CRU_1_PRESETN */
+ DEF_RST(12, 9, 5, 26), /* CRU_1_ARESETN */
+ DEF_RST(12, 10, 5, 27), /* CRU_1_S_RESETN */
+ DEF_RST(12, 11, 5, 28), /* CRU_2_PRESETN */
+ DEF_RST(12, 12, 5, 29), /* CRU_2_ARESETN */
+ DEF_RST(12, 13, 5, 30), /* CRU_2_S_RESETN */
+ DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
+ DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
+ DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
@@ -179,4 +280,6 @@ const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
/* Resets */
.resets = r9a09g057_resets,
.num_resets = ARRAY_SIZE(r9a09g057_resets),
+
+ .num_mstop_bits = 192,
};
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 79e7a90c3b1b..bf85501709f0 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -979,7 +979,7 @@ static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
const struct cpg_mssr_info *info)
{
- struct device_node *soc = of_find_node_by_path("/soc");
+ struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
struct device_node *node;
uint32_t args[MAX_PHANDLE_ARGS];
unsigned int *ids = NULL;
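The one-line change above switches to the scope-based cleanup helper from linux/cleanup.h: with __free(device_node), of_node_put() runs automatically when the variable goes out of scope, so every early return in cpg_mssr_reserved_init() drops the reference without an explicit put. A minimal sketch of the pattern (count_soc_children is a hypothetical example):

    #include <linux/cleanup.h>
    #include <linux/of.h>

    static int count_soc_children(void)
    {
    	struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
    	struct device_node *child;
    	int n = 0;

    	if (!soc)
    		return -ENODEV;

    	for_each_child_of_node(soc, child)
    		n++;

    	return n;	/* of_node_put(soc) runs automatically here */
    }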
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index b524a9d33610..a4c1e92e1fd7 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -40,6 +41,9 @@
#define GET_RST_OFFSET(x) (0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4))
+#define CPG_BUS_1_MSTOP (0xd00)
+#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
+
#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
@@ -64,6 +68,7 @@
* @resets: Array of resets
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @mstop_count: Array of per-bit MSTOP usage counters
* @rcdev: Reset controller entity
*/
struct rzv2h_cpg_priv {
@@ -78,6 +83,8 @@ struct rzv2h_cpg_priv {
unsigned int num_resets;
unsigned int last_dt_core_clk;
+ atomic_t *mstop_count;
+
struct reset_controller_dev rcdev;
};
@@ -97,7 +104,9 @@ struct pll_clk {
* struct mod_clock - Module clock
*
* @priv: CPG private data
+ * @mstop_data: packed MSTOP register index and bit mask for this clock
* @hw: handle between common and hardware-specific interfaces
+ * @no_pm: flag to indicate PM is not supported
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
@@ -105,7 +114,9 @@ struct pll_clk {
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
+ unsigned int mstop_data;
struct clk_hw hw;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
@@ -431,8 +442,71 @@ fail:
core->name, PTR_ERR(clk));
}
+static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]))
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]) ||
+ atomic_dec_and_test(&mstop[i]))
+ val |= BIT(i) << 16 | BIT(i);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ u32 bitmask;
+ u32 offset;
+
+ if (clock->mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(clock->mon_index);
+ bitmask = BIT(clock->mon_bit);
+ } else {
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+ }
+
+ return readl(priv->base + offset) & bitmask;
+}
+
static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
+ bool enabled = rzv2h_mod_clock_is_enabled(hw);
struct mod_clock *clock = to_mod_clock(hw);
unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
struct rzv2h_cpg_priv *priv = clock->priv;
@@ -444,11 +518,20 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
enable ? "ON" : "OFF");
+ if (enabled == enable)
+ return 0;
+
value = bitmask << 16;
- if (enable)
+ if (enable) {
value |= bitmask;
-
- writel(value, priv->base + reg);
+ writel(value, priv->base + reg);
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else {
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
+ writel(value, priv->base + reg);
+ }
if (!enable || clock->mon_index < 0)
return 0;
@@ -474,24 +557,6 @@ static void rzv2h_mod_clock_disable(struct clk_hw *hw)
rzv2h_mod_clock_endisable(hw, false);
}
-static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
-{
- struct mod_clock *clock = to_mod_clock(hw);
- struct rzv2h_cpg_priv *priv = clock->priv;
- u32 bitmask;
- u32 offset;
-
- if (clock->mon_index >= 0) {
- offset = GET_CLK_MON_OFFSET(clock->mon_index);
- bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
- }
-
- return readl(priv->base + offset) & bitmask;
-}
-
static const struct clk_ops rzv2h_mod_clock_ops = {
.enable = rzv2h_mod_clock_enable,
.disable = rzv2h_mod_clock_disable,
@@ -541,8 +606,10 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
clock->on_bit = mod->on_bit;
clock->mon_index = mod->mon_index;
clock->mon_bit = mod->mon_bit;
+ clock->no_pm = mod->no_pm;
clock->priv = priv;
clock->hw.init = &init;
+ clock->mstop_data = mod->mstop_data;
ret = devm_clk_hw_register(dev, &clock->hw);
if (ret) {
@@ -552,6 +619,41 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
priv->clks[id] = clock->hw.clk;
+ /*
+ * Keep the module clocks and MSTOP bits in sync: bump the MSTOP usage
+ * counters for any module clock that was already turned ON by an earlier
+ * boot stage such as the bootloader.
+ */
+ if (clock->mstop_data != BUS_MSTOP_NONE &&
+ !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ /*
+ * Critical clocks are turned ON immediately upon registration, and the
+ * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
+ * However, if the critical clocks were already turned ON by the initial
+ * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
+ */
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (atomic_read(&mstop[i]))
+ continue;
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ }
+
return;
fail:
@@ -668,17 +770,51 @@ struct rzv2h_cpg_pd {
struct generic_pm_domain genpd;
};
+static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
+ const struct of_phandle_args *clkspec)
+{
+ if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ return false;
+
+ switch (clkspec->args[0]) {
+ case CPG_MOD: {
+ struct rzv2h_cpg_priv *priv = pd->priv;
+ unsigned int id = clkspec->args[1];
+ struct mod_clock *clock;
+
+ if (id >= priv->num_mod_clks)
+ return false;
+
+ if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
+ return false;
+
+ clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));
+
+ return !clock->no_pm;
+ }
+
+ case CPG_CORE:
+ default:
+ return false;
+ }
+}
+
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
+ struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
struct clk *clk;
+ unsigned int i;
int error;
- int i = 0;
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
- &clkspec)) {
+ for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+ if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
+ of_node_put(clkspec.np);
+ continue;
+ }
+
if (once) {
once = false;
error = pm_clk_create(dev);
@@ -700,7 +836,6 @@ static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device
error);
goto fail_put;
}
- i++;
}
return 0;
@@ -786,6 +921,11 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
if (!clks)
return -ENOMEM;
+ priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
+ sizeof(*priv->mstop_count), GFP_KERNEL);
+ if (!priv->mstop_count)
+ return -ENOMEM;
+
priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
info->num_resets, GFP_KERNEL);
if (!priv->resets)
@@ -833,6 +973,12 @@ static const struct of_device_id rzv2h_cpg_match[] = {
.data = &r9a09g057_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G047
+ {
+ .compatible = "renesas,r9a09g047-cpg",
+ .data = &r9a09g047_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
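The new MSTOP helpers above reference-count each bus-stop bit: a CPG_BUS_m_MSTOP bit is cleared (bus un-stopped) only when its first user enables a clock, and set again only when the last user disables one; the upper 16 bits of each register act as a write-enable mask for the lower 16. A condensed sketch of the counting rule, stripped of the spinlock, the underflow guard, and the register write (mstop_refcount is illustrative only):

    #include <linux/atomic.h>
    #include <linux/bitops.h>

    /* Compute the CPG_BUS_m_MSTOP value to write; 0 means no update needed. */
    static u32 mstop_refcount(atomic_t *count, unsigned long mask, bool enable)
    {
    	unsigned int i;
    	u32 val = 0;

    	for_each_set_bit(i, &mask, 16) {
    		if (enable) {
    			if (atomic_inc_return(&count[i]) == 1)
    				val |= BIT(i) << 16;		/* write-enable, clear MSTOP */
    		} else {
    			if (atomic_dec_and_test(&count[i]))
    				val |= BIT(i) << 16 | BIT(i);	/* write-enable, set MSTOP */
    		}
    	}
    	return val;
    }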
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 819029c81904..fd8eb985c75b 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -8,6 +8,8 @@
#ifndef __RENESAS_RZV2H_CPG_H__
#define __RENESAS_RZV2H_CPG_H__
+#include <linux/bitfield.h>
+
/**
* struct ddiv - Structure for dynamic switching divider
*
@@ -33,12 +35,24 @@ struct ddiv {
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
+#define CPG_CDDIV3 (0x40C)
+#define CPG_CDDIV4 (0x410)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
#define CDDIV1_DIVCTL0 DDIV_PACK(CPG_CDDIV1, 0, 2, 4)
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
+#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
+#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
+#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+
+#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
+#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
+#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
+ FIELD_PREP_CONST(BUS_MSTOP_BITS_MASK, (mask)))
+#define BUS_MSTOP_NONE GENMASK(31, 0)
/**
* Definitions of CPG Core Clocks
@@ -98,8 +112,10 @@ enum clk_types {
* struct rzv2h_mod_clk - Module Clocks definitions
*
* @name: handle between common and hardware-specific interfaces
+ * @mstop_data: packed MSTOP register index and bit mask
* @parent: id of parent clock
* @critical: flag to indicate the clock is critical
+ * @no_pm: flag to indicate PM is not supported
* @on_index: control register index
* @on_bit: ON bit
* @mon_index: monitor register index
@@ -107,30 +123,37 @@ enum clk_types {
*/
struct rzv2h_mod_clk {
const char *name;
+ u32 mstop_data;
u16 parent;
bool critical;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
u8 mon_bit;
};
-#define DEF_MOD_BASE(_name, _parent, _critical, _onindex, _onbit, _monindex, _monbit) \
+#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, _onbit, _monindex, _monbit) \
{ \
.name = (_name), \
+ .mstop_data = (_mstop), \
.parent = (_parent), \
.critical = (_critical), \
+ .no_pm = (_no_pm), \
.on_index = (_onindex), \
.on_bit = (_onbit), \
.mon_index = (_monindex), \
.mon_bit = (_monbit), \
}
-#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, false, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit)
+
+#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit)
-#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, true, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD_NO_PM(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit)
/**
* struct rzv2h_reset - Reset definitions
@@ -172,6 +195,9 @@ struct rzv2h_reset {
*
* @resets: Array of Module Reset definitions
* @num_resets: Number of entries in resets[]
+ *
+ * @num_mstop_bits: Maximum number of MSTOP bits supported, equivalent to the
+ * number of CPG_BUS_m_MSTOP registers multiplied by 16.
*/
struct rzv2h_cpg_info {
/* Core Clocks */
@@ -188,8 +214,11 @@ struct rzv2h_cpg_info {
/* Resets */
const struct rzv2h_reset *resets;
unsigned int num_resets;
+
+ unsigned int num_mstop_bits;
};
+extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
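BUS_MSTOP() above packs the MSTOP register index into bits [31:16] of mstop_data and the per-register bit mask into bits [15:0]; the driver unpacks both halves with FIELD_GET(). A small example, assuming the definitions from this header are in scope (mstop_unpack_example is illustrative):

    #include <linux/bitfield.h>
    #include <linux/printk.h>

    static void mstop_unpack_example(void)
    {
    	u32 data = BUS_MSTOP(9, BIT(4));			/* == 0x00090010 */
    	u16 idx  = FIELD_GET(BUS_MSTOP_IDX_MASK, data);		/* 9 -> CPG_BUS_9_MSTOP */
    	u16 bits = FIELD_GET(BUS_MSTOP_BITS_MASK, data);	/* BIT(4) */

    	pr_info("MSTOP register %u, mask %#x\n", idx, bits);
    }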
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index af2ade54a7ef..3fe7616f0ebe 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -13,6 +13,7 @@ clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
clk-rockchip-y += clk-muxgrf.o
clk-rockchip-y += clk-ddr.o
+clk-rockchip-y += gate-link.o
clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 0ffaf639f807..4031733def4e 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -12,28 +12,6 @@
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include "clk.h"
-/*
- * Recent Rockchip SoCs have a new hardware block called Native Interface
- * Unit (NIU), which gates clocks to devices behind them. These effectively
- * need two parent clocks.
- *
- * Downstream enables the linked clock via runtime PM whenever the gate is
- * enabled. This implementation uses separate clock nodes for each of the
- * linked gate clocks, which leaks parts of the clock tree into DT.
- *
- * The GATE_LINK macro instead takes the second parent via 'linkname', but
- * ignores the information. Once the clock framework is ready to handle it, the
- * information should be passed on here. But since these clocks are required to
- * access multiple relevant IP blocks, such as PCIe or USB, we mark all linked
- * clocks critical until a better solution is available. This will waste some
- * power, but avoids leaking implementation details into DT or hanging the
- * system.
- */
-#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
- GATE(_id, cname, pname, f, o, b, gf)
-#define RK3588_LINKED_CLK CLK_IS_CRITICAL
-
-
#define RK3588_GRF_SOC_STATUS0 0x600
#define RK3588_PHYREF_ALT_GATE 0xc38
@@ -266,6 +244,8 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
}, \
}
+static struct rockchip_clk_provider *early_ctx;
+
static struct rockchip_cpuclk_rate_table rk3588_cpub0clk_rates[] __initdata = {
RK3588_CPUB01CLK_RATE(2496000000, 1),
RK3588_CPUB01CLK_RATE(2400000000, 1),
@@ -694,7 +674,7 @@ static struct rockchip_pll_clock rk3588_pll_clks[] __initdata = {
RK3588_MODE_CON0, 10, 15, 0, rk3588_pll_rates),
};
-static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
+static struct rockchip_clk_branch rk3588_early_clk_branches[] __initdata = {
/*
* CRU Clock-Architecture
*/
@@ -792,10 +772,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(MCLK_GMAC0_OUT, "mclk_gmac0_out", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(15), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 3, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(15), 15, 1, MFLAGS, 8, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 4, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(16), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 5, GFLAGS),
COMPOSITE(CLK_CIFOUT_OUT, "clk_cifout_out", gpll_cpll_24m_spll_p, 0,
@@ -1456,7 +1436,7 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(77), 0, 2, MFLAGS,
RK3588_CLKGATE_CON(31), 0, GFLAGS),
- COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(77), 7, 1, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(31), 1, GFLAGS),
GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0,
@@ -1685,13 +1665,13 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(42), 9, GFLAGS),
/* vdpu */
- COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, 0,
RK3588_CLKSEL_CON(98), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(44), 0, GFLAGS),
COMPOSITE_NODIV(ACLK_VDPU_LOW_ROOT, "aclk_vdpu_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(98), 7, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(98), 9, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 2, GFLAGS),
COMPOSITE(ACLK_JPEG_DECODER_ROOT, "aclk_jpeg_decoder_root", gpll_cpll_aupll_spll_p, 0,
@@ -1742,9 +1722,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_RKVENC0_ROOT, "aclk_rkvenc0_root", gpll_cpll_npll_p, 0,
RK3588_CLKSEL_CON(102), 7, 2, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(47), 1, GFLAGS),
- GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 4, GFLAGS),
- GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 5, GFLAGS),
COMPOSITE(CLK_RKVENC0_CORE, "clk_rkvenc0_core", gpll_cpll_aupll_npll_p, 0,
RK3588_CLKSEL_CON(102), 14, 2, MFLAGS, 9, 5, DFLAGS,
@@ -1754,10 +1734,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(48), 6, GFLAGS),
/* vi */
- COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, 0,
RK3588_CLKSEL_CON(106), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(49), 0, GFLAGS),
- COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(106), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(49), 1, GFLAGS),
COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0,
@@ -1927,10 +1907,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_dmyaupll_npll_spll_p, 0,
RK3588_CLKSEL_CON(110), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(52), 0, GFLAGS),
- COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(110), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(110), 10, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 2, GFLAGS),
COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0,
@@ -2428,10 +2408,12 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(68), 5, GFLAGS),
GATE(ACLK_AV1, "aclk_av1", "aclk_av1_pre", 0,
RK3588_CLKGATE_CON(68), 2, GFLAGS),
+};
+static struct rockchip_clk_branch rk3588_clk_branches[] = {
GATE_LINK(ACLK_ISP1_PRE, "aclk_isp1_pre", "aclk_isp1_root", ACLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 6, GFLAGS),
GATE_LINK(HCLK_ISP1_PRE, "hclk_isp1_pre", "hclk_isp1_root", HCLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 8, GFLAGS),
- GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS),
+ GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, 0, RK3588_CLKGATE_CON(31), 2, GFLAGS),
GATE_LINK(ACLK_USB, "aclk_usb", "aclk_usb_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 2, GFLAGS),
GATE_LINK(HCLK_USB, "hclk_usb", "hclk_usb_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 3, GFLAGS),
GATE_LINK(ACLK_JPEG_DECODER_PRE, "aclk_jpeg_decoder_pre", "aclk_jpeg_decoder_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(44), 7, GFLAGS),
@@ -2443,9 +2425,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(HCLK_RKVDEC1_PRE, "hclk_rkvdec1_pre", "hclk_rkvdec1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 4, GFLAGS),
GATE_LINK(ACLK_RKVDEC1_PRE, "aclk_rkvdec1_pre", "aclk_rkvdec1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 5, GFLAGS),
GATE_LINK(ACLK_HDCP0_PRE, "aclk_hdcp0_pre", "aclk_vo0_root", ACLK_VOP_LOW_ROOT, 0, RK3588_CLKGATE_CON(55), 9, GFLAGS),
- GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(55), 5, GFLAGS),
+ GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, 0, RK3588_CLKGATE_CON(55), 5, GFLAGS),
GATE_LINK(ACLK_HDCP1_PRE, "aclk_hdcp1_pre", "aclk_hdcp1_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 6, GFLAGS),
- GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(59), 9, GFLAGS),
+ GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 9, GFLAGS),
GATE_LINK(ACLK_AV1_PRE, "aclk_av1_pre", "aclk_av1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 1, GFLAGS),
GATE_LINK(PCLK_AV1_PRE, "pclk_av1_pre", "pclk_av1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 4, GFLAGS),
GATE_LINK(HCLK_SDIO_PRE, "hclk_sdio_pre", "hclk_sdio_root", HCLK_NVM, 0, RK3588_CLKGATE_CON(75), 1, GFLAGS),
@@ -2453,26 +2435,31 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(PCLK_VO1GRF, "pclk_vo1grf", "pclk_vo1_root", HCLK_VO1, CLK_IGNORE_UNUSED, RK3588_CLKGATE_CON(59), 12, GFLAGS),
};
-static void __init rk3588_clk_init(struct device_node *np)
+static void __init rk3588_clk_early_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
- unsigned long clk_nr_clks;
+ unsigned long clk_nr_clks, max_clk_id1, max_clk_id2;
void __iomem *reg_base;
- clk_nr_clks = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches)) + 1;
+ max_clk_id1 = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
+ max_clk_id2 = rockchip_clk_find_max_clk_id(rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+ clk_nr_clks = max(max_clk_id1, max_clk_id2) + 1;
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
return;
}
- ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ ctx = rockchip_clk_init_early(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
return;
}
+ early_ctx = ctx;
rockchip_clk_register_plls(ctx, rk3588_pll_clks,
ARRAY_SIZE(rk3588_pll_clks),
@@ -2491,14 +2478,55 @@ static void __init rk3588_clk_init(struct device_node *np)
&rk3588_cpub1clk_data, rk3588_cpub1clk_rates,
ARRAY_SIZE(rk3588_cpub1clk_rates));
- rockchip_clk_register_branches(ctx, rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches));
+ rockchip_clk_register_branches(ctx, rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE_DRIVER(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_early_init);
+
+static int clk_rk3588_probe(struct platform_device *pdev)
+{
+ struct rockchip_clk_provider *ctx = early_ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ rockchip_clk_register_late_branches(dev, ctx, rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
- rk3588_rst_init(np, reg_base);
+ rockchip_clk_finalize(ctx);
+ rk3588_rst_init(np, ctx->reg_base);
rockchip_register_restart_notifier(ctx, RK3588_GLB_SRST_FST, NULL);
+ /*
+ * Re-add clock provider, so that the newly added clocks are also
+ * re-parented and get their defaults configured.
+ */
+ of_clk_del_provider(np);
rockchip_clk_of_add_provider(np, ctx);
+
+ return 0;
}
-CLK_OF_DECLARE(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_init);
+static const struct of_device_id clk_rk3588_match_table[] = {
+ {
+ .compatible = "rockchip,rk3588-cru",
+ },
+ { }
+};
+
+static struct platform_driver clk_rk3588_driver = {
+ .probe = clk_rk3588_probe,
+ .driver = {
+ .name = "clk-rk3588",
+ .of_match_table = clk_rk3588_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rockchip_clk_rk3588_drv_register(void)
+{
+ return platform_driver_register(&clk_rk3588_driver);
+}
+core_initcall(rockchip_clk_rk3588_drv_register);
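With the split above, most of the tree is registered at CLK_OF_DECLARE time while the GATE_LINK clocks are created later from clk_rk3588_probe(); rockchip_clk_init_early() pre-fills the lookup table with -EPROBE_DEFER so that consumers of the not-yet-registered clocks retry instead of failing with -ENOENT. A hedged consumer-side sketch (my_consumer_probe and the "hclk" con_id are hypothetical):

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int my_consumer_probe(struct platform_device *pdev)
    {
    	struct clk *clk;

    	/* Until the late branches exist, this yields -EPROBE_DEFER and
    	 * the driver core re-probes the consumer later.
    	 */
    	clk = devm_clk_get(&pdev->dev, "hclk");
    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	return clk_prepare_enable(clk);
    }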
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 88629a9abc9c..cbf93ea119a9 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
@@ -197,12 +198,6 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
}
-static void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
- struct clk *clk, unsigned int id)
-{
- ctx->clk_data.clks[id] = clk;
-}
-
static struct clk *rockchip_clk_register_frac_branch(
struct rockchip_clk_provider *ctx, const char *name,
const char *const *parent_names, u8 num_parents,
@@ -292,7 +287,7 @@ static struct clk *rockchip_clk_register_frac_branch(
return mux_clk;
}
- rockchip_clk_add_lookup(ctx, mux_clk, child->id);
+ rockchip_clk_set_lookup(ctx, mux_clk, child->id);
/* notifier on the fraction divider to catch rate changes */
if (frac->mux_frac_idx >= 0) {
@@ -359,14 +354,17 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
return hw->clk;
}
-struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
- void __iomem *base,
- unsigned long nr_clks)
+static struct rockchip_clk_provider *rockchip_clk_init_base(
+ struct device_node *np, void __iomem *base,
+ unsigned long nr_clks, bool has_late_clocks)
{
struct rockchip_clk_provider *ctx;
struct clk **clk_table;
+ struct clk *default_clk_val;
int i;
+ default_clk_val = ERR_PTR(has_late_clocks ? -EPROBE_DEFER : -ENOENT);
+
ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -376,7 +374,7 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
goto err_free;
for (i = 0; i < nr_clks; ++i)
- clk_table[i] = ERR_PTR(-ENOENT);
+ clk_table[i] = default_clk_val;
ctx->reg_base = base;
ctx->clk_data.clks = clk_table;
@@ -393,8 +391,33 @@ err_free:
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
+
+struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, false);
+}
EXPORT_SYMBOL_GPL(rockchip_clk_init);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, true);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_init_early);
+
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->clk_data.clk_num; ++i)
+ if (ctx->clk_data.clks[i] == ERR_PTR(-EPROBE_DEFER))
+ ctx->clk_data.clks[i] = ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_finalize);
+
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx)
{
@@ -424,7 +447,7 @@ void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
@@ -446,6 +469,29 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
}
EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);
+static struct platform_device *rockchip_clk_register_gate_link(
+ struct device *parent_dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ struct rockchip_gate_link_platdata gate_link_pdata = {
+ .ctx = ctx,
+ .clkbr = clkbr,
+ };
+
+ struct platform_device_info pdevinfo = {
+ .parent = parent_dev,
+ .name = "rockchip-gate-link-clk",
+ .id = clkbr->id,
+ .fwnode = dev_fwnode(parent_dev),
+ .of_node_reused = true,
+ .data = &gate_link_pdata,
+ .size_data = sizeof(gate_link_pdata),
+ };
+
+ return platform_device_register_full(&pdevinfo);
+}
+
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
@@ -571,6 +617,9 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->div_width, list->div_flags,
ctx->reg_base, &ctx->lock);
break;
+ case branch_linked_gate:
+ /* must be registered late; handled by the error path below */
+ break;
}
/* none of the cases above matched */
@@ -586,11 +635,36 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ struct platform_device *pdev = NULL;
+
+ switch (list->branch_type) {
+ case branch_linked_gate:
+ pdev = rockchip_clk_register_gate_link(dev, ctx, list);
+ break;
+ default:
+ dev_err(dev, "unknown clock type %d\n", list->branch_type);
+ break;
+ }
+
+ if (!pdev)
+ dev_err(dev, "failed to register device for clock %s\n", list->name);
+ }
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_late_branches);
+
void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
unsigned int lookup_id,
const char *name, const char *const *parent_names,
@@ -610,7 +684,7 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
return;
}
- rockchip_clk_add_lookup(ctx, clk, lookup_id);
+ rockchip_clk_set_lookup(ctx, clk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index f1957e1c1178..9b37d44b9e5d 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -570,6 +570,7 @@ enum rockchip_clk_branch_type {
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_linked_gate,
branch_mmc,
branch_inverter,
branch_factor,
@@ -597,6 +598,7 @@ struct rockchip_clk_branch {
int gate_offset;
u8 gate_shift;
u8 gate_flags;
+ unsigned int linked_clk_id;
struct rockchip_clk_branch *child;
};
@@ -895,6 +897,20 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_linked_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .linked_clk_id = linkedclk, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ }
+
#define MMC(_id, cname, pname, offset, shift) \
{ \
.id = _id, \
@@ -1022,8 +1038,28 @@ struct rockchip_clk_branch {
#define SGRF_GATE(_id, cname, pname) \
FACTOR(_id, cname, pname, 0, 1, 1)
+static inline struct clk *rockchip_clk_get_lookup(struct rockchip_clk_provider *ctx,
+ unsigned int id)
+{
+ return ctx->clk_data.clks[id];
+}
+
+static inline void rockchip_clk_set_lookup(struct rockchip_clk_provider *ctx,
+ struct clk *clk, unsigned int id)
+{
+ ctx->clk_data.clks[id] = clk;
+}
+
+struct rockchip_gate_link_platdata {
+ struct rockchip_clk_provider *ctx;
+ struct rockchip_clk_branch *clkbr;
+};
+
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base, unsigned long nr_clks);
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx);
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx);
unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
@@ -1031,6 +1067,10 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk);
void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
struct rockchip_pll_clock *pll_list,
unsigned int nr_pll, int grf_lock_offset);
diff --git a/drivers/clk/rockchip/gate-link.c b/drivers/clk/rockchip/gate-link.c
new file mode 100644
index 000000000000..cd0f7a2d30ab
--- /dev/null
+++ b/drivers/clk/rockchip/gate-link.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include "clk.h"
+
+static int rk_clk_gate_link_register(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ unsigned long flags = clkbr->flags | CLK_SET_RATE_PARENT;
+ struct clk *clk;
+
+ clk = clk_register_gate(dev, clkbr->name, clkbr->parent_names[0],
+ flags, ctx->reg_base + clkbr->gate_offset,
+ clkbr->gate_shift, clkbr->gate_flags,
+ &ctx->lock);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rockchip_clk_set_lookup(ctx, clk, clkbr->id);
+ return 0;
+}
+
+static int rk_clk_gate_link_probe(struct platform_device *pdev)
+{
+ struct rockchip_gate_link_platdata *pdata;
+ struct device *dev = &pdev->dev;
+ struct clk *linked_clk;
+ int ret;
+
+ pdata = dev_get_platdata(dev);
+ if (!pdata)
+ return dev_err_probe(dev, -ENODEV, "missing platform data\n");
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ linked_clk = rockchip_clk_get_lookup(pdata->ctx, pdata->clkbr->linked_clk_id);
+ ret = pm_clk_add_clk(dev, linked_clk);
+ if (ret)
+ return ret;
+
+ ret = rk_clk_gate_link_register(dev, pdata->ctx, pdata->clkbr);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ pm_clk_remove_clk(dev, linked_clk);
+ return ret;
+}
+
+static const struct dev_pm_ops rk_clk_gate_link_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver rk_clk_gate_link_driver = {
+ .probe = rk_clk_gate_link_probe,
+ .driver = {
+ .name = "rockchip-gate-link-clk",
+ .pm = &rk_clk_gate_link_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rk_clk_gate_link_drv_register(void)
+{
+ return platform_driver_register(&rk_clk_gate_link_driver);
+}
+core_initcall(rk_clk_gate_link_drv_register);
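Note that probe() above expects a struct rockchip_gate_link_platdata, so the clock provider must create one auxiliary platform device per GATE_LINK branch. That glue lives outside this file; a plausible sketch, with the helper name assumed for illustration (the device name must match "rockchip-gate-link-clk" above):

	static int rockchip_clk_create_gate_link_dev(struct device *parent,
						     struct rockchip_clk_provider *ctx,
						     struct rockchip_clk_branch *clkbr)
	{
		struct rockchip_gate_link_platdata pdata = {
			.ctx = ctx,
			.clkbr = clkbr,
		};
		struct platform_device *pdev;

		/* platform_device_register_data() copies the platdata */
		pdev = platform_device_register_data(parent,
						     "rockchip-gate-link-clk",
						     clkbr->id, &pdata,
						     sizeof(pdata));
		return PTR_ERR_OR_ZERO(pdev);
	}

The per-gate device is what makes the linking work: the common clk framework runtime-resumes a clock's registering device around prepare/enable, so the pm_clk list set up in probe() keeps the linked clock running whenever the gate is operated.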
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 7a88331a658d..90e5b114872c 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos8895.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos990.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
new file mode 100644
index 000000000000..8e2a2e8eccee
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Igor Belwon <igor.belwon@mentallysanemainliners.org>
+ *
+ * Common Clock Framework support for Exynos990.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos990.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+#include "clk-pll.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_GOUT_CMU_VRA_BUS + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_D_HSI0_ACLK + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x1a330000) */
+#define PLL_LOCKTIME_PLL_G3D 0x0000
+#define PLL_LOCKTIME_PLL_MMC 0x0004
+#define PLL_LOCKTIME_PLL_SHARED0 0x0008
+#define PLL_LOCKTIME_PLL_SHARED1 0x000c
+#define PLL_LOCKTIME_PLL_SHARED2 0x0010
+#define PLL_LOCKTIME_PLL_SHARED3 0x0014
+#define PLL_LOCKTIME_PLL_SHARED4 0x0018
+#define PLL_CON0_PLL_G3D 0x0100
+#define PLL_CON3_PLL_G3D 0x010c
+#define PLL_CON0_PLL_MMC 0x0140
+#define PLL_CON3_PLL_MMC 0x014c
+#define PLL_CON0_PLL_SHARED0 0x0180
+#define PLL_CON3_PLL_SHARED0 0x018c
+#define PLL_CON0_PLL_SHARED1 0x01c0
+#define PLL_CON3_PLL_SHARED1 0x01cc
+#define PLL_CON0_PLL_SHARED2 0x0200
+#define PLL_CON3_PLL_SHARED2 0x020c
+#define PLL_CON0_PLL_SHARED3 0x0240
+#define PLL_CON3_PLL_SHARED3 0x024c
+#define PLL_CON0_PLL_SHARED4 0x0280
+#define PLL_CON3_PLL_SHARED4 0x028c
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_DNS_BUS 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_DPU_ALT 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_BUS 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_IPP_BUS 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_ITP_BUS 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_NPU_BUS 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_BUS 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_VRA_BUS 0x10e8
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1804
+#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_BUS1_SSS 0x1810
+#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1814
+#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1818
+#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x181c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x1820
+#define CLK_CON_DIV_CLKCMU_CIS_CLK4 0x1824
+#define CLK_CON_DIV_CLKCMU_CIS_CLK5 0x1828
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST 0x182c
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
+#define CLK_CON_DIV_CLKCMU_CPUCL2_BUSP 0x1840
+#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1844
+#define CLK_CON_DIV_CLKCMU_CSIS_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU 0x184c
+#define CLK_CON_DIV_CLKCMU_DNC_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_DNC_BUSM 0x1854
+#define CLK_CON_DIV_CLKCMU_DNS_BUS 0x1858
+#define CLK_CON_DIV_CLKCMU_DSP_BUS 0x185c
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1860
+#define CLK_CON_DIV_CLKCMU_G2D_MSCL 0x1864
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1868
+#define CLK_CON_DIV_CLKCMU_HPM 0x186c
+#define CLK_CON_DIV_CLKCMU_HSI0_BUS 0x1870
+#define CLK_CON_DIV_CLKCMU_HSI0_DPGTC 0x1874
+#define CLK_CON_DIV_CLKCMU_HSI0_USB31DRD 0x1878
+#define CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG 0x187c
+#define CLK_CON_DIV_CLKCMU_HSI1_BUS 0x1880
+#define CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD 0x1884
+#define CLK_CON_DIV_CLKCMU_HSI1_PCIE 0x1888
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD 0x188c
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD 0x1890
+#define CLK_CON_DIV_CLKCMU_HSI2_BUS 0x1894
+#define CLK_CON_DIV_CLKCMU_HSI2_PCIE 0x1898
+#define CLK_CON_DIV_CLKCMU_IPP_BUS 0x189c
+#define CLK_CON_DIV_CLKCMU_ITP_BUS 0x18a0
+#define CLK_CON_DIV_CLKCMU_MCSC_BUS 0x18a4
+#define CLK_CON_DIV_CLKCMU_MCSC_GDC 0x18a8
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU 0x18ac
+#define CLK_CON_DIV_CLKCMU_MFC0_MFC0 0x18b0
+#define CLK_CON_DIV_CLKCMU_MFC0_WFD 0x18b4
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x18b8
+#define CLK_CON_DIV_CLKCMU_NPU_BUS 0x18bc
+#define CLK_CON_DIV_CLKCMU_OTP 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18d0
+#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18d4
+#define CLK_CON_DIV_CLKCMU_SSP_BUS 0x18d8
+#define CLK_CON_DIV_CLKCMU_TNR_BUS 0x18dc
+#define CLK_CON_DIV_CLKCMU_VRA_BUS 0x18e0
+#define CLK_CON_DIV_DIV_CLKCMU_DPU 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_DPU_ALT 0x18ec
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18f4
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18f8
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18fc
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1900
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x1904
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x1908
+#define CLK_CON_DIV_PLL_SHARED2_DIV2 0x190c
+#define CLK_CON_DIV_PLL_SHARED4_DIV2 0x1910
+#define CLK_CON_DIV_PLL_SHARED4_DIV3 0x1914
+#define CLK_CON_DIV_PLL_SHARED4_DIV4 0x1918
+#define CLK_CON_GAT_CLKCMU_G3D_BUS 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUS 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_DNS_BUS 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_BUS 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_IPP_BUS 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_ITP_BUS 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS 0x20b0
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_NPU_BUS 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP 0x20d8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x20dc
+#define CLK_CON_GAT_GATE_CLKCMU_SSP_BUS 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_TNR_BUS 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_VRA_BUS 0x20e8
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_G3D,
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON3_PLL_G3D,
+ PLL_CON3_PLL_MMC,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM,
+ CLK_CON_MUX_MUX_CLKCMU_DNS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPU,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL,
+ CLK_CON_MUX_MUX_CLKCMU_HPM,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_IPP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ITP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_NPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_VRA_BUS,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_CLKCMU_BUS0_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_SSS,
+ CLK_CON_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_CLKCMU_CIS_CLK4,
+ CLK_CON_DIV_CLKCMU_CIS_CLK5,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_CLKCMU_CSIS_BUS,
+ CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_DIV_CLKCMU_DNC_BUS,
+ CLK_CON_DIV_CLKCMU_DNC_BUSM,
+ CLK_CON_DIV_CLKCMU_DNS_BUS,
+ CLK_CON_DIV_CLKCMU_DSP_BUS,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_MSCL,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_HPM,
+ CLK_CON_DIV_CLKCMU_HSI0_BUS,
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC,
+ CLK_CON_DIV_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_DIV_CLKCMU_HSI1_BUS,
+ CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_HSI2_BUS,
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE,
+ CLK_CON_DIV_CLKCMU_IPP_BUS,
+ CLK_CON_DIV_CLKCMU_ITP_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_GDC,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0,
+ CLK_CON_DIV_CLKCMU_MFC0_WFD,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_NPU_BUS,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_PERIS_BUS,
+ CLK_CON_DIV_CLKCMU_SSP_BUS,
+ CLK_CON_DIV_CLKCMU_TNR_BUS,
+ CLK_CON_DIV_CLKCMU_VRA_BUS,
+ CLK_CON_DIV_DIV_CLKCMU_DPU,
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV3,
+ CLK_CON_DIV_PLL_SHARED4_DIV4,
+ CLK_CON_GAT_CLKCMU_G3D_BUS,
+ CLK_CON_GAT_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM,
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPU,
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_HPM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_0717x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_0732x, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+ PLL(pll_0718x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ PLL_LOCKTIME_PLL_G3D, PLL_CON3_PLL_G3D, NULL),
+};
+
+/* Parent clock list for CMU_TOP muxes */
+PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_pll_mmc_p) = { "oscclk", "fout_mmc_pll" };
+PNAME(mout_pll_g3d_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_aud_cpu_p) = { "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_bus0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_sss_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_cis_clk0_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk1_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk2_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk3_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk4_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk5_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cmu_boost_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_core_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "fout_shared3_pll", "oscclk" };
+PNAME(mout_cmu_cpucl0_dbg_bus_p) = { "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "oscclk" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl2_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cpucl2_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_csis_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+PNAME(mout_cmu_csis_ois_mcu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_dnc_bus_p) = { "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dnc_busm_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4" };
+PNAME(mout_cmu_dns_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_dpu_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dpu_alt_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_dsp_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_g2d_g2d_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_g2d_mscl_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4",
+ "oscclk" };
+PNAME(mout_cmu_hpm_p) = { "oscclk",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usb31drd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usbdp_debug_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "fout_mmc_pll", "oscclk", "oscclk" };
+PNAME(mout_cmu_hsi1_mmc_card_p) = { "oscclk", "fout_shared2_pll",
+ "fout_mmc_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_hsi1_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_ufs_card_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi1_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi2_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi2_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_ipp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_itp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_gdc_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cmu_boost_cpu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mfc0_mfc0_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mfc0_wfd_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mif_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll",
+ "fout_shared1_pll",
+ "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_npu_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric0_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peris_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_ssp_bus_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_tnr_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+
+/*
+ * Register name to clock name mangling strategy used in this file
+ *
+ * Replace PLL_CON{0,3}_PLL with CLK_MOUT_PLL and mout_pll
+ * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu
+ * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_DIV_PLL_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ *
+ * For gates remove _UID _BLK _IPCLKPORT, _I and _RSTNSYNC
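+ *
+ * For example, the gate rule turns
+ * CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK into
+ * CLK_GOUT_HSI0_DP_LINK_PCLK and "gout_hsi0_dp_link_pclk"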
+ */
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+ PLL_CON3_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+ PLL_CON3_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+ PLL_CON3_PLL_SHARED2, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+ PLL_CON3_PLL_SHARED3, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+ MUX(CLK_MOUT_PLL_G3D, "mout_pll_g3d", mout_pll_g3d_p,
+ PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus",
+ mout_cmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu",
+ mout_cmu_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS0_BUS, "mout_cmu_bus0_bus",
+ mout_cmu_bus0_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus",
+ mout_cmu_bus1_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_SSS, "mout_cmu_bus1_sss",
+ mout_cmu_bus1_sss_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS, 0, 2),
+ MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0",
+ mout_cmu_cis_clk0_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1",
+ mout_cmu_cis_clk1_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2",
+ mout_cmu_cis_clk2_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3",
+ mout_cmu_cis_clk3_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK4, "mout_cmu_cis_clk4",
+ mout_cmu_cis_clk4_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK5, "mout_cmu_cis_clk5",
+ mout_cmu_cis_clk5_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5, 0, 1),
+ MUX(CLK_MOUT_CMU_CMU_BOOST, "mout_cmu_cmu_boost",
+ mout_cmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(CLK_MOUT_CMU_CORE_BUS, "mout_cmu_core_bus",
+ mout_cmu_core_bus_p, CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_DBG_BUS, "mout_cmu_cpucl0_dbg_bus",
+ mout_cmu_cpucl0_dbg_bus_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL2_BUSP, "mout_cmu_cpucl2_busp",
+ mout_cmu_cpucl2_busp_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_CPUCL2_SWITCH, "mout_cmu_cpucl2_switch",
+ mout_cmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_BUS, "mout_cmu_csis_bus",
+ mout_cmu_csis_bus_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_OIS_MCU, "mout_cmu_csis_ois_mcu",
+ mout_cmu_csis_ois_mcu_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_DNC_BUS, "mout_cmu_dnc_bus",
+ mout_cmu_dnc_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DNC_BUSM, "mout_cmu_dnc_busm",
+ mout_cmu_dnc_busm_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM, 0, 2),
+ MUX(CLK_MOUT_CMU_DNS_BUS, "mout_cmu_dns_bus",
+ mout_cmu_dns_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNS_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DPU, "mout_cmu_dpu",
+ mout_cmu_dpu_p, CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 1),
+ MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
+ mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
+ MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
+ mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
+ mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
+ mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 1),
+ MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
+ mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
+ mout_cmu_hsi0_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI0_DPGTC, "mout_cmu_hsi0_dpgtc",
+ mout_cmu_hsi0_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USB31DRD, "mout_cmu_hsi0_usb31drd",
+ mout_cmu_hsi0_usb31drd_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
+ mout_cmu_hsi0_usbdp_debug_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
+ mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
+ mout_cmu_hsi1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_PCIE, "mout_cmu_hsi1_pcie",
+ mout_cmu_hsi1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_CARD, "mout_cmu_hsi1_ufs_card",
+ mout_cmu_hsi1_ufs_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
+ mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
+ mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
+ mout_cmu_hsi2_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_IPP_BUS, "mout_cmu_ipp_bus",
+ mout_cmu_ipp_bus_p, CLK_CON_MUX_MUX_CLKCMU_IPP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_ITP_BUS, "mout_cmu_itp_bus",
+ mout_cmu_itp_bus_p, CLK_CON_MUX_MUX_CLKCMU_ITP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_BUS, "mout_cmu_mcsc_bus",
+ mout_cmu_mcsc_bus_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_GDC, "mout_cmu_mcsc_gdc",
+ mout_cmu_mcsc_gdc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC, 0, 3),
+ MUX(CLK_MOUT_CMU_CMU_BOOST_CPU, "mout_cmu_cmu_boost_cpu",
+ mout_cmu_cmu_boost_cpu_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_MFC0, "mout_cmu_mfc0_mfc0",
+ mout_cmu_mfc0_mfc0_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0, 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_WFD, "mout_cmu_mfc0_wfd",
+ mout_cmu_mfc0_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_BUSP, "mout_cmu_mif_busp",
+ mout_cmu_mif_busp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch",
+ mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_NPU_BUS, "mout_cmu_npu_bus",
+ mout_cmu_npu_bus_p, CLK_CON_MUX_MUX_CLKCMU_NPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus",
+ mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC0_IP, "mout_cmu_peric0_ip",
+ mout_cmu_peric0_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus",
+ mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_IP, "mout_cmu_peric1_ip",
+ mout_cmu_peric1_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIS_BUS, "mout_cmu_peris_bus",
+ mout_cmu_peris_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_SSP_BUS, "mout_cmu_ssp_bus",
+ mout_cmu_ssp_bus_p, CLK_CON_MUX_MUX_CLKCMU_SSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_TNR_BUS, "mout_cmu_tnr_bus",
+ mout_cmu_tnr_bus_p, CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_VRA_BUS, "mout_cmu_vra_bus",
+ mout_cmu_vra_bus_p, CLK_CON_MUX_MUX_CLKCMU_VRA_BUS, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* SHARED0 region */
+ DIV(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV3, "dout_cmu_shared0_div3", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4", "dout_cmu_shared0_div2",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+
+ /* SHARED1 region */
+ DIV(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV3, "dout_cmu_shared1_div3", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4", "dout_cmu_shared1_div2",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ /* SHARED2 region */
+ DIV(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2", "mout_pll_shared2",
+ CLK_CON_DIV_PLL_SHARED2_DIV2, 0, 1),
+
+ /* SHARED4 region */
+ DIV(CLK_DOUT_CMU_SHARED4_DIV2, "dout_cmu_shared4_div2", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV3, "dout_cmu_shared4_div3", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV4, "dout_cmu_shared4_div4", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
+
+ DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
+ CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
+ CLK_CON_DIV_CLKCMU_BUS0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus",
+ CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_SSS, "dout_cmu_bus1_sss", "gout_cmu_bus1_sss",
+ CLK_CON_DIV_CLKCMU_BUS1_SSS, 0, 4),
+ DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0",
+ CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1",
+ CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2",
+ CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3",
+ CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK4, "dout_cmu_cis_clk4", "gout_cmu_cis_clk4",
+ CLK_CON_DIV_CLKCMU_CIS_CLK4, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK5, "dout_cmu_cis_clk5", "gout_cmu_cis_clk5",
+ CLK_CON_DIV_CLKCMU_CIS_CLK5, 0, 5),
+ DIV(CLK_DOUT_CMU_CMU_BOOST, "dout_cmu_cmu_boost", "mout_cmu_cmu_boost",
+ CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
+ DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_debug",
+ "gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+ "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+ "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL2_BUSP, "dout_cmu_cpucl2_busp",
+ "gout_cmu_cpucl2_busp", CLK_CON_DIV_CLKCMU_CPUCL2_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL2_SWITCH, "dout_cmu_cpucl2_switch",
+ "gout_cmu_cpucl2_switch", CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CSIS_BUS, "dout_cmu_csis_bus", "gout_cmu_csis_bus",
+ CLK_CON_DIV_CLKCMU_CSIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_OIS_MCU, "dout_cmu_csis_ois_mcu",
+ "gout_cmu_csis_ois_mcu", CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUS, "dout_cmu_dnc_bus", "gout_cmu_dnc_bus",
+ CLK_CON_DIV_CLKCMU_DNC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUSM, "dout_cmu_dnc_busm", "gout_cmu_dnc_busm",
+ CLK_CON_DIV_CLKCMU_DNC_BUSM, 0, 4),
+ DIV(CLK_DOUT_CMU_DNS_BUS, "dout_cmu_dns_bus", "gout_cmu_dns_bus",
+ CLK_CON_DIV_CLKCMU_DNS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DSP_BUS, "dout_cmu_dsp_bus", "gout_cmu_dsp_bus",
+ CLK_CON_DIV_CLKCMU_DSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_MSCL, "dout_cmu_g2d_mscl", "gout_cmu_g2d_mscl",
+ CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch",
+ "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm",
+ CLK_CON_DIV_CLKCMU_HPM, 0, 2),
+ DIV(CLK_DOUT_CMU_HSI0_BUS, "dout_cmu_hsi0_bus", "gout_cmu_hsi0_bus",
+ CLK_CON_DIV_CLKCMU_HSI0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_DPGTC, "dout_cmu_hsi0_dpgtc", "gout_cmu_hsi0_dpgtc",
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
+ "gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+ "gout_cmu_hsi0_usbdp_debug", CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
+ CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
+ "gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 7),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
+ "gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_EMBD, "dout_cmu_hsi1_ufs_embd",
+ "gout_cmu_hsi1_ufs_embd", CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
+ CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 7),
+ DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
+ CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
+ CLK_CON_DIV_CLKCMU_ITP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_BUS, "dout_cmu_mcsc_bus", "gout_cmu_mcsc_bus",
+ CLK_CON_DIV_CLKCMU_MCSC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_GDC, "dout_cmu_mcsc_gdc", "gout_cmu_mcsc_gdc",
+ CLK_CON_DIV_CLKCMU_MCSC_GDC, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CPU, "dout_cmu_cmu_boost_cpu",
+ "mout_cmu_cmu_boost_cpu", CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ DIV(CLK_DOUT_CMU_MFC0_MFC0, "dout_cmu_mfc0_mfc0", "gout_cmu_mfc0_mfc0",
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_WFD, "dout_cmu_mfc0_wfd", "gout_cmu_mfc0_wfd",
+ CLK_CON_DIV_CLKCMU_MFC0_WFD, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_cmu_mif_busp", "gout_cmu_mif_busp",
+ CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_NPU_BUS, "dout_cmu_npu_bus", "gout_cmu_npu_bus",
+ CLK_CON_DIV_CLKCMU_NPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus", "gout_cmu_peric0_bus",
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP, "dout_cmu_peric0_ip", "gout_cmu_peric0_ip",
+ CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus", "gout_cmu_peric1_bus",
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP, "dout_cmu_peric1_ip", "gout_cmu_peric1_ip",
+ CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_BUS, "dout_cmu_peris_bus", "gout_cmu_peris_bus",
+ CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SSP_BUS, "dout_cmu_ssp_bus", "gout_cmu_ssp_bus",
+ CLK_CON_DIV_CLKCMU_SSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_TNR_BUS, "dout_cmu_tnr_bus", "gout_cmu_tnr_bus",
+ CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
+ CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU, "dout_cmu_clkcmu_dpu", "gout_cmu_dpu",
+ CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 4),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus",
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_AUD_CPU, "gout_cmu_aud_cpu", "mout_cmu_aud_cpu",
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS0_BUS, "gout_cmu_bus0_bus", "mout_cmu_bus0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_SSS, "gout_cmu_bus1_sss", "mout_cmu_bus1_sss",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK4, "gout_cmu_cis_clk4", "mout_cmu_cis_clk4",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK5, "gout_cmu_cis_clk5", "mout_cmu_cis_clk5",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_cmu_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_DBG_BUS, "gout_cmu_cpucl0_dbg_bus",
+ "mout_cmu_cpucl0_dbg_bus", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch",
+ "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch",
+ "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_BUSP, "gout_cmu_cpucl2_busp",
+ "mout_cmu_cpucl2_busp", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_SWITCH, "gout_cmu_cpucl2_switch",
+ "mout_cmu_cpucl2_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CSIS_BUS, "gout_cmu_csis_bus", "mout_cmu_csis_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CSIS_OIS_MCU, "gout_cmu_csis_ois_mcu",
+ "mout_cmu_csis_ois_mcu", CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUS, "gout_cmu_dnc_bus", "mout_cmu_dnc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUSM, "gout_cmu_dnc_busm", "mout_cmu_dnc_busm",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNS_BUS, "gout_cmu_dns_bus", "mout_cmu_dns_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU, "gout_cmu_dpu", "mout_cmu_dpu",
+ CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_alt",
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DSP_BUS, "gout_cmu_dsp_bus", "mout_cmu_dsp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_MSCL, "gout_cmu_g2d_mscl", "mout_cmu_g2d_mscl",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm",
+ CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_BUS, "gout_cmu_hsi0_bus",
+ "mout_cmu_hsi0_bus", CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_DPGTC, "gout_cmu_hsi0_dpgtc",
+ "mout_cmu_hsi0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USB31DRD, "gout_cmu_hsi0_usb31drd",
+ "mout_cmu_hsi0_usb31drd", CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USBDP_DEBUG, "gout_cmu_hsi0_usbdp_debug",
+ "mout_cmu_hsi0_usbdp_debug", CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_BUS, "gout_cmu_hsi1_bus", "mout_cmu_hsi1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_MMC_CARD, "gout_cmu_hsi1_mmc_card",
+ "mout_cmu_hsi1_mmc_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_PCIE, "gout_cmu_hsi1_pcie",
+ "mout_cmu_hsi1_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_CARD, "gout_cmu_hsi1_ufs_card",
+ "mout_cmu_hsi1_ufs_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_EMBD, "gout_cmu_hsi1_ufs_embd",
+ "mout_cmu_hsi1_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_BUS, "gout_cmu_hsi2_bus", "mout_cmu_hsi2_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_PCIE, "gout_cmu_hsi2_pcie",
+ "mout_cmu_hsi2_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IPP_BUS, "gout_cmu_ipp_bus", "mout_cmu_ipp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ITP_BUS, "gout_cmu_itp_bus", "mout_cmu_itp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_BUS, "gout_cmu_mcsc_bus", "mout_cmu_mcsc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_GDC, "gout_cmu_mcsc_gdc", "mout_cmu_mcsc_gdc",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_MFC0, "gout_cmu_mfc0_mfc0",
+ "mout_cmu_mfc0_mfc0", CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_WFD, "gout_cmu_mfc0_wfd", "mout_cmu_mfc0_wfd",
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_BUSP, "gout_cmu_mif_busp", "mout_cmu_mif_busp",
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_NPU_BUS, "gout_cmu_npu_bus", "mout_cmu_npu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus",
+ "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_IP, "gout_cmu_peric0_ip",
+ "mout_cmu_peric0_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus",
+ "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_IP, "gout_cmu_peric1_ip",
+ "mout_cmu_peric1_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIS_BUS, "gout_cmu_peris_bus",
+ "mout_cmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_SSP_BUS, "gout_cmu_ssp_bus", "mout_cmu_ssp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TNR_BUS, "gout_cmu_tnr_bus", "mout_cmu_tnr_bus",
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_VRA_BUS, "gout_cmu_vra_bus", "mout_cmu_vra_bus",
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos990_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos990_cmu_top, "samsung,exynos990-cmu-top",
+ exynos990_cmu_top_init);
+
+/* ---- CMU_HSI0 ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_HSI0 (0x10a00000) */
+#define PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER 0x0630
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x0610
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x2004
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2 0x2024
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL 0x202c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40 0x2034
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY 0x2030
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL 0x2038
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_hsi0_bus_user_p) = { "oscclk", "dout_cmu_hsi0_bus" };
+PNAME(mout_hsi0_usb31drd_user_p) = { "oscclk", "dout_cmu_hsi0_usb31drd" };
+PNAME(mout_hsi0_usbdp_debug_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usbdp_debug" };
+PNAME(mout_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" };
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI0_BUS_USER, "mout_hsi0_bus_user",
+ mout_hsi0_bus_user_p, PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USB31DRD_USER, "mout_hsi0_usb31drd_user",
+ mout_hsi0_usb31drd_user_p, PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USBDP_DEBUG_USER, "mout_hsi0_usbdp_debug_user",
+ mout_hsi0_usbdp_debug_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_DPGTC_USER, "mout_hsi0_dpgtc_user",
+ mout_hsi0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_HSI0_DP_LINK_DP_GTC_CLK,
+ "gout_hsi0_dp_link_dp_gtc_clk", "mout_hsi0_dpgtc_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_DP_LINK_PCLK,
+ "gout_hsi0_dp_link_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK,
+ "gout_hsi0_d_tzpc_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_LHM_AXI_P_HSI0_CLK,
+ "gout_hsi0_lhm_axi_p_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_ACLK,
+ "gout_hsi0_ppmu_hsi0_bus1_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_PCLK,
+ "gout_hsi0_ppmu_hsi0_bus1_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK,
+ "gout_hsi0_clk_hsi0_bus_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2,
+ "gout_hsi0_sysmmu_usb_clk_s2", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_SYSREG_HSI0_PCLK,
+ "gout_hsi0_sysreg_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL,
+ "gout_hsi0_usb31drd_aclk_phyctrl", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY,
+ "gout_hsi0_usb31drd_bus_clk_early",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USB31DRD_REF_CLK_40,
+ "gout_hsi0_usb31drd_usb31drd_ref_clk_40",
+ "mout_hsi0_usb31drd_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_REF_SOC_PLL,
+ "gout_hsi0_usb31drd_usbdpphy_ref_soc_pll",
+ "mout_hsi0_usbdp_debug_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_SCL_APB,
+ "gout_hsi0_usb31drd_ipclkport_i_usbdpphy_scl_apb_pclk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBPCS_APB_CLK,
+ "gout_hsi0_usb31drd_usbpcs_apb_clk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_VGEN_LITE_HSI0_CLK,
+ "gout_hsi0_vgen_lite_ipclkport_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CMU_HSI0_PCLK,
+ "gout_hsi0_cmu_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_XIU_D_HSI0_ACLK,
+ "gout_hsi0_xiu_d_hsi0_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .gate_clks = hsi0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi0_gate_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ----- platform_driver ----- */
+
+static int __init exynos990_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos990_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos990-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ },
+ { },
+};
+
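+/*
+ * The probe callback lives in __init memory, so the driver struct is
+ * marked __refdata and sysfs bind/unbind is suppressed: the device can
+ * only be bound once, during boot, via the core_initcall() below.
+ */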
+static struct platform_driver exynos990_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos990-cmu",
+ .of_match_table = exynos990_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos990_cmu_probe,
+};
+
+static int __init exynos990_cmu_init(void)
+{
+ return platform_driver_register(&exynos990_cmu_driver);
+}
+
+core_initcall(exynos990_cmu_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index be6b51694919..2e94bba6c396 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -430,7 +430,10 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
#define PLL0822X_LOCK_STAT_SHIFT (29)
#define PLL0822X_ENABLE_SHIFT (31)
-/* PLL1418x is similar to PLL0822x, except that MDIV is one bit smaller */
+/*
+ * PLL1418x, PLL0717x and PLL0718x are similar
+ * to PLL0822x, except that MDIV is one bit smaller
+ */
#define PLL1418X_MDIV_MASK (0x1FF)
static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
@@ -441,10 +444,14 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
u64 fvco = parent_rate;
pll_con3 = readl_relaxed(pll->con_reg);
- if (pll->type != pll_1418x)
+
+ if (pll->type != pll_1418x &&
+ pll->type != pll_0717x &&
+ pll->type != pll_0718x)
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
else
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL1418X_MDIV_MASK;
+
pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
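+
+	/*
+	 * Illustration (assuming the usual 10-bit PLL0822X_MDIV_MASK of
+	 * 0x3ff): pll0822x keeps all ten MDIV bits, while the 9-bit
+	 * PLL1418X_MDIV_MASK (0x1ff) drops the top bit:
+	 *
+	 *	mdiv10 = (pll_con3 >> PLL0822X_MDIV_SHIFT) & 0x3ff;
+	 *	mdiv9  = (pll_con3 >> PLL0822X_MDIV_SHIFT) & 0x1ff;
+	 */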
@@ -1377,6 +1384,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_0516x:
case pll_0517x:
case pll_0518x:
+ case pll_0717x:
+ case pll_0718x:
+ case pll_0732x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
if (!pll->rate_table)
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 858ab367eb65..6ddc54d173a0 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -45,6 +45,9 @@ enum samsung_pll_type {
pll_531x,
pll_1051x,
pll_1052x,
+ pll_0717x,
+ pll_0718x,
+ pll_0732x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index b028f25c658a..62eed964c3d0 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -35,7 +35,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/starfive/clk-starfive-jh7100-audio.c b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
index 1fcf4e62f347..7de23f6749aa 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100-audio.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
@@ -84,17 +84,6 @@ static const struct jh71x0_clk_data jh7100_audclk_data[] = {
JH7100_AUDCLK_AUDIO_12288),
};
-static struct clk_hw *jh7100_audclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7100_AUDCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7100_audclk_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -106,6 +95,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7100_AUDCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -146,7 +136,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return ret;
}
- return devm_of_clk_add_hw_provider(priv->dev, jh7100_audclk_get, priv);
+ return devm_of_clk_add_hw_provider(priv->dev, jh71x0_clk_get, priv);
}
static const struct of_device_id jh7100_audclk_match[] = {
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-aon.c b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
index 418efdad719b..6f67587f4335 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-aon.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
@@ -54,17 +54,6 @@ static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
JH71X0_GATE(JH7110_AONCLK_RTC_CAL, "rtc_cal", 0, JH7110_AONCLK_OSC),
};
-static struct clk_hw *jh7110_aonclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_AONCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_aoncrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -78,6 +67,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_AONCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -127,7 +117,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_aonclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index 8c4c3a958a9f..f3fa069db193 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -75,17 +75,6 @@ static inline int jh7110_isp_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rsts);
}
-static struct clk_hw *jh7110_ispclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_ISPCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_ispcrg_suspend(struct device *dev)
{
@@ -126,6 +115,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_ISPCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -186,7 +176,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_ispclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-stg.c b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
index dafcb7190592..2a5ad0e07d1d 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-stg.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
@@ -75,17 +75,6 @@ static const struct jh71x0_clk_data jh7110_stgclk_data[] = {
JH71X0_GATE(JH7110_STGCLK_DMA1P_AHB, "dma1p_ahb", 0, JH7110_STGCLK_STG_AXIAHB),
};
-static struct clk_hw *jh7110_stgclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_STGCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_stgcrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -98,6 +87,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_STGCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -145,7 +135,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_stgclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index 17325f17696f..e9d8168d02b8 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -323,17 +323,6 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH7110_SYSCLK_OSC),
};
-static struct clk_hw *jh7110_sysclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_SYSCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static void jh7110_reset_unregister_adev(void *_adev)
{
struct auxiliary_device *adev = _adev;
@@ -425,6 +414,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_SYSCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -526,7 +516,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_sysclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
index 04eeed199087..bad20d5d794a 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
@@ -80,17 +80,6 @@ static int jh7110_vout_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rst);
}
-static struct clk_hw *jh7110_voutclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_VOUTCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_voutcrg_suspend(struct device *dev)
{
@@ -131,6 +120,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_VOUTCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -193,7 +183,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_voutclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.c b/drivers/clk/starfive/clk-starfive-jh71x0.c
index aebc99264a0b..80e9157347eb 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.c
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.c
@@ -325,3 +325,15 @@ const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
return &jh71x0_clk_inv_ops;
}
EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);
+
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct jh71x0_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < priv->num_reg)
+ return &priv->reg[idx].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(jh71x0_clk_get);
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
index e3f441393e48..9d5dec1d5cd1 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -117,9 +117,11 @@ struct jh71x0_clk_priv {
struct clk *original_clk;
struct notifier_block pll_clk_nb;
struct clk_hw *pll[3];
- struct jh71x0_clk reg[];
+ unsigned int num_reg;
+ struct jh71x0_clk reg[] __counted_by(num_reg);
};
const struct clk_ops *starfive_jh71x0_clk_ops(u32 max);
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data);
#endif
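
The six per-SoC clk_get callbacks deleted above all performed the same
bounds-checked lookup, so they collapse into this one shared helper;
__counted_by(num_reg) additionally ties the bound to the flexible reg[]
array for runtime checking. A minimal sketch of the resulting probe
pattern (hypothetical MYCLK_END bound, error handling elided):

	priv->num_reg = MYCLK_END;	/* set before registering the provider */
	ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);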
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index 7133377d4163..1f81c7ac41af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -436,7 +436,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
0, 4, /* M */
@@ -444,7 +444,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
0, 4, /* M */
@@ -452,7 +452,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 3a7d61c81667..ba1ad267f123 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -535,11 +535,11 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
CLK_SET_RATE_PARENT);
/*
- * DSI output seems to work only when PLL_MIPI selected. Set it and prevent
- * the mux from reparenting.
+ * Experiments showed that RGB output requires pll-video0-2x, while DSI
+ * requires pll-mipi. With the wrong parent clock the output does not
+ * work and the screen stays blank. sun50i-a64.dtsi assigns pll-mipi
+ * as the TCON0 parent by default.
*/
-#define SUN50I_A64_TCON0_CLK_REG 0x118
-
static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
static const u8 tcon0_table[] = { 0, 2, };
static SUNXI_CCU_MUX_TABLE_WITH_GATE_CLOSEST(tcon0_clk, "tcon0", tcon0_parents,
@@ -959,11 +959,6 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
- /* Set PLL MIPI as parent for TCON0 */
- val = readl(reg + SUN50I_A64_TCON0_CLK_REG);
- val &= ~GENMASK(26, 24);
- writel(val | (0 << 24), reg + SUN50I_A64_TCON0_CLK_REG);
-
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index a8c11c0b4e06..dfba88a5ad0f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -21,7 +21,6 @@
/* PLL_VIDEO0 exported for HDMI PHY */
-#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
@@ -32,7 +31,6 @@
#define CLK_PLL_PERIPH1_2X 14
#define CLK_PLL_VIDEO1 15
#define CLK_PLL_GPU 16
-#define CLK_PLL_MIPI 17
#define CLK_PLL_HSIC 18
#define CLK_PLL_DE 19
#define CLK_PLL_DDR1 20
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 1086669b91da..190816c35da9 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1107,11 +1107,24 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H616_USB3_CLK_REG,
};
+static struct ccu_mux_nb sun50i_h616_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1, /* manual doesn't really say */
+ .bypass_index = 4, /* PLL_PERI0@600MHz, as recommended by manual */
+};
+
+static struct ccu_pll_nb sun50i_h616_pll_cpu_nb = {
+ .common = &pll_cpux_clk.common,
+ .enable = BIT(29), /* LOCK_ENABLE */
+ .lock = BIT(28),
+};
+
static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
- int i;
+ int ret, i;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
@@ -1166,7 +1179,18 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Reparent CPU during CPU PLL rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_h616_cpu_nb);
+
+ /* Re-lock the CPU PLL after any rate changes */
+ ccu_pll_notifier_register(&sun50i_h616_pll_cpu_nb);
+
+ return 0;
}
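+/*
+ * How the two notifiers above cooperate: while the CPU PLL re-locks
+ * during a rate change, the mux notifier parks cpux on the stable
+ * PLL_PERI0 bypass parent (index 4), and the PLL notifier re-asserts
+ * LOCK_ENABLE and polls the lock bit after the new rate is set.
+ */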
static const struct of_device_id sun50i_h616_ccu_ids[] = {
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 1015fab95251..4c9555fc6184 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -657,7 +657,7 @@ static struct ccu_div apb_pclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("apb-pclk",
apb_parents,
&ccu_div_ops,
- 0),
+ CLK_IGNORE_UNUSED),
},
};
@@ -794,13 +794,13 @@ static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_
0x134, BIT(7), 0);
static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
- 0x140, BIT(9), 0);
+ 0x140, BIT(9), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
0x150, BIT(9), 0);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(10), 0);
+ 0x150, BIT(10), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(11), 0);
+ 0x150, BIT(11), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
0x150, BIT(12), 0);
static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
@@ -896,7 +896,6 @@ static struct ccu_common *th1520_div_clks[] = {
&vo_axi_clk.common,
&vp_apb_clk.common,
&vp_axi_clk.common,
- &cpu2vp_clk.common,
&venc_clk.common,
&dpu0_clk.common,
&dpu1_clk.common,
@@ -916,6 +915,7 @@ static struct ccu_common *th1520_gate_clks[] = {
&bmu_clk.common,
&cpu2aon_x2h_clk.common,
&cpu2peri_x2h_clk.common,
+ &cpu2vp_clk.common,
&perisys_apb1_hclk.common,
&perisys_apb2_hclk.common,
&perisys_apb3_hclk.common,
@@ -1048,7 +1048,8 @@ static int th1520_clk_probe(struct platform_device *pdev)
hw = devm_clk_hw_register_gate_parent_data(dev,
cg->common.hw.init->name,
cg->common.hw.init->parent_data,
- 0, base + cg->common.cfg0,
+ cg->common.hw.init->flags,
+ base + cg->common.cfg0,
ffs(cg->enable) - 1, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 216d85d6aac6..f684fc306ecc 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -180,7 +180,7 @@ static void of_mux_clk_setup(struct device_node *node)
pr_err("mux-clock %pOFn must have parents\n", node);
return;
}
- parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
if (!parent_names)
goto cleanup;
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index b2233d3ff9a9..bbf7714480e7 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -52,6 +52,8 @@
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_MULT_FRAC_MASK GENMASK(25, 16)
+#define WZRD_CLKFBOUT_O_MASK GENMASK(7, 0)
#define WZRD_CLKFBOUT_L_SHIFT 0
#define WZRD_CLKFBOUT_H_SHIFT 8
#define WZRD_CLKFBOUT_L_MASK GENMASK(7, 0)
@@ -87,14 +89,14 @@
#define DIV_O 0x01
#define DIV_ALL 0x03
-#define WZRD_M_MIN 2
-#define WZRD_M_MAX 128
-#define WZRD_D_MIN 1
-#define WZRD_D_MAX 106
-#define WZRD_VCO_MIN 800000000
-#define WZRD_VCO_MAX 1600000000
-#define WZRD_O_MIN 1
-#define WZRD_O_MAX 128
+#define WZRD_M_MIN 2ULL
+#define WZRD_M_MAX 128ULL
+#define WZRD_D_MIN 1ULL
+#define WZRD_D_MAX 106ULL
+#define WZRD_VCO_MIN 800000000ULL
+#define WZRD_VCO_MAX 1600000000ULL
+#define WZRD_O_MIN 2ULL
+#define WZRD_O_MAX 128ULL
#define VER_WZRD_M_MIN 4
#define VER_WZRD_M_MAX 432
#define VER_WZRD_D_MIN 1
@@ -153,8 +155,10 @@ struct clk_wzrd {
* @flags: clk_wzrd divider flags
* @table: array of value/divider pairs, last entry should have div = 0
* @m: value of the multiplier
+ * @m_frac: fractional value of the multiplier
* @d: value of the common divider
* @o: value of the leaf divider
+ * @o_frac: value of the fractional leaf divider
* @lock: register lock
*/
struct clk_wzrd_divider {
@@ -166,8 +170,10 @@ struct clk_wzrd_divider {
u8 flags;
const struct clk_div_table *table;
u32 m;
+ u32 m_frac;
u32 d;
u32 o;
+ u32 o_frac;
spinlock_t *lock; /* divider lock */
};
@@ -372,38 +378,40 @@ static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
- u32 m, d, o;
- u32 mmin, mmax, dmin, dmax, omin, omax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
+ u64 m, d, o;
+ u64 mmin, mmax, dmin, dmax, omin, omax, mdmin, mdmax;
- mmin = WZRD_M_MIN;
- mmax = WZRD_M_MAX;
+ mmin = WZRD_M_MIN << 3;
+ mmax = WZRD_M_MAX << 3;
dmin = WZRD_D_MIN;
dmax = WZRD_D_MAX;
- omin = WZRD_O_MIN;
- omax = WZRD_O_MAX;
- vcomin = WZRD_VCO_MIN;
- vcomax = WZRD_VCO_MAX;
+ omin = WZRD_O_MIN << 3;
+ omax = WZRD_O_MAX << 3;
+ vcomin = WZRD_VCO_MIN << 3;
+ vcomax = WZRD_VCO_MAX << 3;
for (m = mmin; m <= mmax; m++) {
- for (d = dmin; d <= dmax; d++) {
- vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ mdmin = max(dmin, div64_u64(parent_rate * m + vcomax / 2, vcomax));
+ mdmax = min(dmax, div64_u64(parent_rate * m + vcomin / 2, vcomin));
+ for (d = mdmin; d <= mdmax; d++) {
+ vco_freq = DIV_ROUND_CLOSEST_ULL((parent_rate * m), d);
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = abs_diff(freq, rate); /* avoid u64 wrap when freq < rate */
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m >> 3;
+ divider->m_frac = (m - (divider->m << 3)) * 125;
+ divider->d = d;
+ divider->o = o >> 3;
+ divider->o_frac = (o - (divider->o << 3)) * 125;
}
}
}
- return -EBUSY;
+ return best_diff < WZRD_MIN_ERR ? 0 : -EBUSY;
}
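+
+/*
+ * Illustration (made-up value): with the limits shifted left by 3 the
+ * loops step in 1/8 units, so a scaled multiplier of m = 41 means 5.125:
+ *
+ *	divider->m      = 41 >> 3;                  i.e. 5
+ *	divider->m_frac = (41 - (5 << 3)) * 125;    i.e. 125
+ *
+ * where 125 = 1000/8 converts eighths into the thousandths that the
+ * fractional register fields use.
+ */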
static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
@@ -496,33 +504,22 @@ static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- unsigned long vco_freq, rate_div, clockout0_div;
void __iomem *div_addr;
- u32 reg, pre, f;
+ u32 reg;
int err;
err = clk_wzrd_get_divisors(hw, rate, parent_rate);
if (err)
return err;
- vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
- rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);
-
- clockout0_div = div_u64(rate_div, WZRD_FRAC_POINTS);
-
- pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
- f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
- f &= WZRD_CLKOUT_FRAC_MASK;
-
- reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
- FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
+ reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, divider->o) |
+ FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, divider->o_frac);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
- /* Set divisor and clear phase offset */
reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
+ FIELD_PREP(WZRD_CLKFBOUT_MULT_FRAC_MASK, divider->m_frac) |
FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
- writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
return clk_wzrd_reconfig(divider, div_addr);
@@ -564,18 +561,19 @@ static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u32 m, d, o, div, reg, f;
+ u32 m, d, o, reg, f, mf;
+ u64 mul;
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
+ mf = FIELD_GET(WZRD_CLKFBOUT_MULT_FRAC_MASK, reg);
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
- div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
- return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
- divider->flags, divider->width);
+ mul = m * 1000 + mf;
+ return DIV_ROUND_CLOSEST_ULL(parent_rate * mul, d * (o * 1000 + f));
}
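+
+/*
+ * Worked example (illustrative values): m = 5, mf = 125 encode 5.125,
+ * so with d = 1, o = 2, f = 0 and a 100 MHz parent:
+ *
+ *	rate = 100 MHz * (5 * 1000 + 125) / (1 * (2 * 1000 + 0))
+ *	     = 100 MHz * 5125 / 2000 = 256.25 MHz
+ */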
static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
@@ -648,6 +646,25 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ u32 m, d, o;
+ int err;
+
+ err = clk_wzrd_get_divisors(hw, rate, *prate);
+ if (err)
+ return err;
+
+ m = divider->m;
+ d = divider->d;
+ o = divider->o;
+
+ rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac));
+ return rate;
+}
+
+static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long int_freq;
u32 m, d, o, div, f;
int err;
@@ -678,7 +695,7 @@ static const struct clk_ops clk_wzrd_ver_divider_ops = {
};
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .round_rate = clk_wzrd_ver_round_rate_all,
.set_rate = clk_wzrd_dynamic_all_ver,
.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 92a83a9bb2e1..d64b07ec48e5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -232,7 +232,7 @@ config CPUFREQ_VIRT
If in doubt, say N.
config CPUFREQ_DT_PLATDEV
- tristate "Generic DT based cpufreq platdev driver"
+ bool "Generic DT based cpufreq platdev driver"
depends on OF
help
This adds a generic DT based cpufreq platdev driver for frequency
@@ -325,8 +325,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
-endif
-
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
@@ -355,4 +353,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
+endif
+
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f7e13e60c80..704e84d00639 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -15,6 +15,14 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_AIROHA_SOC_CPUFREQ
+ tristate "Airoha EN7581 SoC CPUFreq support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ select PM_OPP
+ default ARCH_AIROHA
+ help
+ This adds the CPUFreq driver for Airoha EN7581 SoCs.
+
config ARM_APPLE_SOC_CPUFREQ
tristate "Apple Silicon SoC CPUFreq support"
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index d35a28dd9463..890fff99f37d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ) += airoha-cpufreq.o
obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index c9ebacf5c88e..302df42d6887 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -623,7 +623,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+/*
+ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
+ * between highest_perf and nominal_perf.
+ *
+ * Returns the max_boost_ratio for @cpu. Also returns the CPPC nominal
+ * frequency via @nominal_freq when that pointer is non-NULL.
+ */
+static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
@@ -652,6 +659,9 @@ static u64 get_max_boost_ratio(unsigned int cpu)
nominal_perf = perf_caps.nominal_perf;
+ if (nominal_freq)
+ *nominal_freq = perf_caps.nominal_freq;
+
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
@@ -664,8 +674,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
+
#else
-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+{
+ return 0;
+}
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -675,9 +689,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u64 max_boost_ratio, nominal_freq = 0;
unsigned int valid_states = 0;
unsigned int result = 0;
- u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
@@ -827,16 +841,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
- max_boost_ratio = get_max_boost_ratio(cpu);
+ max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
if (max_boost_ratio) {
- unsigned int freq = freq_table[0].frequency;
+ unsigned int freq = nominal_freq;
/*
- * Because the loop above sorts the freq_table entries in the
- * descending order, freq is the maximum frequency in the table.
- * Assume that it corresponds to the CPPC nominal frequency and
- * use it to set cpuinfo.max_freq.
+ * The loop above sorts the freq_table entries in
+ * descending order. If ACPI CPPC has not advertised
+ * the nominal frequency (this is possible in CPPC
+ * revisions prior to 3), then use the first entry in
+ * the pstate table as a proxy for nominal frequency.
*/
+ if (!freq)
+ freq = freq_table[0].frequency;
+
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
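
Worked example with made-up CPPC values: highest_perf = 228 and
nominal_perf = 166 give max_boost_ratio = (228 << 10) / 166 = 1406, so a
2 GHz (2000000 kHz) nominal frequency yields a cpuinfo.max_freq of
2000000 * 1406 >> 10 ≈ 2746093 kHz, i.e. roughly 2.75 GHz.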
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
new file mode 100644
index 000000000000..4fe39eadd163
--- /dev/null
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "cpufreq-dt.h"
+
+struct airoha_cpufreq_priv {
+ int opp_token;
+ struct dev_pm_domain_list *pd_list;
+ struct platform_device *cpufreq_dt;
+};
+
+static struct platform_device *cpufreq_pdev;
+
+/* No-op config_clks callback so the OPP core does not touch the CPU clock */
+static int airoha_cpufreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp,
+ void *data, bool scaling_down)
+{
+ return 0;
+}
+
+static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL };
+static const char * const airoha_cpufreq_pd_names[] = { "perf" };
+
+static int airoha_cpufreq_probe(struct platform_device *pdev)
+{
+ const struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = airoha_cpufreq_pd_names,
+ .num_pd_names = ARRAY_SIZE(airoha_cpufreq_pd_names),
+ .pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
+ };
+ struct dev_pm_opp_config config = {
+ .clk_names = airoha_cpufreq_clk_names,
+ .config_clks = airoha_cpufreq_config_clks_nop,
+ };
+ struct platform_device *cpufreq_dt;
+ struct airoha_cpufreq_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev;
+ int ret;
+
+ /* All CPUs share the same OPP table */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Set OPP table conf with NOP config_clks */
+ priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config);
+ if (priv->opp_token < 0)
+ return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n");
+
+ /* Attach PM for OPP */
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &priv->pd_list);
+ if (ret)
+ goto clear_opp_config;
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (ret) {
+ dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret);
+ goto detach_pm;
+ }
+
+ priv->cpufreq_dt = cpufreq_dt;
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+detach_pm:
+ dev_pm_domain_detach_list(priv->pd_list);
+clear_opp_config:
+ dev_pm_opp_clear_config(priv->opp_token);
+
+ return ret;
+}
+
+static void airoha_cpufreq_remove(struct platform_device *pdev)
+{
+ struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev);
+
+ platform_device_unregister(priv->cpufreq_dt);
+
+ dev_pm_domain_detach_list(priv->pd_list);
+
+ dev_pm_opp_clear_config(priv->opp_token);
+}
+
+static struct platform_driver airoha_cpufreq_driver = {
+ .probe = airoha_cpufreq_probe,
+ .remove = airoha_cpufreq_remove,
+ .driver = {
+ .name = "airoha-cpufreq",
+ },
+};
+
+static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,en7581" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list);
+
+static int __init airoha_cpufreq_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(airoha_cpufreq_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&airoha_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq",
+ -1, match, sizeof(*match));
+ ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
+ if (ret)
+ platform_driver_unregister(&airoha_cpufreq_driver);
+
+ return ret;
+}
+module_init(airoha_cpufreq_init);
+
+static void __exit airoha_cpufreq_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&airoha_cpufreq_driver);
+}
+module_exit(airoha_cpufreq_exit);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb1..8d692415d905 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -32,7 +32,6 @@ TRACE_EVENT(amd_pstate_perf,
u64 aperf,
u64 tsc,
unsigned int cpu_id,
- bool changed,
bool fast_switch
),
@@ -44,7 +43,6 @@ TRACE_EVENT(amd_pstate_perf,
aperf,
tsc,
cpu_id,
- changed,
fast_switch
),
@@ -57,7 +55,6 @@ TRACE_EVENT(amd_pstate_perf,
__field(unsigned long long, aperf)
__field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
- __field(bool, changed)
__field(bool, fast_switch)
),
@@ -70,11 +67,10 @@ TRACE_EVENT(amd_pstate_perf,
__entry->aperf = aperf;
__entry->tsc = tsc;
__entry->cpu_id = cpu_id;
- __entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
(unsigned long)__entry->min_perf,
(unsigned long)__entry->target_perf,
(unsigned long)__entry->capacity,
@@ -83,11 +79,55 @@ TRACE_EVENT(amd_pstate_perf,
(unsigned long long)__entry->aperf,
(unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
- (__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
)
);
+TRACE_EVENT(amd_pstate_epp_perf,
+
+ TP_PROTO(unsigned int cpu_id,
+ unsigned int highest_perf,
+ unsigned int epp,
+ unsigned int min_perf,
+ unsigned int max_perf,
+ bool boost
+ ),
+
+ TP_ARGS(cpu_id,
+ highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ boost),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu_id)
+ __field(unsigned int, highest_perf)
+ __field(unsigned int, epp)
+ __field(unsigned int, min_perf)
+ __field(unsigned int, max_perf)
+ __field(bool, boost)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->highest_perf = highest_perf;
+ __entry->epp = epp;
+ __entry->min_perf = min_perf;
+ __entry->max_perf = max_perf;
+ __entry->boost = boost;
+ ),
+
+ TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+ (unsigned int)__entry->cpu_id,
+ (unsigned int)__entry->min_perf,
+ (unsigned int)__entry->max_perf,
+ (unsigned int)__entry->highest_perf,
+ (unsigned int)__entry->epp,
+ (bool)__entry->boost
+ )
+);
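+
+/*
+ * Rendered, an event line looks like (made-up numbers):
+ *
+ *	cpu0: [30<->228]/228, epp=128, boost=1
+ *
+ * i.e. the min/max perf window over highest_perf, then EPP and boost.
+ */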
+
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index a261d7300951..3a0a380c3590 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -207,7 +207,6 @@ static void amd_pstate_ut_check_freq(u32 index)
int cpu = 0;
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
- u32 nominal_freq_khz;
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
@@ -215,14 +214,13 @@ static void amd_pstate_ut_check_freq(u32 index)
break;
cpudata = policy->driver_data;
- nominal_freq_khz = cpudata->nominal_freq*1000;
- if (!((cpudata->max_freq >= nominal_freq_khz) &&
- (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
+ if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
(cpudata->min_freq > 0))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, nominal_freq_khz,
+ __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
goto skip_test;
}
@@ -236,13 +234,13 @@ static void amd_pstate_ut_check_freq(u32 index)
if (cpudata->boost_supported) {
if ((policy->max == cpudata->max_freq) ||
- (policy->max == nominal_freq_khz))
+ (policy->max == cpudata->nominal_freq))
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
- nominal_freq_khz);
+ cpudata->nominal_freq);
goto skip_test;
}
} else {
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 66e5dfc711c0..dd9b8d6993d6 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -88,6 +89,11 @@ static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
+#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
+
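+/*
+ * These four byte-wide fields make up the low 32 bits of
+ * MSR_AMD_CPPC_REQ; msr_update_perf() below composes the word with
+ * FIELD_PREP(). Illustration (made-up perf values):
+ *
+ *	value  = FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, 228);
+ *	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, 30);
+ *	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, 0xff);
+ */
+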
/*
* AMD Energy Preference Performance (EPP)
* The EPP is used in the CCLK DPM controller to drive
@@ -180,120 +186,145 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static s16 msr_get_epp(struct amd_cpudata *cpudata)
{
- u64 epp;
+ u64 value;
int ret;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (!cppc_req_cached) {
- epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- &cppc_req_cached);
- if (epp)
- return epp;
- }
- epp = (cppc_req_cached >> 24) & 0xFF;
- } else {
- ret = cppc_get_epp_perf(cpudata->cpu, &epp);
- if (ret < 0) {
- pr_debug("Could not retrieve energy perf value (%d)\n", ret);
- return -EIO;
- }
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
}
-static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
+
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
{
- s16 epp;
- int index = -EINVAL;
+ return static_call(amd_pstate_get_epp)(cpudata);
+}
- epp = amd_pstate_get_epp(cpudata, 0);
- if (epp < 0)
- return epp;
+static s16 shmem_get_epp(struct amd_cpudata *cpudata)
+{
+ u64 epp;
+ int ret;
- switch (epp) {
- case AMD_CPPC_EPP_PERFORMANCE:
- index = EPP_INDEX_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
- index = EPP_INDEX_BALANCE_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_POWERSAVE:
- index = EPP_INDEX_BALANCE_POWERSAVE;
- break;
- case AMD_CPPC_EPP_POWERSAVE:
- index = EPP_INDEX_POWERSAVE;
- break;
- default:
- break;
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return index;
+ return (s16)(epp & 0xff);
}
-static void msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
+static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
+ u64 value, prev;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (value == prev)
+ return 0;
+
+ if (fast_switch) {
+ wrmsrl(MSR_AMD_CPPC_REQ, value);
+ return 0;
+ } else {
+ int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+
+ if (ret)
+ return ret;
+ }
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ WRITE_ONCE(cpudata->epp_cached, epp);
+
+ return 0;
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+ u32 max_perf, u32 epp,
+ bool fast_switch)
{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, epp, fast_switch);
}
-static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
+ u64 value, prev;
int ret;
- struct cppc_perf_ctrls perf_ctrls;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
-
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- if (!ret)
- cpudata->epp_cached = epp;
- } else {
- amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, false);
+ if (value == prev)
+ return 0;
- perf_ctrls.energy_perf = epp;
- ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- if (ret) {
- pr_debug("failed to set energy perf value (%d)\n", ret);
- return ret;
- }
- cpudata->epp_cached = epp;
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (ret) {
+ pr_err("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
+ /* update both cached values so that msr_update_perf() can detect a no-op */
+ WRITE_ONCE(cpudata->epp_cached, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
return ret;
}
-static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
- int pref_index)
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
+
+static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ return static_call(amd_pstate_set_epp)(cpudata, epp);
+}
+
+static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
- int epp = -EINVAL;
int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ if (epp == cpudata->epp_cached)
+ return 0;
+
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+ WRITE_ONCE(cpudata->epp_cached, epp);
+
+ return ret;
+}
+
+static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
+ int pref_index)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int epp;
if (!pref_index)
epp = cpudata->epp_default;
-
- if (epp == -EINVAL)
+ else
epp = epp_values[pref_index];
if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -301,9 +332,15 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
return -EBUSY;
}
- ret = amd_pstate_set_epp(cpudata, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ policy->boost_enabled);
+ }
- return ret;
+ return amd_pstate_set_epp(cpudata, epp);
}
static inline int msr_cppc_enable(bool enable)
@@ -442,17 +479,23 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void shmem_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
struct cppc_perf_ctrls perf_ctrls;
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ int ret = shmem_set_epp(cpudata, epp);
+
+ if (ret)
+ return ret;
+ }
+
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ return cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -493,14 +536,8 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
{
unsigned long max_freq;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
- u64 value = prev;
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
max_freq = READ_ONCE(cpudata->max_limit_freq);
@@ -511,34 +548,18 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
des_perf = 0;
}
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(des_perf);
-
/* limit the max perf when core performance boost feature is disabled */
if (!cpudata->boost_supported)
max_perf = min_t(unsigned long, nominal_perf, max_perf);
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
- cpudata->cpu, (value != prev), fast_switch);
+ cpudata->cpu, fast_switch);
}
- if (value == prev)
- goto cpufreq_policy_put;
+ amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- amd_pstate_update_perf(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
-
-cpufreq_policy_put:
cpufreq_cpu_put(policy);
}
@@ -570,7 +591,7 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf, max_freq;
+ u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
max_perf = READ_ONCE(cpudata->highest_perf);
@@ -578,12 +599,8 @@ static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
max_limit_perf = div_u64(policy->max * max_perf, max_freq);
min_limit_perf = div_u64(policy->min * max_perf, max_freq);
- lowest_perf = READ_ONCE(cpudata->lowest_perf);
- if (min_limit_perf < lowest_perf)
- min_limit_perf = lowest_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
@@ -704,8 +721,8 @@ static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
if (on)
policy->cpuinfo.max_freq = max_freq;
- else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
- policy->cpuinfo.max_freq = nominal_freq * 1000;
+ else if (policy->cpuinfo.max_freq > nominal_freq)
+ policy->cpuinfo.max_freq = nominal_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -727,12 +744,11 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
+
ret = amd_pstate_cpu_boost_update(policy, state);
- WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
- mutex_unlock(&amd_pstate_driver_lock);
return ret;
}
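+
+/*
+ * guard(mutex)() comes from <linux/cleanup.h>: the lock is released
+ * automatically when the guard goes out of scope, which is why the
+ * error paths above can return without an explicit unlock. Sketch:
+ *
+ *	guard(mutex)(&amd_pstate_driver_lock);
+ *	if (error)
+ *		return -EINVAL;		(lock dropped on return)
+ */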
@@ -752,9 +768,6 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- /* at least one CPU supports CPB, even if others fail later on to set up */
- current_pstate_driver->boost_enabled = true;
-
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
@@ -802,7 +815,7 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
*/
- sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
schedule_work(&sched_prefcore_work);
}
@@ -823,7 +836,8 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (!amd_pstate_prefcore)
return;
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
+
ret = amd_get_highest_perf(cpu, &cur_high);
if (ret)
goto free_cpufreq_put;
@@ -843,7 +857,6 @@ free_cpufreq_put:
if (!highest_perf_changed)
cpufreq_update_policy(cpu);
- mutex_unlock(&amd_pstate_driver_lock);
}
/*
@@ -895,9 +908,8 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
int ret;
u32 min_freq, max_freq;
- u32 nominal_perf, nominal_freq;
+ u32 highest_perf, nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
- u32 boost_ratio, lowest_nonlinear_ratio;
struct cppc_perf_caps cppc_perf;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
@@ -905,29 +917,25 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
return ret;
if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq * 1000;
+ min_freq = quirks->lowest_freq;
else
- min_freq = cppc_perf.lowest_freq * 1000;
+ min_freq = cppc_perf.lowest_freq;
if (quirks && quirks->nominal_freq)
- nominal_freq = quirks->nominal_freq ;
+ nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
+ highest_perf = READ_ONCE(cpudata->highest_perf);
nominal_perf = READ_ONCE(cpudata->nominal_perf);
-
- boost_ratio = div_u64(cpudata->highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
- max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+ max_freq = div_u64((u64)highest_perf * nominal_freq, nominal_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
- lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
-
- WRITE_ONCE(cpudata->min_freq, min_freq);
- WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
- WRITE_ONCE(cpudata->max_freq, max_freq);
+ lowest_nonlinear_freq = div_u64((u64)nominal_freq * lowest_nonlinear_perf, nominal_perf);
+ WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
+ WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
/**
* Below values need to be initialized correctly, otherwise driver will fail to load
@@ -937,13 +945,13 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
*/
if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
- min_freq, max_freq, nominal_freq * 1000);
+ min_freq, max_freq, nominal_freq);
return -EINVAL;
}
- if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
- lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+ lowest_nonlinear_freq, min_freq, nominal_freq);
return -EINVAL;
}
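
With the SCHED_CAPACITY_SHIFT ratio math replaced by direct div_u64() calls, the intermediate frequencies stay in MHz and are converted to kHz only when cached. A worked example with illustrative values, not taken from any specific part:

    /* highest_perf = 196, nominal_perf = 154, nominal_freq = 3800 MHz */
    max_freq = div_u64((u64)196 * 3800, 154);		/* = 4836 MHz */
    WRITE_ONCE(cpudata->max_freq, max_freq * 1000);	/* cached as 4836000 kHz */

lowest_nonlinear_freq follows the same shape, scaling nominal_freq by lowest_nonlinear_perf / nominal_perf.
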
@@ -1160,7 +1168,6 @@ static ssize_t show_energy_performance_available_preferences(
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
- struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
@@ -1172,11 +1179,11 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- mutex_lock(&amd_pstate_limits_lock);
- ret = amd_pstate_set_energy_pref_index(cpudata, ret);
- mutex_unlock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
+
+ ret = amd_pstate_set_energy_pref_index(policy, ret);
- return ret ?: count;
+ return ret ? ret : count;
}
static ssize_t show_energy_performance_preference(
@@ -1185,9 +1192,22 @@ static ssize_t show_energy_performance_preference(
struct amd_cpudata *cpudata = policy->driver_data;
int preference;
- preference = amd_pstate_get_energy_pref_index(cpudata);
- if (preference < 0)
- return preference;
+ switch (cpudata->epp_cached) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ preference = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ preference = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ preference = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ preference = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
@@ -1236,6 +1256,9 @@ static int amd_pstate_register_driver(int mode)
return ret;
}
+ /* at least one CPU supports CPB */
+ current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
+
ret = cpufreq_register_driver(current_pstate_driver);
if (ret) {
amd_pstate_driver_cleanup();
@@ -1340,13 +1363,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status);
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t ret;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_pstate_show_status(buf);
- mutex_unlock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
- return ret;
+ return amd_pstate_show_status(buf);
}
static ssize_t status_store(struct device *a, struct device_attribute *b,
@@ -1355,9 +1375,8 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
@@ -1451,7 +1470,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
cpudata->cpu = policy->cpu;
- cpudata->epp_policy = 0;
ret = amd_pstate_init_perf(cpudata);
if (ret)
@@ -1477,8 +1495,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
-
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -1489,10 +1505,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
* the default cpufreq governor is neither powersave nor performance.
*/
if (amd_pstate_acpi_pm_profile_server() ||
- amd_pstate_acpi_pm_profile_undefined())
+ amd_pstate_acpi_pm_profile_undefined()) {
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
+ cpudata->epp_default = amd_pstate_get_epp(cpudata);
+ } else {
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
+ }
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
@@ -1505,6 +1524,9 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
+ ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ if (ret)
+ return ret;
current_pstate_driver->adjust_perf = NULL;
@@ -1530,51 +1552,24 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf;
- u64 value;
- s16 epp;
+ u32 epp;
- max_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
amd_pstate_update_min_max_limit(policy);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_perf = min(cpudata->nominal_perf, max_perf);
-
- /* Initial min/max values for CPPC Performance Controls Register */
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
- /* CPPC EPP feature require to set zero to the desire perf bit */
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(0);
-
- cpudata->epp_policy = cpudata->policy;
+ epp = 0;
+ else
+ epp = READ_ONCE(cpudata->epp_cached);
- /* Get BIOS pre-defined epp value */
- epp = amd_pstate_get_epp(cpudata, value);
- if (epp < 0) {
- /**
- * This return value can only be negative for shared_memory
- * systems where EPP register read/write not supported.
- */
- return epp;
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+ cpudata->min_limit_perf,
+ cpudata->max_limit_perf,
+ policy->boost_enabled);
}
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- epp = 0;
-
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- return amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1603,87 +1598,63 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
{
- struct cppc_perf_ctrls perf_ctrls;
- u64 value, max_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 max_perf;
int ret;
ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
- value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = max_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ cpudata->epp_cached,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+ max_perf, policy->boost_enabled);
}
+
+ return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
}
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- amd_pstate_epp_reenable(cpudata);
- cpudata->suspended = false;
- }
+ ret = amd_pstate_epp_reenable(policy);
+ if (ret)
+ return ret;
+ cpudata->suspended = false;
return 0;
}
-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- int min_perf;
- u64 value;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- mutex_lock(&amd_pstate_limits_lock);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
-
- /* Set max perf same as min perf */
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(min_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.desired_perf = 0;
- perf_ctrls.min_perf = min_perf;
- perf_ctrls.max_perf = min_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- }
- mutex_unlock(&amd_pstate_limits_lock);
-}
-
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
-
- pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+ int min_perf;
if (cpudata->suspended)
return 0;
- if (cppc_state == AMD_PSTATE_ACTIVE)
- amd_pstate_epp_offline(policy);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
- return 0;
+ guard(mutex)(&amd_pstate_limits_lock);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ min_perf, min_perf, policy->boost_enabled);
+ }
+
+ return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
@@ -1711,12 +1682,10 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- mutex_lock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
/* enable amd pstate from suspend state */
- amd_pstate_epp_reenable(cpudata);
-
- mutex_unlock(&amd_pstate_limits_lock);
+ amd_pstate_epp_reenable(policy);
cpudata->suspended = false;
}
@@ -1869,6 +1838,8 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
static_call_update(amd_pstate_init_perf, shmem_init_perf);
static_call_update(amd_pstate_update_perf, shmem_update_perf);
+ static_call_update(amd_pstate_get_epp, shmem_get_epp);
+ static_call_update(amd_pstate_set_epp, shmem_set_epp);
}
if (amd_pstate_prefcore) {
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index cd573bc6b6db..9747e3be6cee 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -57,7 +57,6 @@ struct amd_aperf_mperf {
* @hw_prefcore: check whether HW supports preferred core feature.
* Only when hw_prefcore and early prefcore param are true,
* AMD P-State driver supports preferred core feature.
- * @epp_policy: Last saved policy used to set energy-performance preference
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
* @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
@@ -94,13 +93,11 @@ struct amd_cpudata {
bool hw_prefcore;
/* EPP feature related attributes*/
- s16 epp_policy;
s16 epp_cached;
u32 policy;
u64 cppc_cap1_cached;
bool suspended;
s16 epp_default;
- bool boost_state;
};
/*
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 4dcacab9b4bf..269b18c62d04 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -22,11 +22,14 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define APPLE_DVFS_CMD 0x20
-#define APPLE_DVFS_CMD_BUSY BIT(31)
-#define APPLE_DVFS_CMD_SET BIT(25)
-#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
-#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS1_S5L8960X GENMASK(24, 22)
+#define APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT 22
+#define APPLE_DVFS_CMD_PS2 GENMASK(15, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD_PS1_SHIFT 0
/* Same timebase as CPU counter (24MHz) */
#define APPLE_DVFS_LAST_CHG_TIME 0x38
@@ -35,6 +38,9 @@
* Apple ran out of bits and had to shift this in T8112...
*/
#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_S5L8960X GENMASK(5, 3)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X 3
+#define APPLE_DVFS_STATUS_TGT_PS_S5L8960X GENMASK(2, 0)
#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
@@ -52,12 +58,15 @@
#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
-#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+#define APPLE_DVFS_TRANSITION_TIMEOUT 400
struct apple_soc_cpufreq_info {
+ bool has_ps2;
u64 max_pstate;
u64 cur_pstate_mask;
u64 cur_pstate_shift;
+ u64 ps1_mask;
+ u64 ps1_shift;
};
struct apple_cpu_priv {
@@ -68,25 +77,47 @@ struct apple_cpu_priv {
static struct cpufreq_driver apple_soc_cpufreq_driver;
+static const struct apple_soc_cpufreq_info soc_s5l8960x_info = {
+ .has_ps2 = false,
+ .max_pstate = 7,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_S5L8960X,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X,
+ .ps1_mask = APPLE_DVFS_CMD_PS1_S5L8960X,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT,
+};
+
static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .has_ps2 = true,
.max_pstate = 15,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .has_ps2 = false,
.max_pstate = 31,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_default_info = {
+ .has_ps2 = false,
.max_pstate = 15,
.cur_pstate_mask = 0, /* fallback */
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
{
+ .compatible = "apple,s5l8960x-cluster-cpufreq",
+ .data = &soc_s5l8960x_info,
+ },
+ {
.compatible = "apple,t8103-cluster-cpufreq",
.data = &soc_t8103_info,
},
@@ -109,7 +140,7 @@ static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
unsigned int pstate;
if (priv->info->cur_pstate_mask) {
- u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+ u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
} else {
@@ -148,9 +179,12 @@ static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
return -EIO;
}
- reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg &= ~priv->info->ps1_mask;
+ reg |= pstate << priv->info->ps1_shift;
+ if (priv->info->has_ps2) {
+ reg &= ~APPLE_DVFS_CMD_PS2;
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ }
reg |= APPLE_DVFS_CMD_SET;
writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
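
One plausible reading of the PS1 handling change: FIELD_PREP() requires a compile-time constant mask, so once the PS1 field position varies per SoC (bits 24:22 on S5L8960X, bits 4:0 on later parts) it must be applied with a runtime mask/shift pair, while the fixed PS2 field keeps FIELD_PREP(). A sketch contrasting the two, assuming an info pointer like the driver's priv->info:

    /* fixed field: mask known at build time */
    reg &= ~APPLE_DVFS_CMD_PS2;
    reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);

    /* per-SoC field: mask and shift selected at runtime */
    reg &= ~info->ps1_mask;
    reg |= (u64)pstate << info->ps1_shift;
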
@@ -275,7 +309,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = APPLE_DVFS_TRANSITION_TIMEOUT * NSEC_PER_USEC;
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bd8f75accfa0..2486a6c5256a 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -225,7 +225,7 @@ static void __init cppc_freq_invariance_init(void)
if (fie_disabled)
return;
- kworker_fie = kthread_create_worker(0, "cppc_fie");
+ kworker_fie = kthread_run_worker(0, "cppc_fie");
if (IS_ERR(kworker_fie)) {
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
PTR_ERR(kworker_fie));
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 2a3e8bd317c9..2aa00769cf09 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,8 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,en7581", },
+
{ .compatible = "allwinner,sun50i-a100" },
{ .compatible = "allwinner,sun50i-h6", },
{ .compatible = "allwinner,sun50i-h616", },
@@ -235,5 +237,3 @@ create_pdev:
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
-MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1a4cae54a01b..1076e37a18ad 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
@@ -602,12 +603,12 @@ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
if (cpufreq_boost_trigger_state(enable)) {
pr_err("%s: Cannot %s BOOST!\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
pr_debug("%s: cpufreq BOOST %s\n",
- __func__, enable ? "enabled" : "disabled");
+ __func__, str_enabled_disabled(enable));
return count;
}
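
The str_enable_disable() and str_enabled_disabled() helpers from <linux/string_choices.h> replace the open-coded ternaries here. Illustrative output, assuming the usual helper semantics:

    pr_debug("cpufreq BOOST %s\n", str_enabled_disabled(true));	/* "... BOOST enabled" */
    pr_err("Cannot %s BOOST!\n", str_enable_disable(false));	/* "Cannot disable BOOST!" */
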
@@ -1538,7 +1539,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
- * sugov_eas_rebuild_sd() is called, which will result
+ * em_rebuild_sched_domains() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
@@ -2812,7 +2813,7 @@ err_reset_state:
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ __func__, str_enable_disable(state));
return ret;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b8e2396a708a..9c4cc01fd51a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -28,6 +28,7 @@
#include <linux/pm_qos.h>
#include <linux/bitfield.h>
#include <trace/events/power.h>
+#include <linux/units.h>
#include <asm/cpu.h>
#include <asm/div64.h>
@@ -302,11 +303,11 @@ static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
-#define HYBRID_SCALING_FACTOR 78741
+#define HYBRID_SCALING_FACTOR_ADL 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
#define HYBRID_SCALING_FACTOR_LNL 86957
-static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
+static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
@@ -414,18 +415,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
static int intel_pstate_cppc_get_scaling(int cpu)
{
struct cppc_perf_caps cppc_perf;
- int ret;
-
- ret = cppc_get_perf_caps(cpu, &cppc_perf);
/*
- * If the nominal frequency and the nominal performance are not
- * zero and the ratio between them is not 100, return the hybrid
- * scaling factor.
+ * Compute the perf-to-frequency scaling factor for the given CPU if
+ * possible, unless it would be 0.
*/
- if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
- cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
- return hybrid_scaling_factor;
+ if (!cppc_get_perf_caps(cpu, &cppc_perf) &&
+ cppc_perf.nominal_perf && cppc_perf.nominal_freq)
+ return div_u64(cppc_perf.nominal_freq * KHZ_PER_MHZ,
+ cppc_perf.nominal_perf);
return core_get_scaling();
}
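
The rewritten helper derives the perf-to-frequency scaling factor from the CPPC capabilities themselves instead of falling back to a hard-coded hybrid constant. With illustrative values:

    /* nominal_freq = 3000 MHz, nominal_perf = 38 */
    scaling = div_u64(3000 * KHZ_PER_MHZ, 38);	/* = 78947 kHz per perf unit */

which lands near HYBRID_SCALING_FACTOR_ADL (78741) on a comparable part, but is now computed per CPU rather than assumed.
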
@@ -2211,24 +2209,30 @@ static void hybrid_get_type(void *data)
static int hwp_get_cpu_scaling(int cpu)
{
- u8 cpu_type = 0;
+ if (hybrid_scaling_factor) {
+ u8 cpu_type = 0;
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
- /* P-cores have a smaller perf level-to-freqency scaling factor. */
- if (cpu_type == 0x40)
- return hybrid_scaling_factor;
+ smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
- /* Use default core scaling for E-cores */
- if (cpu_type == 0x20)
+ /*
+ * Return the hybrid scaling factor for P-cores and use the
+ * default core scaling for E-cores.
+ */
+ if (cpu_type == 0x40)
+ return hybrid_scaling_factor;
+
+ if (cpu_type == 0x20)
+ return core_get_scaling();
+ }
+
+ /* Use core scaling on non-hybrid systems. */
+ if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
return core_get_scaling();
/*
- * If reached here, this system is either non-hybrid (like Tiger
- * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
- * no E cores (in which case CPUID for hybrid support is 0).
- *
- * The CPPC nominal_frequency field is 0 for non-hybrid systems,
- * so the default core scaling will be used for them.
+ * The system is hybrid, but the hybrid scaling factor is not known or
+ * the CPU type is not one of the above, so use CPPC to compute the
+ * scaling factor for this CPU.
*/
return intel_pstate_cppc_get_scaling(cpu);
}
@@ -2709,7 +2713,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
}
cpu->epp_powersave = -EINVAL;
- cpu->epp_policy = 0;
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
intel_pstate_get_cpu_pstates(cpu);
@@ -3665,8 +3669,12 @@ static const struct x86_cpu_id intel_epp_default[] = {
};
static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, HYBRID_SCALING_FACTOR_ADL),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
- X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
{}
};
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 8de759247771..ae79d909943b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
#include <trace/events/power.h>
@@ -281,7 +282,7 @@ next:
pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
- (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
+ str_enabled_disabled(powernv_pstate_info.wof_enabled));
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 98129565acb8..b2e7e89feaac 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
}
/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
const struct qcom_cpufreq_soc_data *soc_data;
- struct cpufreq_policy *policy;
unsigned int index;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -163,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
return policy->freq_table[index].frequency;
}
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -177,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
if (data->throttle_irq >= 0)
return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return qcom_cpufreq_get_freq(cpu);
+ return qcom_cpufreq_get_freq(policy);
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -363,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
* If h/w throttled frequency is higher than what cpufreq has requested
* for, then stop polling and switch back to interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
@@ -441,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return data->throttle_irq;
data->cancel_throttle = false;
- data->policy = policy;
mutex_init(&data->throttle_lock);
INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
@@ -552,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
+ data->policy = policy;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -622,11 +623,24 @@ static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned lon
{
struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
- return qcom_lmh_get_throttle_freq(data);
+ return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
+}
+
+/*
+ * Since we cannot determine the rate closest to the target rate, let's just
+ * return the actual rate at which the clock is running. This is needed to
+ * make the clk_set_rate() API work properly.
+ */
+static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);
+
+ return 0;
}
static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
.recalc_rate = qcom_cpufreq_hw_recalc_rate,
+ .determine_rate = qcom_cpufreq_hw_determine_rate,
};
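
Without a .determine_rate (or .round_rate) callback the clk core cannot compute a new rate for this clock, so clk_set_rate() on it fails; reporting the running rate makes such requests resolve to the actual frequency instead. A hedged usage sketch, with cpu_clk and target_hz as placeholders:

    /* a consumer's clk_set_rate() now succeeds, settling on the actual rate */
    ret = clk_set_rate(cpu_clk, target_hz);
    cur = clk_get_rate(cpu_clk);	/* backed by qcom_cpufreq_hw_recalc_rate() */
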
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 07d6f9a9b7c8..b8fe758aeb01 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
@@ -26,6 +27,8 @@ struct scmi_data {
int nr_opp;
struct device *cpu_dev;
cpumask_var_t opp_shared_cpus;
+ struct notifier_block limit_notify_nb;
+ struct freq_qos_request limits_freq_req;
};
static struct scmi_protocol_handle *ph;
@@ -174,6 +177,22 @@ static struct freq_attr *scmi_cpufreq_hw_attr[] = {
NULL,
};
+static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
+ struct scmi_perf_limits_report *limit_notify = data;
+ unsigned int limit_freq_khz;
+ int ret;
+
+ limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;
+
+ ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
+ if (ret < 0)
+ pr_warn("failed to update freq constraint: %d\n", ret);
+
+ return NOTIFY_OK;
+}
+
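
The notifier converts the firmware-reported limit from Hz to kHz (HZ_PER_KHZ is 1000) before updating the FREQ_QOS_MAX request added at init time. For example, assuming an illustrative 1.4 GHz limit from the platform:

    /* limit_notify->range_max_freq = 1400000000 Hz */
    limit_freq_khz = 1400000000 / HZ_PER_KHZ;	/* = 1400000 kHz */
    freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
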
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp, domain;
@@ -181,6 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -294,6 +314,23 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
}
+ ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret);
+ goto out_free_table;
+ }
+
+ priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
+ ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ if (ret)
+ dev_warn(&sdev->dev,
+ "failed to register for limits change notifier for domain %d\n",
+ priv->domain_id);
+
return 0;
out_free_table:
@@ -313,7 +350,13 @@ out_free_priv:
static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
+ sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ freq_qos_remove_request(&priv->limits_freq_req);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
@@ -372,6 +415,8 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
if (!handle)
return -ENODEV;
+ scmi_cpufreq_driver.driver_data = sdev;
+
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
if (IS_ERR(perf_ops))
return PTR_ERR(perf_ops);
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 8a0cd5312a59..15899dd77c08 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -323,7 +323,7 @@ static int __init us2e_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
- us2e_freq_table = kzalloc(NR_CPUS * sizeof(*us2e_freq_table),
+ us2e_freq_table = kcalloc(NR_CPUS, sizeof(*us2e_freq_table),
GFP_KERNEL);
if (!us2e_freq_table)
return -ENOMEM;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index b50f9d13e6d2..de50a2f3b124 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -171,7 +171,7 @@ static int __init us3_freq_init(void)
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
- us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table),
+ us3_freq_table = kcalloc(NR_CPUS, sizeof(*us3_freq_table),
GFP_KERNEL);
if (!us3_freq_table)
return -ENOMEM;
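
Both sparc conversions swap the open-coded kzalloc(NR_CPUS * size) for kcalloc(), which zeroes the buffer and returns NULL if the count-times-size multiplication would overflow rather than silently wrapping:

    /* overflow-checked and zeroed */
    table = kcalloc(NR_CPUS, sizeof(*table), GFP_KERNEL);
    if (!table)
    	return -ENOMEM;
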
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 146f97068022..5fb5228f6bf1 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -72,6 +72,7 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
*/
if (use_osi) {
pd->power_off = psci_pd_power_off;
+ pd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
if (IS_ENABLED(CONFIG_PREEMPT_RT))
pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
} else {
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index f2992f92d8db..173ddcac540a 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
- * timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant cause of CPU
- * wakeups from idle states. Moreover, information about what happened in the
- * (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the (known) time till the closest
- * timer event, referred to as the sleep length, is likely to be suitable for
- * the upcoming CPU idle period and, if not, then which of the shallower idle
- * states to choose instead of it.
+ * timer interrupts are two or more orders of magnitude more frequent than any
+ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
+ * Moreover, in principle, the time when the next timer event is going to occur
+ * can be determined at the idle state selection time, although doing that may
+ * be costly, so it can be regarded as the most reliable source of information
+ * for idle state selection.
*
- * Of course, non-timer wakeup sources are more important in some use cases
- * which can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that context it is not necessary to
- * consider idle duration values greater than the sleep length, because the
- * closest timer will ultimately wake up the CPU anyway unless it is woken up
- * earlier.
+ * Of course, non-timer wakeup sources are more important in some use cases,
+ * but even then it is generally unnecessary to consider idle duration values
+ * greater than the time till the next timer event, referred to as the sleep
+ * length in what follows, because the closest timer will ultimately wake up the
+ * CPU anyway unless it is woken up earlier.
*
- * Thus this governor estimates whether or not the prospective idle duration of
- * a CPU is likely to be significantly shorter than the sleep length and selects
- * an idle state for it accordingly.
+ * However, since obtaining the sleep length may be costly, the governor first
+ * checks if it can select a shallow idle state using wakeup pattern information
+ * from recent times, in which case it can do without knowing the sleep length
+ * at all. For this purpose, it counts CPU wakeup events and looks for an idle
+ * state whose target residency has not exceeded the idle duration (measured
+ * after wakeup) in the majority of relevant recent cases. If the target
+ * residency of that state is small enough, it may be used right away and the
+ * sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU
@@ -39,7 +41,11 @@
* idle state 2, the third bin spans from the target residency of idle state 2
* up to, but not including, the target residency of idle state 3 and so on.
* The last bin spans from the target residency of the deepest idle state
- * supplied by the driver to infinity.
+ * supplied by the driver to the scheduler tick period length or to infinity if
+ * the tick period length is less than the target residency of that state. In
+ * the latter case, the governor also counts events with the measured idle
+ * duration between the tick period length and the target residency of the
+ * deepest idle state.
*
* Two metrics called "hits" and "intercepts" are associated with each bin.
* They are updated every time before selecting an idle state for the given CPU
@@ -49,47 +55,46 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
- * situations in which the measured idle duration is so much shorter than the
- * sleep length that the bin it falls into corresponds to an idle state
- * shallower than the one whose bin is fallen into by the sleep length (these
- * situations are referred to as "intercepts" below).
+ * non-timer wakeup events for which the measured idle duration falls into a bin
+ * that corresponds to an idle state shallower than the one whose bin is fallen
+ * into by the sleep length (these events are also referred to as "intercepts"
+ * below).
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
- * 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 2 sums as
- * follows:
+ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
+ * compute 2 sums as follows:
*
- * - The sum of the "hits" and "intercepts" metrics for the candidate state
- * and all of the deeper idle states (it represents the cases in which the
- * CPU was idle long enough to avoid being intercepted if the sleep length
- * had been equal to the current one).
+ * - The sum of the "hits" metric for all of the idle states shallower than
+ * the candidate one (it represents the cases in which the CPU was likely
+ * woken up by a timer).
*
- * - The sum of the "intercepts" metrics for all of the idle states shallower
- * than the candidate one (it represents the cases in which the CPU was not
- * idle long enough to avoid being intercepted if the sleep length had been
- * equal to the current one).
+ * - The sum of the "intercepts" metric for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was
+ * likely woken up by a non-timer wakeup source).
*
- * 2. If the second sum is greater than the first one the CPU is likely to wake
- * up early, so look for an alternative idle state to select.
+ * 2. If the second sum computed in step 1 is greater than a half of the sum of
+ * both metrics for the candidate state bin and all subsequent bins (if any),
+ * a shallower idle state is likely to be more suitable, so look for it.
*
- * - Traverse the idle states shallower than the candidate one in the
+ * - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
- * - If each of these sums that needs to be taken into account (because the
- * check related to it has indicated that the CPU is likely to wake up
- * early) is greater than a half of the corresponding sum computed in step
- * 1 (which means that the target residency of the state in question had
- * not exceeded the idle duration in over a half of the relevant cases),
- * select the given idle state instead of the candidate one.
+ * - If this sum is greater than a half of the second sum computed in step 1,
+ * use the given idle state as the new candidate one.
*
- * 3. By default, select the candidate state.
+ * 3. If the current candidate state is state 0 or its target residency is short
+ * enough, return it and prevent the scheduler tick from being stopped.
+ *
+ * 4. Obtain the sleep length value and check if it is below the target
+ * residency of the current candidate state, in which case a new shallower
+ * candidate state needs to be found, so look for it.
*/
#include <linux/cpuidle.h>
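
A condensed sketch of steps 1 and 2 from the comment above; the bin[] array and state indices are illustrative stand-ins, not the governor's actual data structures:

    unsigned int hits_below = 0, intercepts_below = 0, total = 0, sum = 0;
    int i, candidate = deepest_enabled_state;

    /* step 1: classify recent wakeups below and at/above the candidate */
    for (i = 0; i < candidate; i++) {
    	hits_below += bin[i].hits;		/* likely timer wakeups */
    	intercepts_below += bin[i].intercepts;	/* likely non-timer wakeups */
    }
    for (i = candidate; i <= max_state; i++)
    	total += bin[i].hits + bin[i].intercepts;

    /* step 2: pick a shallower state if intercepts dominate */
    if (2 * intercepts_below > total) {
    	for (i = candidate - 1; i >= 0; i--) {
    		sum += bin[i].intercepts;
    		if (2 * sum > intercepts_below) {
    			candidate = i;
    			break;
    		}
    	}
    }
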
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0a9cdd31cbd9..19ab145f912e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -200,23 +200,6 @@ config S390_PRNG
It is available as of z9.
-config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
- Each core of a Niagara2 processor contains a Stream
- Processing Unit, which itself contains several cryptographic
- sub-units. One set provides the Modular Arithmetic Unit,
- used for SSL offload. The other set provides the Cipher
- Group, which can perform encryption, decryption, hashing,
- checksumming, and raw copies.
-
config CRYPTO_DEV_SL3516
tristate "Storlink SL3516 crypto offloader"
depends on ARCH_GEMINI || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad4ccef67d12..fef18ffdb128 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,8 +21,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 6283e8c6d51d..86c227caa722 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -836,7 +836,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
u32 cipher_bits = 0;
u32 ecf_bits = 0;
u8 sctx_words = 0;
- u8 *ptr = spu_hdr;
flow_log("%s()\n", __func__);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
@@ -847,7 +846,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* starting out: zero the header (plus some) */
memset(spu_hdr, 0, sizeof(struct SPUHEADER));
- ptr += sizeof(struct SPUHEADER);
/* format master header word */
/* Do not set the next bit even though the datasheet says to */
@@ -861,10 +859,8 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* copy the encryption keys in the SAD entry */
if (cipher_parms->alg) {
- if (cipher_parms->key_len) {
- ptr += cipher_parms->key_len;
+ if (cipher_parms->key_len)
sctx_words += cipher_parms->key_len / 4;
- }
/*
* if encrypting then set IV size, use SCTX IV unless no IV
@@ -873,7 +869,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
if (cipher_parms->iv_len) {
/* Use SCTX IV */
ecf_bits |= SCTX_IV;
- ptr += cipher_parms->iv_len;
sctx_words += cipher_parms->iv_len / 4;
}
}
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 87781c1534ee..079a22cc9f02 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
index 5b105a23f699..410084a9039c 100644
--- a/drivers/crypto/ccp/dbc.c
+++ b/drivers/crypto/ccp/dbc.c
@@ -7,6 +7,8 @@
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
+#include <linux/mutex.h>
+
#include "dbc.h"
#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
@@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
- mutex_lock(&dbc_dev->ioctl_mutex);
+ guard(mutex)(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
ret = send_dbc_nonce(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
break;
case DBCIOCUID:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
break;
case DBCIOCPARAM:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
+ return -EFAULT;
break;
default:
- ret = -EINVAL;
-
+ return -EINVAL;
}
-unlock:
- mutex_unlock(&dbc_dev->ioctl_mutex);
- return ret;
+ return 0;
}
static const struct file_operations dbc_fops = {
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index af018afd9cd7..2e87ca0e292a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -249,7 +249,7 @@ static struct file *open_file_as_root(const char *filename, int flags, umode_t m
fp = file_open_root(&root, filename, flags, mode);
path_put(&root);
- revert_creds(old_cred);
+ put_cred(revert_creds(old_cred));
return fp;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 96fde9437b4b..f5b47e5ff48a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -1209,7 +1209,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->mode = uacce_mode;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
@@ -1396,6 +1395,17 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1428,6 +1438,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
+ .dev_is_abnormal = hpre_dev_is_abnormal,
};
static int hpre_pf_probe_init(struct hpre *hpre)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 19c1b5d3c954..d3f5d108b898 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -30,8 +30,6 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
-#define QM_MB_CMD_DATA_SHIFT 32
-#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
@@ -102,6 +100,8 @@
#define QM_PM_CTRL 0x100148
#define QM_IDLE_DISABLE BIT(9)
+#define QM_SUB_VERSION_ID 0x210
+
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
@@ -119,6 +119,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -176,6 +177,10 @@
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
+#define QM_PF2VF_PF_W 0x104700
+#define QM_VF2PF_PF_R 0x104800
+#define QM_VF2PF_VF_W 0x320
+#define QM_PF2VF_VF_R 0x380
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
@@ -185,8 +190,11 @@
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
-#define QM_VF_RESET_WAIT_US 20000
-#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF2PF_REG_SIZE 4
+#define QM_IFC_CMD_MASK GENMASK(31, 0)
+#define QM_IFC_DATA_SHIFT 32
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
@@ -234,8 +242,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
-#define QM_DEV_ALG_MAX_LEN 256
-
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@@ -276,7 +282,7 @@ enum qm_alg_type {
ALG_TYPE_1,
};
-enum qm_mb_cmd {
+enum qm_ifc_cmd {
QM_PF_FLR_PREPARE = 0x01,
QM_PF_SRST_PREPARE,
QM_PF_RESET_DONE,
@@ -333,6 +339,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@@ -396,6 +403,11 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*set_msi)(struct hisi_qm *qm, bool set);
+
+ /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
+ int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
+ void (*set_ifc_end)(struct hisi_qm *qm);
+ int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};
struct hisi_qm_hw_error {
@@ -501,15 +513,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
- u32 val, dev_val;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 err_status;
- if (qm->fun_type == QM_HW_VF)
+ if (pf_qm->fun_type == QM_HW_VF)
return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
- dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
+ err_status = qm_get_hw_error_status(pf_qm);
+ if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ return true;
+
+ if (pf_qm->err_ini->dev_is_abnormal)
+ return pf_qm->err_ini->dev_is_abnormal(pf_qm);
- return val || dev_val;
+ return false;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -654,7 +671,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@@ -688,7 +704,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
}
/* Setting xqc will fail if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm)) {
+ if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
return -EIO;
}
@@ -855,10 +871,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
- if (ptr) {
+ if (ptr)
*ptr = '\0';
- qm->uacce->algs = algs;
- }
+
+ qm->uacce->algs = algs;
return 0;
}
@@ -1052,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@@ -1540,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
struct device *dev = &qm->pdev->dev;
- u32 cmd;
- u64 msg;
+ enum qm_ifc_cmd cmd;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
if (ret) {
- dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_VF_PREPARE_FAIL:
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
@@ -1562,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
case QM_VF_START_DONE:
break;
default:
- dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
break;
}
}
@@ -1630,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
writel(val, qm->io_base + QM_IFC_INT_SET_V);
}
-static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_mailbox mailbox;
int cnt = 0;
u64 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
if (ret) {
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
goto err_unlock;
@@ -1662,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
}
err_unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return ret;
}
-static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct device *dev = &qm->pdev->dev;
u32 vfs_num = qm->vfs_num;
- struct qm_mailbox mailbox;
u64 val = 0;
int cnt = 0;
int ret;
u32 i;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
- mutex_lock(&qm->mailbox_lock);
- /* PF sends command to all VFs by mailbox */
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
if (ret) {
- dev_err(dev, "failed to send command to VFs!\n");
- mutex_unlock(&qm->mailbox_lock);
+ dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd);
+ qm->ops->set_ifc_end(qm);
return ret;
}
@@ -1692,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
val = readq(qm->io_base + QM_IFC_READY_STATUS);
/* If all VFs acked, PF notifies VFs successfully. */
if (!(val & GENMASK(vfs_num, 1))) {
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return 0;
}
@@ -1700,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
break;
}
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
/* Check which vf respond timeout. */
for (i = 1; i <= vfs_num; i++) {
@@ -1711,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
return -ETIMEDOUT;
}
-static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
- struct qm_mailbox mailbox;
int cnt = 0;
u32 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
if (ret) {
- dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
goto unlock;
}
@@ -1741,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
}
unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
+
return ret;
}
@@ -1842,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
return ret;
}
+static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ struct qm_mailbox mailbox;
+ u64 msg;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ return qm_mb_nolock(qm, &mailbox);
+}
+
+static void qm_set_ifc_end_v3(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->mailbox_lock);
+}
+
+static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ if (ret)
+ return ret;
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
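The v3 path packs the command and its payload into a single 64-bit mailbox word, which qm_get_ifc_v3() splits back apart with QM_IFC_CMD_MASK and QM_IFC_DATA_SHIFT. A minimal sketch of that round trip; the 32-bit shift and mask values here are assumptions for illustration, not taken from this diff:

#include <assert.h>
#include <stdint.h>

#define IFC_DATA_SHIFT 32              /* assumed value of QM_IFC_DATA_SHIFT */
#define IFC_CMD_MASK   0xffffffffULL   /* assumed value of QM_IFC_CMD_MASK */

static uint64_t ifc_pack(uint32_t cmd, uint32_t data)
{
        return (uint64_t)cmd | ((uint64_t)data << IFC_DATA_SHIFT);
}

static void ifc_unpack(uint64_t msg, uint32_t *cmd, uint32_t *data)
{
        *cmd = msg & IFC_CMD_MASK;
        if (data)
                *data = (uint32_t)(msg >> IFC_DATA_SHIFT);
}

int main(void)
{
        uint32_t cmd, data;

        ifc_unpack(ifc_pack(0x5, 100), &cmd, &data);
        assert(cmd == 0x5 && data == 100);
        return 0;
}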
+static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ uintptr_t offset;
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ offset = QM_PF2VF_PF_W;
+ else
+ offset = QM_VF2PF_VF_W;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ mutex_lock(&qm->ifc_lock);
+ writeq(msg, qm->io_base + offset);
+
+ return 0;
+}
+
+static void qm_set_ifc_end_v4(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->ifc_lock);
+}
+
+static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
+{
+ uintptr_t offset;
+
+ offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
+
+ return (u64)readl(qm->io_base + offset);
+}
+
+static u64 qm_get_ifc_vf(struct hisi_qm *qm)
+{
+ return readq(qm->io_base + QM_PF2VF_VF_R);
+}
+
+static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ msg = qm_get_ifc_pf(qm, fun_num);
+ else
+ msg = qm_get_ifc_vf(qm);
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.hw_error_init = qm_hw_error_init_v1,
@@ -1864,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v3,
+ .set_ifc_end = qm_set_ifc_end_v3,
+ .get_ifc = qm_get_ifc_v3,
+};
+
+static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v4,
+ .set_ifc_end = qm_set_ifc_end_v4,
+ .get_ifc = qm_get_ifc_v4,
};
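The new ops pair hides the per-revision locking behind the table: v3 brackets the mailbox send with mailbox_lock, v4 brackets the register write with ifc_lock, and every caller simply pairs set_ifc_begin() with set_ifc_end(). A rough userspace sketch of the pattern, with illustrative names only:

#include <pthread.h>
#include <stdio.h>

struct dev;

struct hw_ops {
        int  (*ifc_begin)(struct dev *d, unsigned int cmd);
        void (*ifc_end)(struct dev *d);
};

struct dev {
        const struct hw_ops *ops;
        pthread_mutex_t lock;
};

static int begin_v3(struct dev *d, unsigned int cmd)
{
        pthread_mutex_lock(&d->lock);   /* v3: mailbox send under mailbox_lock */
        printf("v3 mailbox send, cmd 0x%x\n", cmd);
        return 0;
}

static void end_v3(struct dev *d)
{
        pthread_mutex_unlock(&d->lock);
}

static const struct hw_ops ops_v3 = { .ifc_begin = begin_v3, .ifc_end = end_v3 };

static int ping(struct dev *d, unsigned int cmd)
{
        int ret = d->ops->ifc_begin(d, cmd);

        /* ... the ack poll would sit here, as in qm_ping_single_vf() ... */
        d->ops->ifc_end(d);             /* released on success and error alike */
        return ret;
}

int main(void)
{
        struct dev d = { .ops = &ops_v3, .lock = PTHREAD_MUTEX_INITIALIZER };

        return ping(&d, 0x2);
}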
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2156,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to check whether master OOO is blocked. */
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@@ -2475,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@@ -2843,11 +2949,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->ops = &qm_hw_ops_v1;
else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
- else
+ else if (qm->ver == QM_HW_V3)
qm->ops = &qm_hw_ops_v3;
+ else
+ qm->ops = &qm_hw_ops_v4;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
+ mutex_init(&qm->ifc_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
@@ -3607,7 +3716,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 mb_cmd;
u32 qos;
int ret;
@@ -3617,10 +3725,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
return;
}
- mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
- ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
if (ret)
- dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+ dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
}
static int qm_vf_read_qos(struct hisi_qm *qm)
@@ -4109,7 +4216,7 @@ stop_fail:
return ret;
}
-static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
@@ -4122,7 +4229,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
ret = qm_ping_all_vfs(qm, cmd);
if (ret)
- pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
} else {
ret = qm_vf_reset_prepare(qm, stop_reason);
if (ret)
@@ -4137,6 +4244,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
@@ -4298,7 +4411,7 @@ restart_fail:
return ret;
}
-static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4527,7 +4640,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
* Check whether there is an ECC mbit error. If one occurs, we need to
* wait for the soft reset to fix it.
*/
- while (qm_check_dev_error(pf_qm)) {
+ while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@@ -4675,7 +4788,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
- enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4709,7 +4822,7 @@ out:
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
- enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_START_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4732,7 +4845,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val, cmd;
- u64 msg;
int ret;
/* Wait for reset to finish */
@@ -4749,16 +4861,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
* Whether or not the message is received successfully,
* the VF needs to ack the PF by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, 0);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
qm_clear_cmd_interrupt(qm, 0);
if (ret) {
- dev_err(dev, "failed to get msg from PF in reset done!\n");
+ dev_err(dev, "failed to get command from PF in reset done!\n");
return ret;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
if (cmd != QM_PF_RESET_DONE) {
- dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
ret = -EINVAL;
}
@@ -4795,22 +4906,21 @@ err_get_status:
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 msg;
- u32 cmd;
+ enum qm_ifc_cmd cmd;
+ u32 data;
int ret;
/*
* Get the msg from the source by mailbox. Whether or not the message is
* received successfully, the destination needs to ack the source by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
qm_clear_cmd_interrupt(qm, BIT(fun_num));
if (ret) {
- dev_err(dev, "failed to get msg from source!\n");
+ dev_err(dev, "failed to get command from source!\n");
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_DOWN);
@@ -4822,10 +4932,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
qm_vf_get_qos(qm, fun_num);
break;
case QM_PF_SET_QOS:
- qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ qm->mb_qos = data;
break;
default:
- dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
break;
}
}
@@ -5167,6 +5277,20 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
return qm_pre_store_caps(qm);
}
+static void qm_get_version(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 sub_version_id;
+
+ qm->ver = pdev->revision;
+
+ if (pdev->revision == QM_HW_V3) {
+ sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
+ if (sub_version_id)
+ qm->ver = sub_version_id;
+ }
+}
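qm_get_version() keeps the PCI revision as the default and lets a nonzero sub-version register refine it on HW_V3 parts. A tiny sketch of that selection, assuming QM_HW_V3 == 0x30 (the enum value is not shown in this diff):

#include <assert.h>

static unsigned int qm_version(unsigned int pci_rev, unsigned int sub_id)
{
        /* the PCI revision is the default; a nonzero sub-version id wins on V3 */
        return (pci_rev == 0x30 && sub_id) ? sub_id : pci_rev;
}

int main(void)
{
        assert(qm_version(0x30, 0) == 0x30);    /* plain HW_V3 part */
        assert(qm_version(0x30, 0x31) == 0x31); /* refined by sub-version id */
        return 0;
}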
+
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5186,6 +5310,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
+ qm_get_version(qm);
+
ret = qm_get_hw_caps(qm);
if (ret)
goto err_ioremap;
@@ -5205,6 +5331,7 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
+ hisi_qm_pre_init(qm);
ret = qm_get_qp_num(qm);
if (ret)
goto err_db_ioremap;
@@ -5247,6 +5374,14 @@ static int qm_clear_device(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+ }
+
return qm_reset_device(qm);
}
@@ -5461,8 +5596,6 @@ int hisi_qm_init(struct hisi_qm *qm)
struct device *dev = &pdev->dev;
int ret;
- hisi_qm_pre_init(qm);
-
ret = hisi_qm_pci_init(qm);
if (ret)
return ret;
@@ -5598,6 +5731,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_set_pf_mse(qm, false);
if (ret)
pci_err(pdev, "failed to disable MSE before suspending!\n");
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 356188bee6fb..4b9970230822 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -37,6 +37,7 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ bool fallback;
};
/* SEC request of Crypto */
@@ -90,9 +91,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
- u8 mac_len;
u8 a_alg;
- bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index ae9ebbb4103d..66bc07da9eb6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
+ u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
- copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
- authsize, skip_size);
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
@@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- if (unlikely(a_ctx->fallback_aead_tfm))
- return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
- return 0;
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
- const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
- ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (unlikely(a_ctx->fallback_aead_tfm)) {
- ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
- if (ret)
- return ret;
- }
-
- return 0;
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
- (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
ret = -EINVAL;
- dev_err(dev, "MAC or AUTH key length error!\n");
+ dev_err(dev, "AUTH key length error!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "set sec fallback key err!\n");
goto bad_key;
}
@@ -1202,27 +1195,19 @@ bad_key:
}
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
- u32 keylen) \
-{ \
- return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
- SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
- SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
- SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
- size_t authsize = ctx->a_ctx.mac_len;
+ struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
* CCM 16Byte Cipher_IV: {1B_Flag,13B_IV,2B_counter},
* the counter must be set to 0x01
+ * CCM 16Byte Auth_IV: {1B_AFlag,13B_IV,2B_Ptext_length}
*/
- ctx->a_ctx.mac_len = authsize;
- /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
- }
-
- /* GCM 12Byte Cipher_IV == Auth_IV */
- if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
- ctx->a_ctx.mac_len = authsize;
+ } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ /* GCM 12Byte Cipher_IV == Auth_IV */
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
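For reference, the CCM cipher IV layout named in the comment above can be assembled like this; a minimal sketch assuming a 13-byte nonce (i.e. a 2-byte counter field, L = 2):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* { 1B flags, 13B nonce, 2B counter }, counter starts at 0x01 */
static void ccm_cipher_iv(uint8_t out[16], const uint8_t nonce[13])
{
        out[0] = 0x01;          /* flags byte: L' = L - 1 = 1 for a 2B counter */
        memcpy(&out[1], nonce, 13);
        out[14] = 0x00;         /* 16-bit big-endian counter ... */
        out[15] = 0x01;         /* ... must start at 0x01 */
}

int main(void)
{
        uint8_t iv[16], nonce[13] = { 0 };

        ccm_cipher_iv(iv, nonce);
        assert(iv[0] == 0x01 && iv[15] == 0x01);
        return 0;
}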
@@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+ sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg =
- cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
@@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->mac_len /
+ cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
@@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
- size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct aead_request *backlog_aead_req;
struct sec_req *backlog_req;
@@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
if (!err && c_req->encrypt) {
struct scatterlist *sgl = a_req->dst;
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- aead_req->out_mac,
- authsize, a_req->cryptlen +
- a_req->assoclen);
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
- struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
- auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(auth_ctx->hash_tfm)) {
+ a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
- return PTR_ERR(auth_ctx->hash_tfm);
+ return PTR_ERR(a_ctx->hash_tfm);
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
@@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
- a_ctx->fallback = false;
return 0;
}
@@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
struct device *dev = ctx->dev;
int ret;
- if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ /* Hardware does not handle cases where authsize is less than 4 bytes */
+ if (unlikely(sz < MIN_MAC_LEN)) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
- if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
- (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
- authsize & MAC_LEN_MASK)))) {
- dev_err(dev, "aead input mac length error!\n");
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
+ dev_err(dev, "aead input spec error!\n");
return -EINVAL;
}
@@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (sreq->c_req.encrypt)
sreq->c_req.c_len = req->cryptlen;
else
- sreq->c_req.c_len = req->cryptlen - authsize;
+ sreq->c_req.c_len = req->cryptlen - sz;
if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead crypto length error!\n");
@@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- ctx->a_ctx.fallback = true;
+ req->cryptlen <= authsize))) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
}
@@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
- /* Kunpeng920 aead mode not support input 0 size */
- if (!a_ctx->fallback_aead_tfm) {
- dev_err(dev, "aead fallback tfm is NULL!\n");
- return -EINVAL;
- }
-
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
@@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->aead_req.fallback = false;
ret = sec_aead_param_check(ctx, req);
if (unlikely(ret)) {
- if (ctx->a_ctx.fallback)
+ if (req->aead_req.fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
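The fallback decision now lives in the request rather than the shared context, so concurrent requests on one tfm cannot race on it. A compact sketch of the pattern with illustrative types; the 4-byte floor mirrors the MIN_MAC_LEN comment above:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct req {
        size_t authsize;
        bool fallback;          /* per-request, unlike the old per-context flag */
};

/* mirrors sec_aead_spec_check(): too-small MACs go to the soft fallback */
static int spec_check(struct req *r)
{
        if (r->authsize < 4) {  /* hardware floor, like MIN_MAC_LEN */
                r->fallback = true;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct req r = { .authsize = 2, .fallback = false };

        if (spec_check(&r))
                assert(r.fallback);     /* caller takes the software path */
        return 0;
}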
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
-enum sec_mac_len {
- SEC_HMAC_CCM_MAC = 16,
- SEC_HMAC_GCM_MAC = 16,
- SEC_SM3_MAC = 32,
- SEC_HMAC_SM3_MAC = 32,
- SEC_HMAC_MD5_MAC = 16,
- SEC_HMAC_SHA1_MAC = 20,
- SEC_HMAC_SHA256_MAC = 32,
- SEC_HMAC_SHA512_MAC = 64,
-};
-
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 8ec5333bb5aa..72cf48d1f3ab 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1097,6 +1097,17 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool sec_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1129,6 +1140,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
+ .dev_is_abnormal = sec_dev_is_abnormal,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1180,7 +1192,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
index a936f099ee22..13de020b77d6 100644
--- a/drivers/crypto/hisilicon/zip/Makefile
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
-hisi_zip-objs = zip_main.o zip_crypto.o
+hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
new file mode 100644
index 000000000000..6f22e4c36e49
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 HiSilicon Limited. */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uacce.h>
+#include "zip.h"
+
+/* memory */
+#define DAE_MEM_START_OFFSET 0x331040
+#define DAE_MEM_DONE_OFFSET 0x331044
+#define DAE_MEM_START_MASK 0x1
+#define DAE_MEM_DONE_MASK 0x1
+#define DAE_REG_RD_INTVRL_US 10
+#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
+
+#define DAE_ALG_NAME "hashagg"
+
+/* error */
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
+#define DAE_ERR_SOURCE_OFFSET 0x331C84
+#define DAE_ERR_STATUS_OFFSET 0x331C88
+#define DAE_ERR_CE_OFFSET 0x331CA0
+#define DAE_ERR_CE_MASK BIT(3)
+#define DAE_ERR_NFE_OFFSET 0x331CA4
+#define DAE_ERR_NFE_MASK 0x17
+#define DAE_ERR_FE_OFFSET 0x331CA8
+#define DAE_ERR_FE_MASK 0
+#define DAE_ECC_MBIT_MASK BIT(2)
+#define DAE_ECC_INFO_OFFSET 0x33400C
+#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
+#define DAE_ERR_SHUTDOWN_MASK 0x17
+#define DAE_ERR_ENABLE_OFFSET 0x331C80
+#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
+#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
+#define DAE_AM_RETURN_OFFSET 0x330150
+#define DAE_AM_RETURN_MASK 0x3
+#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
+
+struct hisi_dae_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const struct hisi_dae_hw_error dae_hw_error[] = {
+ { .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
+ { .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
+ { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
+ { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
+};
+
+static inline bool dae_is_support(struct hisi_qm *qm)
+{
+ if (test_bit(QM_SUPPORT_DAE, &qm->caps))
+ return true;
+
+ return false;
+}
+
+int hisi_dae_set_user_domain(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_MEM_START_OFFSET);
+ val |= DAE_MEM_START_MASK;
+ writel(val, qm->io_base + DAE_MEM_START_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
+ val & DAE_MEM_DONE_MASK,
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to init dae memory!\n");
+
+ return ret;
+}
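hisi_dae_set_user_domain() sets the start bit and then polls the done bit with readl_relaxed_poll_timeout(). A plain userspace sketch of that bounded poll, against a simulated register:

#include <unistd.h>

static volatile unsigned int mem_done = 1; /* stands in for DAE_MEM_DONE_OFFSET */

/* a plain bounded poll, like readl_relaxed_poll_timeout() above */
static int poll_done(unsigned int mask, long interval_us, long timeout_us)
{
        long waited = 0;

        while (!(mem_done & mask)) {
                if (waited >= timeout_us)
                        return -1;      /* the driver reports a timeout here */
                usleep(interval_us);
                waited += interval_us;
        }
        return 0;
}

int main(void)
{
        return poll_done(0x1, 10, 1000000) ? 1 : 0;
}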
+
+int hisi_dae_set_alg(struct hisi_qm *qm)
+{
+ size_t len;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ if (!qm->uacce)
+ return 0;
+
+ len = strlen(qm->uacce->algs);
+ /* A line break may be required */
+ if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ pci_err(qm->pdev, "algorithm name is too long!\n");
+ return -EINVAL;
+ }
+
+ if (len)
+ strcat((char *)qm->uacce->algs, "\n");
+
+ strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+
+ return 0;
+}
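hisi_dae_set_alg() appends "hashagg" to the uacce algs string only when the name plus a '\n' separator still fits the fixed-size buffer. A standalone sketch of that bounded append (the buffer size here is illustrative):

#include <stdio.h>
#include <string.h>

#define ALGS_MAX 64

static int append_alg(char *algs, const char *name)
{
        size_t len = strlen(algs);

        /* +1 for the '\n' separator; the terminating NUL needs the last byte */
        if (len + strlen(name) + 1 >= ALGS_MAX)
                return -1;

        if (len)
                strcat(algs, "\n");
        strcat(algs, name);
        return 0;
}

int main(void)
{
        char algs[ALGS_MAX] = "zlib\ngzip";

        if (!append_alg(algs, "hashagg"))
                puts(algs);
        return 0;
}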
+
+static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
+{
+ u32 axi_val, err_val;
+
+ axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+ if (enable) {
+ axi_val |= DAE_AXI_SHUTDOWN_MASK;
+ err_val = DAE_ERR_SHUTDOWN_MASK;
+ } else {
+ axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
+ err_val = 0;
+ }
+
+ writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
+}
+
+void hisi_dae_hw_error_enable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ /* clear any pending dae hw error source */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+
+ /* configure error type */
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+ writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
+
+ hisi_dae_master_ooo_ctrl(qm, true);
+
+ /* enable dae hw error interrupts */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+}
+
+void hisi_dae_hw_error_disable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+ hisi_dae_master_ooo_ctrl(qm, false);
+}
+
+static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
+}
+
+static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+}
+
+static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
+{
+ const struct hisi_dae_hw_error *err = dae_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 ecc_info;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
+ err = &dae_hw_error[i];
+ if (!(err->int_msk & err_type))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & DAE_ECC_MBIT_MASK) {
+ ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
+ dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
+ }
+ }
+}
+
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return ACC_ERR_NONE;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (!err_status)
+ return ACC_ERR_NONE;
+
+ hisi_dae_log_hw_error(qm, err_status);
+
+ if (err_status & DAE_ERR_NFE_MASK) {
+ /* Disable the same error reporting until device is recovered. */
+ hisi_dae_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_dae_clear_hw_err_status(qm, err_status);
+
+ return ACC_ERR_RECOVERED;
+}
+
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return false;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (err_status & DAE_ERR_NFE_MASK)
+ return true;
+
+ return false;
+}
+
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+ val |= BIT(0);
+ writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
+ val, (val == DAE_AM_RETURN_MASK),
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
+
+ return ret;
+}
+
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (!dae_is_support(qm))
+ return;
+
+ val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+
+ writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+}
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 2fecf346c3c9..9fb2a9c01132 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -103,4 +103,12 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
+int hisi_dae_set_user_domain(struct hisi_qm *qm);
+int hisi_dae_set_alg(struct hisi_qm *qm);
+void hisi_dae_hw_error_disable(struct hisi_qm *qm);
+void hisi_dae_hw_error_enable(struct hisi_qm *qm);
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 9239b251c2d7..d8ba23b7cc7d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -582,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
hisi_zip_enable_clock_gate(qm);
- return 0;
+ return hisi_dae_set_user_domain(qm);
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -631,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
@@ -643,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
+
+ hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -1129,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,8 +1153,11 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
{
+ enum acc_err_result zip_result = ACC_ERR_NONE;
+ enum acc_err_result dae_result;
u32 err_status;
+ /* Get the device's latest hardware error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
if (err_status & qm->err_info.ecc_2bits_mask)
@@ -1159,11 +1168,32 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
/* Disable the same error reporting until device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
+ } else {
+ hisi_zip_clear_hw_err_status(qm, err_status);
}
- hisi_zip_clear_hw_err_status(qm, err_status);
}
- return ACC_ERR_RECOVERED;
+ dae_result = hisi_dae_get_err_result(qm);
+
+ return (zip_result == ACC_ERR_NEED_RESET ||
+ dae_result == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
+}
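The combined result above is a worst-of merge: a reset request from either the ZIP block or the DAE block escalates the whole device. Reduced to its essentials:

#include <assert.h>

enum err_result { ERR_NONE, ERR_RECOVERED, ERR_NEED_RESET };

/* worst-of combination: a reset request from either block wins */
static enum err_result combine(enum err_result zip, enum err_result dae)
{
        return (zip == ERR_NEED_RESET || dae == ERR_NEED_RESET) ?
                ERR_NEED_RESET : ERR_RECOVERED;
}

int main(void)
{
        assert(combine(ERR_RECOVERED, ERR_NEED_RESET) == ERR_NEED_RESET);
        assert(combine(ERR_RECOVERED, ERR_RECOVERED) == ERR_RECOVERED);
        return 0;
}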
+
+static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return hisi_dae_dev_is_abnormal(qm);
+}
+
+static int hisi_zip_set_priv_status(struct hisi_qm *qm)
+{
+ return hisi_dae_close_axi_master_ooo(qm);
}
static void hisi_zip_err_info_init(struct hisi_qm *qm)
@@ -1200,6 +1230,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
.get_err_result = hisi_zip_get_err_result,
+ .set_priv_status = hisi_zip_set_priv_status,
+ .dev_is_abnormal = hisi_zip_dev_is_abnormal,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1264,7 +1296,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1301,17 +1332,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
- hisi_qm_uninit(qm);
- return ret;
+ goto err_qm_uninit;
}
alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
- hisi_qm_uninit(qm);
+ goto err_qm_uninit;
}
+ ret = hisi_dae_set_alg(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ return 0;
+
+err_qm_uninit:
+ hisi_qm_uninit(qm);
return ret;
}
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 9e557649e5d0..c3776b0de51d 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async")) {
- async_mode = true;
+ async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async_irq")) {
async_mode = true;
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index 449c6d3ab2db..fcc0cf4df637 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
npe_id = npe_spec.args[0];
+ of_node_put(npe_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
recv_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
send_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
} else {
/*
* Hardcoded engine when using platform data, this goes away
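The three added of_node_put() calls balance the reference that each successful of_parse_phandle_with_fixed_args() lookup takes on the returned node. A toy sketch of the get/put discipline being restored:

#include <assert.h>

struct node { int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n)         { n->refcount--; }

int main(void)
{
        struct node root = { .refcount = 1 };
        struct node *np = node_get(&root);  /* what the lookup does internally */
        int qid = 42;                       /* read the args while np is held */

        node_put(np);                       /* the added fix: drop it when done */
        assert(root.refcount == 1 && qid == 42);
        return 0;
}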
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
deleted file mode 100644
index 9a67dbf340f4..000000000000
--- a/drivers/crypto/n2_asm.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* n2_asm.S: Hypervisor calls for NCS support.
- *
- * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
- */
-
-#include <linux/linkage.h>
-#include <asm/hypervisor.h>
-#include "n2_core.h"
-
- /* o0: queue type
- * o1: RA of queue
- * o2: num entries in queue
- * o3: address of queue handle return
- */
-ENTRY(sun4v_ncs_qconf)
- mov HV_FAST_NCS_QCONF, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o3]
- retl
- nop
-ENDPROC(sun4v_ncs_qconf)
-
- /* %o0: queue handle
- * %o1: address of queue type return
- * %o2: address of queue base address return
- * %o3: address of queue num entries return
- */
-ENTRY(sun4v_ncs_qinfo)
- mov %o1, %g1
- mov %o2, %g2
- mov %o3, %g3
- mov HV_FAST_NCS_QINFO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%g1]
- stx %o2, [%g2]
- stx %o3, [%g3]
- retl
- nop
-ENDPROC(sun4v_ncs_qinfo)
-
- /* %o0: queue handle
- * %o1: address of head offset return
- */
-ENTRY(sun4v_ncs_gethead)
- mov %o1, %o2
- mov HV_FAST_NCS_GETHEAD, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gethead)
-
- /* %o0: queue handle
- * %o1: address of tail offset return
- */
-ENTRY(sun4v_ncs_gettail)
- mov %o1, %o2
- mov HV_FAST_NCS_GETTAIL, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gettail)
-
- /* %o0: queue handle
- * %o1: new tail offset
- */
-ENTRY(sun4v_ncs_settail)
- mov HV_FAST_NCS_SETTAIL, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_settail)
-
- /* %o0: queue handle
- * %o1: address of devino return
- */
-ENTRY(sun4v_ncs_qhandle_to_devino)
- mov %o1, %o2
- mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_qhandle_to_devino)
-
- /* %o0: queue handle
- * %o1: new head offset
- */
-ENTRY(sun4v_ncs_sethead_marker)
- mov HV_FAST_NCS_SETHEAD_MARKER, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
deleted file mode 100644
index 14c302d2db79..000000000000
--- a/drivers/crypto/n2_core.c
+++ /dev/null
@@ -1,2168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
- *
- * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/cpumask.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/crypto.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <asm/hypervisor.h>
-#include <asm/mdesc.h>
-
-#include "n2_core.h"
-
-#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.2"
-#define DRV_MODULE_RELDATE "July 28, 2011"
-
-static const char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Niagara2 Crypto driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-#define N2_CRA_PRIORITY 200
-
-static DEFINE_MUTEX(spu_lock);
-
-struct spu_queue {
- cpumask_t sharing;
- unsigned long qhandle;
-
- spinlock_t lock;
- u8 q_type;
- void *q;
- unsigned long head;
- unsigned long tail;
- struct list_head jobs;
-
- unsigned long devino;
-
- char irq_name[32];
- unsigned int irq;
-
- struct list_head list;
-};
-
-struct spu_qreg {
- struct spu_queue *queue;
- unsigned long type;
-};
-
-static struct spu_queue **cpu_to_cwq;
-static struct spu_queue **cpu_to_mau;
-
-static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
-{
- if (q->q_type == HV_NCS_QTYPE_MAU) {
- off += MAU_ENTRY_SIZE;
- if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
- off = 0;
- } else {
- off += CWQ_ENTRY_SIZE;
- if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
- off = 0;
- }
- return off;
-}
-
-struct n2_request_common {
- struct list_head entry;
- unsigned int offset;
-};
-#define OFFSET_NOT_RUNNING (~(unsigned int)0)
-
-/* An async job request records the final tail value it used in
- * n2_request_common->offset, test to see if that offset is in
- * the range old_head, new_head, inclusive.
- */
-static inline bool job_finished(struct spu_queue *q, unsigned int offset,
- unsigned long old_head, unsigned long new_head)
-{
- if (old_head <= new_head) {
- if (offset > old_head && offset <= new_head)
- return true;
- } else {
- if (offset > old_head || offset <= new_head)
- return true;
- }
- return false;
-}
-
-/* When the HEAD marker is unequal to the actual HEAD, we get
- * a virtual device INO interrupt. We should process the
- * completed CWQ entries and adjust the HEAD marker to clear
- * the IRQ.
- */
-static irqreturn_t cwq_intr(int irq, void *dev_id)
-{
- unsigned long off, new_head, hv_ret;
- struct spu_queue *q = dev_id;
-
- pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- spin_lock(&q->lock);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
-
- pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), new_head, hv_ret);
-
- for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
- /* XXX ... XXX */
- }
-
- hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
- if (hv_ret == HV_EOK)
- q->head = new_head;
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mau_intr(int irq, void *dev_id)
-{
- struct spu_queue *q = dev_id;
- unsigned long head, hv_ret;
-
- spin_lock(&q->lock);
-
- pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
-
- pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), head, hv_ret);
-
- sun4v_ncs_sethead_marker(q->qhandle, head);
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static void *spu_queue_next(struct spu_queue *q, void *cur)
-{
- return q->q + spu_next_offset(q, cur - q->q);
-}
-
-static int spu_queue_num_free(struct spu_queue *q)
-{
- unsigned long head = q->head;
- unsigned long tail = q->tail;
- unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
- unsigned long diff;
-
- if (head > tail)
- diff = head - tail;
- else
- diff = (end - tail) + head;
-
- return (diff / CWQ_ENTRY_SIZE) - 1;
-}
-
-static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
-{
- int avail = spu_queue_num_free(q);
-
- if (avail >= num_entries)
- return q->q + q->tail;
-
- return NULL;
-}
-
-static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
-{
- unsigned long hv_ret, new_tail;
-
- new_tail = spu_next_offset(q, last - q->q);
-
- hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
- if (hv_ret == HV_EOK)
- q->tail = new_tail;
- return hv_ret;
-}
-
-static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
- int enc_type, int auth_type,
- unsigned int hash_len,
- bool sfas, bool sob, bool eob, bool encrypt,
- int opcode)
-{
- u64 word = (len - 1) & CONTROL_LEN;
-
- word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
- word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
- word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
- if (sfas)
- word |= CONTROL_STORE_FINAL_AUTH_STATE;
- if (sob)
- word |= CONTROL_START_OF_BLOCK;
- if (eob)
- word |= CONTROL_END_OF_BLOCK;
- if (encrypt)
- word |= CONTROL_ENCRYPT;
- if (hmac_key_len)
- word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
- if (hash_len)
- word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
-
- return word;
-}
-
-#if 0
-static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
-{
- if (this_len >= 64 ||
- qp->head != qp->tail)
- return true;
- return false;
-}
-#endif
-
-struct n2_ahash_alg {
- struct list_head entry;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 auth_type;
- u8 hmac_type;
- struct ahash_alg alg;
-};
-
-static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_ahash_alg, alg);
-}
-
-struct n2_hmac_alg {
- const char *child_alg;
- struct n2_ahash_alg derived;
-};
-
-static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
-}
-
-struct n2_hash_ctx {
- struct crypto_ahash *fallback_tfm;
-};
-
-#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
-
-struct n2_hmac_ctx {
- struct n2_hash_ctx base;
-
- struct crypto_shash *child_shash;
-
- int hash_key_len;
- unsigned char hash_key[N2_HASH_KEY_MAX];
-};
-
-struct n2_hash_req_ctx {
- union {
- struct md5_state md5;
- struct sha1_state sha1;
- struct sha256_state sha256;
- } u;
-
- struct ahash_request fallback_req;
-};
-
-static int n2_hash_async_init(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_init(&rctx->fallback_req);
-}
-
-static int n2_hash_async_update(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
-
- return crypto_ahash_update(&rctx->fallback_req);
-}
-
-static int n2_hash_async_final(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_final(&rctx->fallback_req);
-}
-
-static int n2_hash_async_finup(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_finup(&rctx->fallback_req);
-}
-
-static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_async_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct crypto_ahash *fallback_tfm;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->fallback_tfm = fallback_tfm;
- return 0;
-
-out:
- return err;
-}
-
-static void n2_hash_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->fallback_tfm);
-}
-
-static int n2_hmac_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
- struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
- struct crypto_ahash *fallback_tfm;
- struct crypto_shash *child_shash;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
- if (IS_ERR(child_shash)) {
- pr_warn("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
- err = PTR_ERR(child_shash);
- goto out_free_fallback;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->child_shash = child_shash;
- ctx->base.fallback_tfm = fallback_tfm;
- return 0;
-
-out_free_fallback:
- crypto_free_ahash(fallback_tfm);
-
-out:
- return err;
-}
-
-static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->base.fallback_tfm);
- crypto_free_shash(ctx->child_shash);
-}
-
-static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_shash *child_shash = ctx->child_shash;
- struct crypto_ahash *fallback_tfm;
- int err, bs, ds;
-
- fallback_tfm = ctx->base.fallback_tfm;
- err = crypto_ahash_setkey(fallback_tfm, key, keylen);
- if (err)
- return err;
-
- bs = crypto_shash_blocksize(child_shash);
- ds = crypto_shash_digestsize(child_shash);
- BUG_ON(ds > N2_HASH_KEY_MAX);
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(child_shash, key, keylen,
- ctx->hash_key);
- if (err)
- return err;
- keylen = ds;
- } else if (keylen <= N2_HASH_KEY_MAX)
- memcpy(ctx->hash_key, key, keylen);
-
- ctx->hash_key_len = keylen;
-
- return err;
-}
-
-static unsigned long wait_for_tail(struct spu_queue *qp)
-{
- unsigned long head, hv_ret;
-
- do {
- hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
- if (hv_ret != HV_EOK) {
- pr_err("Hypervisor error on gethead\n");
- break;
- }
- if (head == qp->tail) {
- qp->head = head;
- break;
- }
- } while (1);
- return hv_ret;
-}
-
-static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
- struct cwq_initial_entry *ent)
-{
- unsigned long hv_ret = spu_queue_submit(qp, ent);
-
- if (hv_ret == HV_EOK)
- hv_ret = wait_for_tail(qp);
-
- return hv_ret;
-}
-
-static int n2_do_async_digest(struct ahash_request *req,
- unsigned int auth_type, unsigned int digest_size,
- unsigned int result_size, void *hash_loc,
- unsigned long auth_key, unsigned int auth_key_len)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cwq_initial_entry *ent;
- struct crypto_hash_walk walk;
- struct spu_queue *qp;
- unsigned long flags;
- int err = -ENODEV;
- int nbytes, cpu;
-
- /* The total effective length of the operation may not
- * exceed 2^16.
- */
- if (unlikely(req->nbytes > (1 << 16))) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
-
- nbytes = crypto_hash_walk_first(req, &walk);
-
- cpu = get_cpu();
- qp = cpu_to_cwq[cpu];
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- /* XXX can do better, improve this later by doing a by-hand scatterlist
- * XXX walk, etc.
- */
- ent = qp->q + qp->tail;
-
- ent->control = control_word_base(nbytes, auth_key_len, 0,
- auth_type, digest_size,
- false, true, false, false,
- OPCODE_INPLACE_BIT |
- OPCODE_AUTH_MAC);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = auth_key;
- ent->auth_iv_addr = __pa(hash_loc);
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = __pa(hash_loc);
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- while (nbytes > 0) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = (nbytes - 1);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
- err = -EINVAL;
- else
- err = 0;
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
- if (!err)
- memcpy(req->result, hash_loc, result_size);
-out:
- put_cpu();
-
- return err;
-}
-
-static int n2_hash_async_digest(struct ahash_request *req)
-{
- struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- int ds;
-
- ds = n2alg->digest_size;
- if (unlikely(req->nbytes == 0)) {
- memcpy(req->result, n2alg->hash_zero, ds);
- return 0;
- }
- memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->auth_type,
- n2alg->hw_op_hashsz, ds,
- &rctx->u, 0UL, 0);
-}
-
-static int n2_hmac_async_digest(struct ahash_request *req)
-{
- struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- int ds;
-
- ds = n2alg->derived.digest_size;
- if (unlikely(req->nbytes == 0) ||
- unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
- memcpy(&rctx->u, n2alg->derived.hash_init,
- n2alg->derived.hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->derived.hmac_type,
- n2alg->derived.hw_op_hashsz, ds,
- &rctx->u,
- __pa(&ctx->hash_key),
- ctx->hash_key_len);
-}
-
-struct n2_skcipher_context {
- int key_len;
- int enc_type;
- union {
- u8 aes[AES_MAX_KEY_SIZE];
- u8 des[DES_KEY_SIZE];
- u8 des3[3 * DES_KEY_SIZE];
- } key;
-};
-
-#define N2_CHUNK_ARR_LEN 16
-
-struct n2_crypto_chunk {
- struct list_head entry;
- unsigned long iv_paddr : 44;
- unsigned long arr_len : 20;
- unsigned long dest_paddr;
- unsigned long dest_final;
- struct {
- unsigned long src_paddr : 44;
- unsigned long src_len : 20;
- } arr[N2_CHUNK_ARR_LEN];
-};
-
-struct n2_request_context {
- struct skcipher_walk walk;
- struct list_head chunk_list;
- struct n2_crypto_chunk chunk;
- u8 temp_iv[16];
-};
-
-/* The SPU allows some level of flexibility for partial cipher blocks
- * being specified in a descriptor.
- *
- * It merely requires that every descriptor's length field is at least
- * as large as the cipher block size. This means that a cipher block
- * can span at most 2 descriptors. However, this does not allow a
- * partial block to span into the final descriptor as that would
- * violate the rule (since every descriptor's length must be at least
- * the block size). So, for example, assuming an 8 byte block size:
- *
- * 0xe --> 0xa --> 0x8
- *
- * is a valid length sequence, whereas:
- *
- * 0xe --> 0xb --> 0x7
- *
- * is not a valid sequence.
- */
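
The rule above boils down to one check: every descriptor length must be at
least the block size. A hypothetical standalone checker (a sketch, not part
of the driver) that reproduces the two sequences cited in the comment:

#include <linux/types.h>

/* Sketch only: for blksz = 8, {0xe, 0xa, 0x8} passes, while
 * {0xe, 0xb, 0x7} fails on the final 0x7 entry.
 */
static bool desc_len_seq_valid(const unsigned int *len, int n,
			       unsigned int blksz)
{
	int i;

	for (i = 0; i < n; i++)
		if (len[i] < blksz)
			return false;
	return true;
}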
-
-struct n2_skcipher_alg {
- struct list_head entry;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
-{
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-
- return container_of(alg, struct n2_skcipher_alg, skcipher);
-}
-
-static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
-
- ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->enc_type |= ENC_TYPE_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- ctx->enc_type |= ENC_TYPE_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- ctx->enc_type |= ENC_TYPE_ALG_AES256;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->key_len = keylen;
- memcpy(ctx->key.aes, key, keylen);
- return 0;
-}
-
-static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des, key, keylen);
- return 0;
-}
-
-static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des3_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des3, key, keylen);
- return 0;
-}
-
-static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
-{
- int this_len = nbytes;
-
- this_len -= (nbytes & (block_size - 1));
- return this_len > (1 << 16) ? (1 << 16) : this_len;
-}
-
-static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
- struct n2_crypto_chunk *cp,
- struct spu_queue *qp, bool encrypt)
-{
- struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
- struct cwq_initial_entry *ent;
- bool in_place;
- int i;
-
- ent = spu_queue_alloc(qp, cp->arr_len);
- if (!ent) {
- pr_info("queue_alloc() of %d fails\n",
- cp->arr_len);
- return -EBUSY;
- }
-
- in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
-
- ent->control = control_word_base(cp->arr[0].src_len,
- 0, ctx->enc_type, 0, 0,
- false, true, false, encrypt,
- OPCODE_ENCRYPT |
- (in_place ? OPCODE_INPLACE_BIT : 0));
- ent->src_addr = cp->arr[0].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = __pa(&ctx->key);
- ent->enc_iv_addr = cp->iv_paddr;
- ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
-
- for (i = 1; i < cp->arr_len; i++) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = cp->arr[i].src_len - 1;
- ent->src_addr = cp->arr[i].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
-}
-
-static int n2_compute_chunks(struct skcipher_request *req)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct skcipher_walk *walk = &rctx->walk;
- struct n2_crypto_chunk *chunk;
- unsigned long dest_prev;
- unsigned int tot_len;
- bool prev_in_place;
- int err, nbytes;
-
- err = skcipher_walk_async(walk, req);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&rctx->chunk_list);
-
- chunk = &rctx->chunk;
- INIT_LIST_HEAD(&chunk->entry);
-
- chunk->iv_paddr = 0UL;
- chunk->arr_len = 0;
- chunk->dest_paddr = 0UL;
-
- prev_in_place = false;
- dest_prev = ~0UL;
- tot_len = 0;
-
- while ((nbytes = walk->nbytes) != 0) {
- unsigned long dest_paddr, src_paddr;
- bool in_place;
- int this_len;
-
- src_paddr = (page_to_phys(walk->src.phys.page) +
- walk->src.phys.offset);
- dest_paddr = (page_to_phys(walk->dst.phys.page) +
- walk->dst.phys.offset);
- in_place = (src_paddr == dest_paddr);
- this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
-
- if (chunk->arr_len != 0) {
- if (in_place != prev_in_place ||
- (!prev_in_place &&
- dest_paddr != dest_prev) ||
- chunk->arr_len == N2_CHUNK_ARR_LEN ||
- tot_len + this_len > (1 << 16)) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry,
- &rctx->chunk_list);
- chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
- if (!chunk) {
- err = -ENOMEM;
- break;
- }
- INIT_LIST_HEAD(&chunk->entry);
- }
- }
- if (chunk->arr_len == 0) {
- chunk->dest_paddr = dest_paddr;
- tot_len = 0;
- }
- chunk->arr[chunk->arr_len].src_paddr = src_paddr;
- chunk->arr[chunk->arr_len].src_len = this_len;
- chunk->arr_len++;
-
- dest_prev = dest_paddr + this_len;
- prev_in_place = in_place;
- tot_len += this_len;
-
- err = skcipher_walk_done(walk, nbytes - this_len);
- if (err)
- break;
- }
- if (!err && chunk->arr_len != 0) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry, &rctx->chunk_list);
- }
-
- return err;
-}
-
-static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct n2_crypto_chunk *c, *tmp;
-
- if (final_iv)
- memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
-}
-
-static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- unsigned long flags, hv_ret;
- struct spu_queue *qp;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- err = __n2_crypt_chunk(tfm, c, qp, encrypt);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, NULL);
- return err;
-}
-
-static int n2_encrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, true);
-}
-
-static int n2_decrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, false);
-}
-
-static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned long flags, hv_ret, iv_paddr;
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- struct spu_queue *qp;
- void *final_iv_addr;
-
- final_iv_addr = NULL;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- if (encrypt) {
- iv_paddr = __pa(rctx->walk.iv);
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
- entry) {
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, true);
- if (err)
- break;
- iv_paddr = c->dest_final - rctx->walk.blocksize;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- final_iv_addr = __va(iv_paddr);
- } else {
- list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
- entry) {
- if (c == &rctx->chunk) {
- iv_paddr = __pa(rctx->walk.iv);
- } else {
- iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
- tmp->arr[tmp->arr_len-1].src_len -
- rctx->walk.blocksize);
- }
- if (!final_iv_addr) {
- unsigned long pa;
-
- pa = (c->arr[c->arr_len-1].src_paddr +
- c->arr[c->arr_len-1].src_len -
- rctx->walk.blocksize);
- final_iv_addr = rctx->temp_iv;
- memcpy(rctx->temp_iv, __va(pa),
- rctx->walk.blocksize);
- }
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, false);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, err ? NULL : final_iv_addr);
- return err;
-}
-
-static int n2_encrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, true);
-}
-
-static int n2_decrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, false);
-}
-
-struct n2_skcipher_tmpl {
- const char *name;
- const char *drv_name;
- u8 block_size;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
-	/* DES: ECB and CBC are supported */
- { .name = "ecb(des)",
- .drv_name = "ecb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des)",
- .drv_name = "cbc-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
-	/* 3DES: ECB and CBC are supported */
- { .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
-	/* AES: ECB, CBC and CTR are supported */
- { .name = "ecb(aes)",
- .drv_name = "ecb-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(aes)",
- .drv_name = "cbc-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
- { .name = "ctr(aes)",
- .drv_name = "ctr-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_COUNTER),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_encrypt_chaining,
- },
- },
-};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-
-static LIST_HEAD(skcipher_algs);
-
-struct n2_hash_tmpl {
- const char *name;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 statesize;
- u8 block_size;
- u8 auth_type;
- u8 hmac_type;
-};
-
-static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
- cpu_to_le32(MD5_H0),
- cpu_to_le32(MD5_H1),
- cpu_to_le32(MD5_H2),
- cpu_to_le32(MD5_H3),
-};
-static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
- SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
-};
-static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
-};
-static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
- SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
-};
-
-static const struct n2_hash_tmpl hash_tmpls[] = {
- { .name = "md5",
- .hash_zero = md5_zero_message_hash,
- .hash_init = (u8 *)n2_md5_init,
- .auth_type = AUTH_TYPE_MD5,
- .hmac_type = AUTH_TYPE_HMAC_MD5,
- .hw_op_hashsz = MD5_DIGEST_SIZE,
- .digest_size = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct md5_state),
- .block_size = MD5_HMAC_BLOCK_SIZE },
- { .name = "sha1",
- .hash_zero = sha1_zero_message_hash,
- .hash_init = (u8 *)n2_sha1_init,
- .auth_type = AUTH_TYPE_SHA1,
- .hmac_type = AUTH_TYPE_HMAC_SHA1,
- .hw_op_hashsz = SHA1_DIGEST_SIZE,
- .digest_size = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .block_size = SHA1_BLOCK_SIZE },
- { .name = "sha256",
- .hash_zero = sha256_zero_message_hash,
- .hash_init = (u8 *)n2_sha256_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_HMAC_SHA256,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA256_BLOCK_SIZE },
- { .name = "sha224",
- .hash_zero = sha224_zero_message_hash,
- .hash_init = (u8 *)n2_sha224_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_RESERVED,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA224_BLOCK_SIZE },
-};
-#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
-
-static LIST_HEAD(ahash_algs);
-static LIST_HEAD(hmac_algs);
-
-static int algs_registered;
-
-static void __n2_unregister_algs(void)
-{
- struct n2_skcipher_alg *skcipher, *skcipher_tmp;
- struct n2_ahash_alg *alg, *alg_tmp;
- struct n2_hmac_alg *hmac, *hmac_tmp;
-
- list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
- crypto_unregister_skcipher(&skcipher->skcipher);
- list_del(&skcipher->entry);
- kfree(skcipher);
- }
- list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
- crypto_unregister_ahash(&hmac->derived.alg);
- list_del(&hmac->derived.entry);
- kfree(hmac);
- }
- list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
- crypto_unregister_ahash(&alg->alg);
- list_del(&alg->entry);
- kfree(alg);
- }
-}
-
-static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
- return 0;
-}
-
-static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
-{
- struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct skcipher_alg *alg;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- alg = &p->skcipher;
- *alg = tmpl->skcipher;
-
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
- alg->base.cra_blocksize = tmpl->block_size;
- p->enc_type = tmpl->enc_type;
- alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
- alg->base.cra_module = THIS_MODULE;
- alg->init = n2_skcipher_init_tfm;
-
- list_add(&p->entry, &skcipher_algs);
- err = crypto_register_skcipher(alg);
- if (err) {
- pr_err("%s alg registration failed\n", alg->base.cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", alg->base.cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
-{
- struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct ahash_alg *ahash;
- struct crypto_alg *base;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->child_alg = n2ahash->alg.halg.base.cra_name;
- memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
- INIT_LIST_HEAD(&p->derived.entry);
-
- ahash = &p->derived.alg;
- ahash->digest = n2_hmac_async_digest;
- ahash->setkey = n2_hmac_async_setkey;
-
- base = &ahash->halg.base;
- err = -EINVAL;
- if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
- if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
-
- base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
- base->cra_init = n2_hmac_cra_init;
- base->cra_exit = n2_hmac_cra_exit;
-
- list_add(&p->derived.entry, &hmac_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->derived.entry);
-out_free_p:
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
-{
- struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct hash_alg_common *halg;
- struct crypto_alg *base;
- struct ahash_alg *ahash;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->hash_zero = tmpl->hash_zero;
- p->hash_init = tmpl->hash_init;
- p->auth_type = tmpl->auth_type;
- p->hmac_type = tmpl->hmac_type;
- p->hw_op_hashsz = tmpl->hw_op_hashsz;
- p->digest_size = tmpl->digest_size;
-
- ahash = &p->alg;
- ahash->init = n2_hash_async_init;
- ahash->update = n2_hash_async_update;
- ahash->final = n2_hash_async_final;
- ahash->finup = n2_hash_async_finup;
- ahash->digest = n2_hash_async_digest;
- ahash->export = n2_hash_async_noexport;
- ahash->import = n2_hash_async_noimport;
-
- halg = &ahash->halg;
- halg->digestsize = tmpl->digest_size;
- halg->statesize = tmpl->statesize;
-
- base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
- base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- base->cra_blocksize = tmpl->block_size;
- base->cra_ctxsize = sizeof(struct n2_hash_ctx);
- base->cra_module = THIS_MODULE;
- base->cra_init = n2_hash_cra_init;
- base->cra_exit = n2_hash_cra_exit;
-
- list_add(&p->entry, &ahash_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
- err = __n2_register_one_hmac(p);
- return err;
-}
-
-static int n2_register_algs(void)
-{
- int i, err = 0;
-
- mutex_lock(&spu_lock);
- if (algs_registered++)
- goto out;
-
- for (i = 0; i < NUM_HASH_TMPLS; i++) {
- err = __n2_register_one_ahash(&hash_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
- for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
-
-out:
- mutex_unlock(&spu_lock);
- return err;
-}
-
-static void n2_unregister_algs(void)
-{
- mutex_lock(&spu_lock);
- if (!--algs_registered)
- __n2_unregister_algs();
- mutex_unlock(&spu_lock);
-}
-
-/* To map CWQ queues to interrupt sources, the hypervisor API provides
- * a devino. This isn't very useful to us because all of the
- * interrupts listed in the device_node have been translated to
- * Linux virtual IRQ cookie numbers.
- *
- * So we have to back-translate, going through the 'intr' and 'ino'
- * property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to figure out which
- * devino goes to which already-translated IRQ.
- */
-static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
- unsigned long dev_ino)
-{
- const unsigned int *dev_intrs;
- unsigned int intr;
- int i;
-
- for (i = 0; i < ip->num_intrs; i++) {
- if (ip->ino_table[i].ino == dev_ino)
- break;
- }
- if (i == ip->num_intrs)
- return -ENODEV;
-
- intr = ip->ino_table[i].intr;
-
- dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
- if (!dev_intrs)
- return -ENODEV;
-
- for (i = 0; i < dev->archdata.num_irqs; i++) {
- if (dev_intrs[i] == intr)
- return i;
- }
-
- return -ENODEV;
-}
-
-static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
- const char *irq_name, struct spu_queue *p,
- irq_handler_t handler)
-{
- unsigned long herr;
- int index;
-
- herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
- if (herr)
- return -EINVAL;
-
- index = find_devino_index(dev, ip, p->devino);
- if (index < 0)
- return index;
-
- p->irq = dev->archdata.irqs[index];
-
- sprintf(p->irq_name, "%s-%d", irq_name, index);
-
- return request_irq(p->irq, handler, 0, p->irq_name, p);
-}
-
-static struct kmem_cache *queue_cache[2];
-
-static void *new_queue(unsigned long q_type)
-{
- return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
-}
-
-static void free_queue(void *p, unsigned long q_type)
-{
- kmem_cache_free(queue_cache[q_type - 1], p);
-}
-
-static int queue_cache_init(void)
-{
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- queue_cache[HV_NCS_QTYPE_MAU - 1] =
- kmem_cache_create("mau_queue",
- (MAU_NUM_ENTRIES *
- MAU_ENTRY_SIZE),
- MAU_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- return -ENOMEM;
-
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
- queue_cache[HV_NCS_QTYPE_CWQ - 1] =
- kmem_cache_create("cwq_queue",
- (CWQ_NUM_ENTRIES *
- CWQ_ENTRY_SIZE),
- CWQ_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static void queue_cache_destroy(void)
-{
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
-}
-
-static long spu_queue_register_workfn(void *arg)
-{
- struct spu_qreg *qr = arg;
- struct spu_queue *p = qr->queue;
- unsigned long q_type = qr->type;
- unsigned long hv_ret;
-
- hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
- CWQ_NUM_ENTRIES, &p->qhandle);
- if (!hv_ret)
- sun4v_ncs_sethead_marker(p->qhandle, 0);
-
- return hv_ret ? -EINVAL : 0;
-}
-
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
-{
- int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
- struct spu_qreg qr = { .queue = p, .type = q_type };
-
- return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
-}
-
-static int spu_queue_setup(struct spu_queue *p)
-{
- int err;
-
- p->q = new_queue(p->q_type);
- if (!p->q)
- return -ENOMEM;
-
- err = spu_queue_register(p, p->q_type);
- if (err) {
- free_queue(p->q, p->q_type);
- p->q = NULL;
- }
-
- return err;
-}
-
-static void spu_queue_destroy(struct spu_queue *p)
-{
- unsigned long hv_ret;
-
- if (!p->q)
- return;
-
- hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
-
- if (!hv_ret)
- free_queue(p->q, p->q_type);
-}
-
-static void spu_list_destroy(struct list_head *list)
-{
- struct spu_queue *p, *n;
-
- list_for_each_entry_safe(p, n, list, list) {
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_to_cwq[i] == p)
- cpu_to_cwq[i] = NULL;
- }
-
- if (p->irq) {
- free_irq(p->irq, p);
- p->irq = 0;
- }
- spu_queue_destroy(p);
- list_del(&p->list);
- kfree(p);
- }
-}
-
-/* Walk the backward arcs of a CWQ 'exec-unit' node,
- * gathering cpu membership information.
- */
-static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- u64 node, struct spu_queue *p,
- struct spu_queue **table)
-{
- u64 arc;
-
- mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
- u64 tgt = mdesc_arc_target(mdesc, arc);
- const char *name = mdesc_node_name(mdesc, tgt);
- const u64 *id;
-
- if (strcmp(name, "cpu"))
- continue;
- id = mdesc_get_property(mdesc, tgt, "id", NULL);
- if (table[*id] != NULL) {
- dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
- dev->dev.of_node);
- return -EINVAL;
- }
- cpumask_set_cpu(*id, &p->sharing);
- table[*id] = p;
- }
- return 0;
-}
-
-/* Process an 'exec-unit' MDESC node of type 'cwq'. */
-static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
- struct platform_device *dev, struct mdesc_handle *mdesc,
- u64 node, const char *iname, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- struct spu_queue *p;
- int err;
-
- p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
- if (!p) {
- dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- cpumask_clear(&p->sharing);
- spin_lock_init(&p->lock);
- p->q_type = q_type;
- INIT_LIST_HEAD(&p->jobs);
- list_add(&p->list, list);
-
- err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
- if (err)
- return err;
-
- err = spu_queue_setup(p);
- if (err)
- return err;
-
- return spu_map_ino(dev, ip, iname, p, handler);
-}
-
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
- struct spu_mdesc_info *ip, struct list_head *list,
- const char *exec_name, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- int err = 0;
- u64 node;
-
- mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
- const char *type;
-
- type = mdesc_get_property(mdesc, node, "type", NULL);
- if (!type || strcmp(type, exec_name))
- continue;
-
- err = handle_exec_unit(ip, list, dev, mdesc, node,
- exec_name, q_type, handler, table);
- if (err) {
- spu_list_destroy(list);
- break;
- }
- }
-
- return err;
-}
-
-static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
-{
- const u64 *ino;
- int ino_len;
- int i;
-
- ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino) {
- printk("NO 'ino'\n");
- return -ENODEV;
- }
-
- ip->num_intrs = ino_len / sizeof(u64);
- ip->ino_table = kzalloc((sizeof(struct ino_blob) *
- ip->num_intrs),
- GFP_KERNEL);
- if (!ip->ino_table)
- return -ENOMEM;
-
- for (i = 0; i < ip->num_intrs; i++) {
- struct ino_blob *b = &ip->ino_table[i];
- b->intr = i + 1;
- b->ino = ino[i];
- }
-
- return 0;
-}
-
-static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
-{
- u64 node, reg;
-
- if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
- return -ENODEV;
-
- mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
- const char *name;
- const u64 *chdl;
-
- name = mdesc_get_property(mdesc, node, "name", NULL);
- if (!name || strcmp(name, node_name))
- continue;
- chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != reg))
- continue;
- ip->cfg_handle = *chdl;
- return get_irq_props(mdesc, node, ip);
- }
-
- return -ENODEV;
-}
-
-static unsigned long n2_spu_hvapi_major;
-static unsigned long n2_spu_hvapi_minor;
-
-static int n2_spu_hvapi_register(void)
-{
- int err;
-
- n2_spu_hvapi_major = 2;
- n2_spu_hvapi_minor = 0;
-
- err = sun4v_hvapi_register(HV_GRP_NCS,
- n2_spu_hvapi_major,
- &n2_spu_hvapi_minor);
-
- if (!err)
- pr_info("Registered NCS HVAPI version %lu.%lu\n",
- n2_spu_hvapi_major,
- n2_spu_hvapi_minor);
-
- return err;
-}
-
-static void n2_spu_hvapi_unregister(void)
-{
- sun4v_hvapi_unregister(HV_GRP_NCS);
-}
-
-static int global_ref;
-
-static int grab_global_resources(void)
-{
- int err = 0;
-
- mutex_lock(&spu_lock);
-
- if (global_ref++)
- goto out;
-
- err = n2_spu_hvapi_register();
- if (err)
- goto out;
-
- err = queue_cache_init();
- if (err)
- goto out_hvapi_release;
-
- err = -ENOMEM;
- cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_cwq)
- goto out_queue_cache_destroy;
-
- cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_mau)
- goto out_free_cwq_table;
-
- err = 0;
-
-out:
- if (err)
- global_ref--;
- mutex_unlock(&spu_lock);
- return err;
-
-out_free_cwq_table:
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
-out_queue_cache_destroy:
- queue_cache_destroy();
-
-out_hvapi_release:
- n2_spu_hvapi_unregister();
- goto out;
-}
-
-static void release_global_resources(void)
-{
- mutex_lock(&spu_lock);
- if (!--global_ref) {
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
- kfree(cpu_to_mau);
- cpu_to_mau = NULL;
-
- queue_cache_destroy();
- n2_spu_hvapi_unregister();
- }
- mutex_unlock(&spu_lock);
-}
-
-static struct n2_crypto *alloc_n2cp(void)
-{
- struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
-
- if (np)
- INIT_LIST_HEAD(&np->cwq_list);
-
- return np;
-}
-
-static void free_n2cp(struct n2_crypto *np)
-{
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
-
- kfree(np);
-}
-
-static void n2_spu_driver_version(void)
-{
- static int n2_spu_version_printed;
-
- if (n2_spu_version_printed++ == 0)
- pr_info("%s", version);
-}
-
-static int n2_crypto_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_crypto *np;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
-
- np = alloc_n2cp();
- if (!np) {
- dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_n2cp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
- err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
- "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
- cpu_to_cwq);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- err = n2_register_algs();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
- dev->dev.of_node);
- goto out_free_spu_list;
- }
-
- dev_set_drvdata(&dev->dev, np);
-
- return 0;
-
-out_free_spu_list:
- spu_list_destroy(&np->cwq_list);
-
-out_free_global:
- release_global_resources();
-
-out_free_n2cp:
- free_n2cp(np);
-
- return err;
-}
-
-static void n2_crypto_remove(struct platform_device *dev)
-{
- struct n2_crypto *np = dev_get_drvdata(&dev->dev);
-
- n2_unregister_algs();
-
- spu_list_destroy(&np->cwq_list);
-
- release_global_resources();
-
- free_n2cp(np);
-}
-
-static struct n2_mau *alloc_ncp(void)
-{
- struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
-
- if (mp)
- INIT_LIST_HEAD(&mp->mau_list);
-
- return mp;
-}
-
-static void free_ncp(struct n2_mau *mp)
-{
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
-
- kfree(mp);
-}
-
-static int n2_mau_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_mau *mp;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found NCP at %pOF\n", dev->dev.of_node);
-
- mp = alloc_ncp();
- if (!mp) {
- dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_ncp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
-
- err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
- "mau", HV_NCS_QTYPE_MAU, mau_intr,
- cpu_to_mau);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- dev_set_drvdata(&dev->dev, mp);
-
- return 0;
-
-out_free_global:
- release_global_resources();
-
-out_free_ncp:
- free_ncp(mp);
-
- return err;
-}
-
-static void n2_mau_remove(struct platform_device *dev)
-{
- struct n2_mau *mp = dev_get_drvdata(&dev->dev);
-
- spu_list_destroy(&mp->mau_list);
-
- release_global_resources();
-
- free_ncp(mp);
-}
-
-static const struct of_device_id n2_crypto_match[] = {
- {
- .name = "n2cp",
- .compatible = "SUNW,n2-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,vf-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,kt-cwq",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_crypto_match);
-
-static struct platform_driver n2_crypto_driver = {
- .driver = {
- .name = "n2cp",
- .of_match_table = n2_crypto_match,
- },
- .probe = n2_crypto_probe,
- .remove = n2_crypto_remove,
-};
-
-static const struct of_device_id n2_mau_match[] = {
- {
- .name = "ncp",
- .compatible = "SUNW,n2-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,vf-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,kt-mau",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_mau_match);
-
-static struct platform_driver n2_mau_driver = {
- .driver = {
- .name = "ncp",
- .of_match_table = n2_mau_match,
- },
- .probe = n2_mau_probe,
- .remove = n2_mau_remove,
-};
-
-static struct platform_driver * const drivers[] = {
- &n2_crypto_driver,
- &n2_mau_driver,
-};
-
-static int __init n2_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-static void __exit n2_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_init(n2_init);
-module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
deleted file mode 100644
index 2406763b0306..000000000000
--- a/drivers/crypto/n2_core.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _N2_CORE_H
-#define _N2_CORE_H
-
-#ifndef __ASSEMBLY__
-
-struct ino_blob {
- u64 intr;
- u64 ino;
-};
-
-struct spu_mdesc_info {
- u64 cfg_handle;
- struct ino_blob *ino_table;
- int num_intrs;
-};
-
-struct n2_crypto {
- struct spu_mdesc_info cwq_info;
- struct list_head cwq_list;
-};
-
-struct n2_mau {
- struct spu_mdesc_info mau_info;
- struct list_head mau_list;
-};
-
-#define CWQ_ENTRY_SIZE 64
-#define CWQ_NUM_ENTRIES 64
-
-#define MAU_ENTRY_SIZE 64
-#define MAU_NUM_ENTRIES 64
-
-struct cwq_initial_entry {
- u64 control;
- u64 src_addr;
- u64 auth_key_addr;
- u64 auth_iv_addr;
- u64 final_auth_state_addr;
- u64 enc_key_addr;
- u64 enc_iv_addr;
- u64 dest_addr;
-};
-
-struct cwq_ext_entry {
- u64 len;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-struct cwq_final_entry {
- u64 control;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-#define CONTROL_LEN 0x000000000000ffffULL
-#define CONTROL_LEN_SHIFT 0
-#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
-#define CONTROL_HMAC_KEY_LEN_SHIFT 16
-#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
-#define CONTROL_ENC_TYPE_SHIFT 24
-#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
-#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
-#define ENC_TYPE_ALG_DES 0x08ULL
-#define ENC_TYPE_ALG_3DES 0x0cULL
-#define ENC_TYPE_ALG_AES128 0x10ULL
-#define ENC_TYPE_ALG_AES192 0x14ULL
-#define ENC_TYPE_ALG_AES256 0x18ULL
-#define ENC_TYPE_ALG_RESERVED 0x1cULL
-#define ENC_TYPE_ALG_MASK 0x1cULL
-#define ENC_TYPE_CHAINING_ECB 0x00ULL
-#define ENC_TYPE_CHAINING_CBC 0x01ULL
-#define ENC_TYPE_CHAINING_CFB 0x02ULL
-#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
-#define ENC_TYPE_CHAINING_MASK 0x03ULL
-#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
-#define CONTROL_AUTH_TYPE_SHIFT 32
-#define AUTH_TYPE_RESERVED 0x00ULL
-#define AUTH_TYPE_MD5 0x01ULL
-#define AUTH_TYPE_SHA1 0x02ULL
-#define AUTH_TYPE_SHA256 0x03ULL
-#define AUTH_TYPE_CRC32 0x04ULL
-#define AUTH_TYPE_HMAC_MD5 0x05ULL
-#define AUTH_TYPE_HMAC_SHA1 0x06ULL
-#define AUTH_TYPE_HMAC_SHA256 0x07ULL
-#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
-#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
-#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
-#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
-#define CONTROL_STRAND 0x000000e000000000ULL
-#define CONTROL_STRAND_SHIFT 37
-#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
-#define CONTROL_HASH_LEN_SHIFT 40
-#define CONTROL_INTERRUPT 0x0001000000000000ULL
-#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
-#define CONTROL_RESERVED 0x001c000000000000ULL
-#define CONTROL_HV_DONE 0x0004000000000000ULL
-#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
-#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
-#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
-#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
-#define CONTROL_ENCRYPT 0x0080000000000000ULL
-#define CONTROL_OPCODE 0xff00000000000000ULL
-#define CONTROL_OPCODE_SHIFT 56
-#define OPCODE_INPLACE_BIT 0x80ULL
-#define OPCODE_SSL_KEYBLOCK 0x10ULL
-#define OPCODE_COPY 0x20ULL
-#define OPCODE_ENCRYPT 0x40ULL
-#define OPCODE_AUTH_MAC 0x41ULL
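
These masks and shifts lay out the 64-bit CWQ control word; a field is set
by shifting the value into position and masking. A minimal hypothetical
helper (a sketch; the driver's real builder is control_word_base() in
n2_core.c and is not reproduced here):

#include <linux/types.h>

/* Sketch only: pack one field of a CWQ control word. */
static inline u64 cwq_set_field(u64 word, u64 mask, int shift, u64 val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

/* Usage sketch: select HMAC-MD5 with a 16-byte key.
 *   w = cwq_set_field(w, CONTROL_AUTH_TYPE,
 *                     CONTROL_AUTH_TYPE_SHIFT, AUTH_TYPE_HMAC_MD5);
 *   w = cwq_set_field(w, CONTROL_HMAC_KEY_LEN,
 *                     CONTROL_HMAC_KEY_LEN_SHIFT, 16);
 */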
-
-#endif /* !(__ASSEMBLY__) */
-
-/* NCS v2.0 hypervisor interfaces */
-#define HV_NCS_QTYPE_MAU 0x01
-#define HV_NCS_QTYPE_CWQ 0x02
-
-/* ncs_qconf()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QCONF
- * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * ARG1: Real address of queue, or handle for unconfigure
- * ARG2: Number of entries in queue, zero for unconfigure
- * RET0: status
- * RET1: queue handle
- *
- * Configure a queue in the stream processing unit.
- *
- * The real address given as the base must be 64-byte
- * aligned.
- *
- * The queue size can range from a minimum of 2 to a maximum
- * of 64. The queue size must be a power of two.
- *
- * To unconfigure a queue, specify a length of zero and place
- * the queue handle into ARG1.
- *
- * On configure success the hypervisor will set the FIRST, HEAD,
- * and TAIL registers to the address of the first entry in the
- * queue. The LAST register will be set to point to the last
- * entry in the queue.
- */
-#define HV_FAST_NCS_QCONF 0x111
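
The service is reached through the sun4v_ncs_qconf() wrapper declared at the
bottom of this header, as spu_queue_register_workfn() and spu_queue_destroy()
in n2_core.c do above. A minimal sketch of the configure/unconfigure cycle
(illustrative only, error handling reduced to a single code):

/* Sketch only: configure a 64-entry CWQ, then tear it down again. */
static int example_qconf_cycle(void *q_base)
{
	unsigned long qhandle;

	/* ARG1 is the 64-byte-aligned real address of the queue. */
	if (sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(q_base),
			    CWQ_NUM_ENTRIES, &qhandle) != HV_EOK)
		return -EINVAL;

	/* To unconfigure, pass the handle in ARG1 and zero entries. */
	if (sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle) != HV_EOK)
		return -EINVAL;

	return 0;
}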
-
-/* ncs_qinfo()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QINFO
- * ARG0: Queue handle
- * RET0: status
- * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * RET2: Queue base address
- * RET3: Number of entries
- */
-#define HV_FAST_NCS_QINFO 0x112
-
-/* ncs_gethead()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETHEAD
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue head offset
- */
-#define HV_FAST_NCS_GETHEAD 0x113
-
-/* ncs_gettail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETTAIL
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue tail offset
- */
-#define HV_FAST_NCS_GETTAIL 0x114
-
-/* ncs_settail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETTAIL
- * ARG0: Queue handle
- * ARG1: New tail offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETTAIL 0x115
-
-/* ncs_qhandle_to_devino()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
- * ARG0: Queue handle
- * RET0: status
- * RET1: devino
- */
-#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
-
-/* ncs_sethead_marker()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
- * ARG0: Queue handle
- * ARG1: New head offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETHEAD_MARKER 0x117
-
-#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
- unsigned long queue_ra,
- unsigned long num_entries,
- unsigned long *qhandle);
-extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
- unsigned long *queue_type,
- unsigned long *queue_ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
- unsigned long *head);
-extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
- unsigned long *tail);
-extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
- unsigned long tail);
-extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
- unsigned long *devino);
-extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
- unsigned long head);
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e27b84616743..551dd32a8db0 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -18,7 +18,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -272,9 +271,9 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
+ dd->in_sg_offset = 0;
if (out_sg_len)
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -871,21 +870,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -904,20 +900,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0f35c9164764..41d67780fd45 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -14,8 +14,6 @@
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
/*
* OMAP TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -186,8 +184,8 @@ struct omap_aes_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 498cbd585ed1..a099460d5f21 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -19,7 +19,6 @@
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -40,8 +39,6 @@
#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2)
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
((x ^ 0x01) * 0x04))
@@ -152,8 +149,8 @@ struct omap_des_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
@@ -379,8 +376,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->in_sg_offset = 0;
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -836,21 +833,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -869,20 +863,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
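
Both OMAP conversions replace the scatter_walk state with a bare byte offset
into the current scatterlist entry. Extracted into a hypothetical standalone
helper, the PIO walk pattern is (a sketch, not driver code; writereg() stands
in for the device-specific register write):

#include <linux/scatterlist.h>

void writereg(u32 val);	/* hypothetical MMIO write */

/* Sketch only: feed nwords 32-bit words from an sg list, advancing a
 * plain offset and hopping to the next entry once the current one is
 * consumed.
 */
static struct scatterlist *pio_feed_words(struct scatterlist *sg,
					  unsigned int *offset, int nwords)
{
	u32 *src = sg_virt(sg) + *offset;
	int i;

	for (i = 0; i < nwords; i++) {
		writereg(*src);
		*offset += 4;
		if (*offset == sg->length) {
			sg = sg_next(sg);
			if (sg) {
				*offset = 0;
				src = sg_virt(sg);
			}
		} else {
			src++;
		}
	}
	return sg;
}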
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 7d811728f047..97b56e92ea33 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e228a31fe28d..e95e84486d9a 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -3,14 +3,15 @@
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = {
#endif
};
-static void qce_unregister_algs(struct qce_device *qce)
+static void qce_unregister_algs(void *data)
{
const struct qce_algo_ops *ops;
+ struct qce_device *qce = data;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
@@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce)
}
}
-static int qce_register_algs(struct qce_device *qce)
+static int devm_qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
- int i, ret = -ENODEV;
+ int i, j, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
- if (ret)
- break;
+ if (ret) {
+ for (j = i - 1; j >= 0; j--)
+ ops->unregister_algs(qce);
+ return ret;
+ }
}
- return ret;
+ return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}
static int qce_handle_request(struct crypto_async_request *async_req)
@@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
int ret = 0, err;
- spin_lock_irqsave(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ if (req)
+ ret = crypto_enqueue_request(&qce->queue, req);
- if (req)
- ret = crypto_enqueue_request(&qce->queue, req);
+ /* busy, do not dequeue request */
+ if (qce->req)
+ return ret;
- /* busy, do not dequeue request */
- if (qce->req) {
- spin_unlock_irqrestore(&qce->lock, flags);
- return ret;
+ backlog = crypto_get_backlog(&qce->queue);
+ async_req = crypto_dequeue_request(&qce->queue);
+ if (async_req)
+ qce->req = async_req;
}
- backlog = crypto_get_backlog(&qce->queue);
- async_req = crypto_dequeue_request(&qce->queue);
- if (async_req)
- qce->req = async_req;
-
- spin_unlock_irqrestore(&qce->lock, flags);
-
if (!async_req)
return ret;
if (backlog) {
- spin_lock_bh(&qce->lock);
- crypto_request_complete(backlog, -EINPROGRESS);
- spin_unlock_bh(&qce->lock);
+ scoped_guard(mutex, &qce->lock)
+ crypto_request_complete(backlog, -EINPROGRESS);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
return ret;
}
-static void qce_tasklet_req_done(unsigned long data)
+static void qce_req_done_work(struct work_struct *work)
{
- struct qce_device *qce = (struct qce_device *)data;
+ struct qce_device *qce = container_of(work, struct qce_device,
+ done_work);
struct crypto_async_request *req;
- unsigned long flags;
- spin_lock_irqsave(&qce->lock, flags);
- req = qce->req;
- qce->req = NULL;
- spin_unlock_irqrestore(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ req = qce->req;
+ qce->req = NULL;
+ }
if (req)
crypto_request_complete(req, qce->result);
@@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce,
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
static int qce_check_version(struct qce_device *qce)
@@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get_optional(qce->dev, "core");
+ qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get_optional(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get_optional(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
@@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(qce->core);
+ ret = devm_qce_dma_request(qce->dev, &qce->dma);
if (ret)
- goto err_mem_path_disable;
-
- ret = clk_prepare_enable(qce->iface);
- if (ret)
- goto err_clks_core;
-
- ret = clk_prepare_enable(qce->bus);
- if (ret)
- goto err_clks_iface;
+ return ret;
- ret = qce_dma_request(qce->dev, &qce->dma);
+ ret = qce_check_version(qce);
if (ret)
- goto err_clks;
+ return ret;
- ret = qce_check_version(qce);
+ ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
- goto err_clks;
+ return ret;
- spin_lock_init(&qce->lock);
- tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
- (unsigned long)qce);
+ INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
- ret = qce_register_algs(qce);
- if (ret)
- goto err_dma;
-
- return 0;
-
-err_dma:
- qce_dma_release(&qce->dma);
-err_clks:
- clk_disable_unprepare(qce->bus);
-err_clks_iface:
- clk_disable_unprepare(qce->iface);
-err_clks_core:
- clk_disable_unprepare(qce->core);
-err_mem_path_disable:
- icc_set_bw(qce->mem_path, 0, 0);
-
- return ret;
-}
-
-static void qce_crypto_remove(struct platform_device *pdev)
-{
- struct qce_device *qce = platform_get_drvdata(pdev);
-
- tasklet_kill(&qce->done_tasklet);
- qce_unregister_algs(qce);
- qce_dma_release(&qce->dma);
- clk_disable_unprepare(qce->bus);
- clk_disable_unprepare(qce->iface);
- clk_disable_unprepare(qce->core);
+ return devm_qce_register_algs(qce);
}
static const struct of_device_id qce_crypto_of_match[] = {
@@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 228fcd69ec51..eb6fa7a8b64a 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -6,13 +6,16 @@
#ifndef _CORE_H_
#define _CORE_H_
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
- * @done_tasklet: done tasklet object
+ * @done_work: workqueue context
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
@@ -28,8 +31,8 @@
*/
struct qce_device {
struct crypto_queue queue;
- spinlock_t lock;
- struct tasklet_struct done_tasklet;
+ struct mutex lock;
+ struct work_struct done_work;
struct crypto_async_request *req;
int result;
void __iomem *base;
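
The queue handling above leans on the cleanup.h scoped_guard() helper, which
holds the mutex for exactly the enclosed block and releases it on any exit
path, including an early return. A minimal sketch with a hypothetical my_dev
type (not qce code):

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct my_dev {			/* hypothetical example type */
	struct mutex lock;
	bool busy;
};

static int example_claim(struct my_dev *dev)
{
	scoped_guard(mutex, &dev->lock) {
		if (dev->busy)
			return -EBUSY;	/* unlocks automatically */
		dev->busy = true;
	}
	/* lock already dropped here */
	return 0;
}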
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 46db5bf366b4..1dec7aea852d 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -3,12 +3,22 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+static void qce_dma_release(void *data)
+{
+ struct qce_dma_data *dma = data;
+
+ dma_release_channel(dma->txchan);
+ dma_release_channel(dma->rxchan);
+ kfree(dma->result_buf);
+}
+
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
@@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
- return 0;
+ return devm_add_action_or_reset(dev, qce_dma_release, dma);
+
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
@@ -39,13 +50,6 @@ error_rx:
return ret;
}
-void qce_dma_release(struct qce_dma_data *dma)
-{
- dma_release_channel(dma->txchan);
- dma_release_channel(dma->rxchan);
- kfree(dma->result_buf);
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 786402169360..31629185000e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -34,8 +34,7 @@ struct qce_dma_data {
void *ignore_buf;
};
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
-void qce_dma_release(struct qce_dma_data *dma);
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);
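For context, the conversion above ties DMA channel teardown to the device via devm_add_action_or_reset(), which is why qce_crypto_remove() could be deleted entirely. A minimal sketch of that devres pattern, not part of this patch (my_acquire/my_release are placeholder names):

    #include <linux/device.h>

    static void my_release(void *data)
    {
            /* Undo the acquire step; runs automatically on driver detach. */
    }

    static int my_acquire(struct device *dev, void *res)
    {
            /*
             * Register the release callback after a successful acquire.
             * If registration itself fails, the callback runs immediately
             * ("or_reset"), so the caller needs no manual unwind path.
             */
            return devm_add_action_or_reset(dev, my_release, res);
    }
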
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index fc72af8aa9a7..71b748183cfa 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
- base->cra_priority = 300;
+ base->cra_priority = 175;
base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..ffb334eb5b34 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->encrypt = qce_skcipher_encrypt;
alg->decrypt = qce_skcipher_decrypt;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
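The priority reductions above (300 to 175 for hashes, 300 to 275 for skciphers) matter because the crypto API binds an algorithm name to the highest-priority registered implementation. A minimal illustration, not part of this patch:

    #include <crypto/skcipher.h>

    /*
     * Returns the "cbc(aes)" implementation with the highest
     * cra_priority: a driver at 275 still beats aes-generic (100),
     * but now loses to any implementation registered at 300.
     */
    struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
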
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index 9d130592cc0a..d734c9a56786 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -1750,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ int ret;
- tegra_cmac_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_cmac_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 4d4bd727f498..0b5cdd5676b1 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- tegra_sha_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_sha_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 3ebac2496679..70219099c604 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -244,13 +244,9 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
edev = NULL;
out:
mutex_unlock(&devfreq_event_list_lock);
-
- if (!edev) {
- of_node_put(node);
- return ERR_PTR(-ENODEV);
- }
-
of_node_put(node);
+ if (!edev)
+ return ERR_PTR(-ENODEV);
return edev;
}
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 7d06c476d8e9..b9ea7ad2e51b 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -236,8 +236,7 @@ err_regulator:
return ret;
}
-static int exynos_bus_parse_of(struct device_node *np,
- struct exynos_bus *bus)
+static int exynos_bus_parse_of(struct exynos_bus *bus)
{
struct device *dev = bus->dev;
struct dev_pm_opp *opp;
@@ -408,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
}
/* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
+ ret = exynos_bus_parse_of(bus);
if (ret < 0)
goto err_reg;
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 17f6b6367113..c9aba2304de7 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -10,6 +10,8 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
+#include <asm/cpuid.h>
+
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
@@ -58,11 +60,11 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
{
/* CPUID level 9 returns DCA configuration */
/* Bit 0 indicates DCA enabled by the BIOS */
- unsigned long cpuid_level_9;
+ u32 eax;
int res;
- cpuid_level_9 = cpuid_eax(9);
- res = test_bit(0, &cpuid_level_9);
+ eax = cpuid_eax(CPUID_LEAF_DCA);
+ res = eax & BIT(0);
if (!res)
dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
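The change above swaps a magic leaf number for a named constant and drops test_bit() on a stack variable in favor of a plain mask. As a standalone sketch (assuming asm/cpuid.h defines CPUID_LEAF_DCA as leaf 9, per the comment):

    #include <asm/cpuid.h>

    static bool dca_bios_enabled(void)
    {
            u32 eax = cpuid_eax(CPUID_LEAF_DCA);

            return eax & BIT(0);    /* bit 0: DCA enabled by the BIOS */
    }
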
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 06f7b43a6f78..2051a7c944a5 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -78,6 +78,7 @@ config EDAC_GHES
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
+ depends on AMD_NODE
imply AMD_ATL
help
Support for error detection and correction of DRAM ECC errors on
@@ -303,14 +304,6 @@ config EDAC_PASEMI
Support for error detection and correction on PA Semi
PWRficient.
-config EDAC_CELL
- tristate "Cell Broadband Engine memory controller"
- depends on PPC_CELL_COMMON
- help
- Support for error detection and correction on the
- Cell Broadband Engine internal memory controller
- on platform without a hypervisor
-
config EDAC_CPC925
tristate "IBM CPC925 Memory Controller (PPC970FX)"
depends on PPC64
@@ -546,5 +539,13 @@ config EDAC_VERSAL
Support injecting both correctable and uncorrectable errors
for debugging purposes.
+config EDAC_LOONGSON
+ tristate "Loongson Memory Controller"
+ depends on LOONGARCH && ACPI
+ help
+ Support for error detection and correction on the Loongson
+ family memory controller. This driver reports single bit
+ errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
+ are compatible.
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9cf19d8d13d..89789ba8275f 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -62,8 +62,6 @@ obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
-obj-$(CONFIG_EDAC_CELL) += cell_edac.o
-
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
@@ -86,3 +84,4 @@ obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
+obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5d356b7c4589..8414ceb43e4a 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2,6 +2,7 @@
#include <linux/ras.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
static struct edac_pci_ctl_info *pci_ctl;
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
deleted file mode 100644
index c2420e2287ff..000000000000
--- a/drivers/edac/cell_edac.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Cell MIC driver for ECC counting
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-#undef DEBUG
-
-#include <linux/edac.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/stop_machine.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "edac_module.h"
-
-struct cell_edac_priv
-{
- struct cbe_mic_tm_regs __iomem *regs;
- int node;
- int chanmask;
-#ifdef DEBUG
- u64 prev_fir;
-#endif
-};
-
-static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset, syndrome;
-
- dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
- syndrome = (ar & 0x000000001fe00000ul) >> 21;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- csrow->first_page + pfn, offset, syndrome,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset;
-
- dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- csrow->first_page + pfn, offset, 0,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_check(struct mem_ctl_info *mci)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- u64 fir, addreg, clear = 0;
-
- fir = in_be64(&priv->regs->mic_fir);
-#ifdef DEBUG
- if (fir != priv->prev_fir) {
- dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
- priv->prev_fir = fir;
- }
-#endif
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
- cell_edac_count_ce(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
- cell_edac_count_ce(mci, 1, addreg);
- }
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
- cell_edac_count_ue(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
- cell_edac_count_ue(mci, 1, addreg);
- }
-
- /* The procedure for clearing FIR bits is a bit ... weird */
- if (clear) {
- fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
- fir |= CBE_MIC_FIR_ECC_RESET_MASK;
- fir &= ~clear;
- out_be64(&priv->regs->mic_fir, fir);
- (void)in_be64(&priv->regs->mic_fir);
-
- mb(); /* sync up */
-#ifdef DEBUG
- fir = in_be64(&priv->regs->mic_fir);
- dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
-#endif
- }
-}
-
-static void cell_edac_init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow = mci->csrows[0];
- struct dimm_info *dimm;
- struct cell_edac_priv *priv = mci->pvt_info;
- struct device_node *np;
- int j;
- u32 nr_pages;
-
- for_each_node_by_name(np, "memory") {
- struct resource r;
-
- /* We "know" that the Cell firmware only creates one entry
- * in the "memory" nodes. If that changes, this code will
- * need to be adapted.
- */
- if (of_address_to_resource(np, 0, &r))
- continue;
- if (of_node_to_nid(np) != priv->node)
- continue;
- csrow->first_page = r.start >> PAGE_SHIFT;
- nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + nr_pages - 1;
-
- for (j = 0; j < csrow->nr_channels; j++) {
- dimm = csrow->channels[j]->dimm;
- dimm->mtype = MEM_XDR;
- dimm->edac_mode = EDAC_SECDED;
- dimm->nr_pages = nr_pages / csrow->nr_channels;
- }
- dev_dbg(mci->pdev,
- "Initialized on node %d, chanmask=0x%x,"
- " first_page=0x%lx, nr_pages=0x%x\n",
- priv->node, priv->chanmask,
- csrow->first_page, nr_pages);
- break;
- }
- of_node_put(np);
-}
-
-static int cell_edac_probe(struct platform_device *pdev)
-{
- struct cbe_mic_tm_regs __iomem *regs;
- struct mem_ctl_info *mci;
- struct edac_mc_layer layers[2];
- struct cell_edac_priv *priv;
- u64 reg;
- int rc, chanmask, num_chans;
-
- regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
- if (regs == NULL)
- return -ENODEV;
-
- edac_op_state = EDAC_OPSTATE_POLL;
-
- /* Get channel population */
- reg = in_be64(&regs->mic_mnt_cfg);
- dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
- chanmask = 0;
- if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
- chanmask |= 0x1;
- if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
- chanmask |= 0x2;
- if (chanmask == 0) {
- dev_warn(&pdev->dev,
- "Yuck ! No channel populated ? Aborting !\n");
- return -ENODEV;
- }
- dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
- in_be64(&regs->mic_fir));
-
- /* Allocate & init EDAC MC data structure */
- num_chans = chanmask == 3 ? 2 : 1;
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = num_chans;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
- sizeof(struct cell_edac_priv));
- if (mci == NULL)
- return -ENOMEM;
- priv = mci->pvt_info;
- priv->regs = regs;
- priv->node = pdev->id;
- priv->chanmask = chanmask;
- mci->pdev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_XDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->mod_name = "cell_edac";
- mci->ctl_name = "MIC";
- mci->dev_name = dev_name(&pdev->dev);
- mci->edac_check = cell_edac_check;
- cell_edac_init_csrows(mci);
-
- /* Register with EDAC core */
- rc = edac_mc_add_mc(mci);
- if (rc) {
- dev_err(&pdev->dev, "failed to register with EDAC core\n");
- edac_mc_free(mci);
- return rc;
- }
-
- return 0;
-}
-
-static void cell_edac_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
- if (mci)
- edac_mc_free(mci);
-}
-
-static struct platform_driver cell_edac_driver = {
- .driver = {
- .name = "cbe-mic",
- },
- .probe = cell_edac_probe,
- .remove = cell_edac_remove,
-};
-
-static int __init cell_edac_init(void)
-{
- /* Sanity check registers data structure */
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_0) != 0xf8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_1) != 0x1b8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_config) != 0x218);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_fir) != 0x230);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_mnt_cfg) != 0x210);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_exc) != 0x208);
-
- return platform_driver_register(&cell_edac_driver);
-}
-
-static void __exit cell_edac_exit(void)
-{
- platform_driver_unregister(&cell_edac_driver);
-}
-
-module_init(cell_edac_init);
-module_exit(cell_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d6eed727b0cd..0959320fe51c 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -214,7 +214,7 @@ static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
unsigned int row, chn;
/*
- * Alocate and fill the csrow/channels structs
+ * Allocate and fill the csrow/channels structs
*/
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4200aec04831..0f338adf7d93 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -422,7 +422,7 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
return nr_pages;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
@@ -449,7 +449,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
return 0;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
int err, i;
@@ -636,7 +636,7 @@ static void dimm_release(struct device *dev)
*/
}
-/* Create a DIMM object under specifed memory controller device */
+/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
struct dimm_info *dimm)
{
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 51556c72a967..f45d849d3f15 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -938,16 +938,18 @@ static struct res_config gnr_cfg = {
};
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &i10nm_cfg1),
+
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -1010,7 +1012,7 @@ static struct notifier_block i10nm_mce_dec = {
static int __init i10nm_init(void)
{
- u8 mc = 0, src_id = 0, node_id = 0;
+ u8 mc = 0, src_id = 0;
const struct x86_cpu_id *id;
struct res_config *cfg;
const char *owner;
@@ -1070,19 +1072,14 @@ static int __init i10nm_init(void)
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
-
- edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
- d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
+ d->imc[i].src_id = src_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
d->imc[i].num_channels = cfg->hbm_chan_num;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 4b5a71f8739d..4a1bebc1ff14 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -338,11 +338,11 @@ struct i5000_pvt {
u16 mir0, mir1, mir2;
- u16 b0_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
- u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
+ u16 b0_ambpresent1; /* Branch 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
@@ -1210,7 +1210,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
&pvt->b0_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
- /* Only if we have 2 branchs (4 channels) */
+ /* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
diff --git a/drivers/edac/loongson_edac.c b/drivers/edac/loongson_edac.c
new file mode 100644
index 000000000000..38745800ed01
--- /dev/null
+++ b/drivers/edac/loongson_edac.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/edac.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "edac_module.h"
+
+#define ECC_CS_COUNT_REG 0x18
+
+struct loongson_edac_pvt {
+ void __iomem *ecc_base;
+
+ /*
+ * The ECC register in this controller records the number of errors
+ * encountered since reset and cannot be zeroed so in order to be able
+ * to report the error count at each check, this records the previous
+ * register state.
+ */
+ int last_ce_count;
+};
+
+static int read_ecc(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ u64 ecc;
+ int cs;
+
+ ecc = readq(pvt->ecc_base + ECC_CS_COUNT_REG);
+ /* cs0 -- cs3 */
+ cs = ecc & 0xff;
+ cs += (ecc >> 8) & 0xff;
+ cs += (ecc >> 16) & 0xff;
+ cs += (ecc >> 24) & 0xff;
+
+ return cs;
+}
+
+static void edac_check(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ int new, add;
+
+ new = read_ecc(mci);
+ add = new - pvt->last_ce_count;
+ pvt->last_ce_count = new;
+ if (add <= 0)
+ return;
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add,
+ 0, 0, 0, 0, 0, -1, "error", "");
+}
+
+static void dimm_config_init(struct mem_ctl_info *mci)
+{
+ struct dimm_info *dimm;
+ u32 size, npages;
+
+ /* size not used */
+ size = -1;
+ npages = MiB_TO_PAGES(size);
+
+ dimm = edac_get_dimm(mci, 0, 0, 0);
+ dimm->nr_pages = npages;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "MC#%uChannel#%u_DIMM#%u", mci->mc_idx, 0, 0);
+ dimm->grain = 8;
+}
+
+static void pvt_init(struct mem_ctl_info *mci, void __iomem *vbase)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+
+ pvt->ecc_base = vbase;
+ pvt->last_ce_count = read_ecc(mci);
+}
+
+static int edac_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ void __iomem *vbase;
+ int ret;
+
+ vbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vbase))
+ return PTR_ERR(vbase);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct loongson_edac_pvt));
+ if (mci == NULL)
+ return -ENOMEM;
+
+ mci->mc_idx = edac_device_alloc_index();
+ mci->mtype_cap = MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "loongson_edac.c";
+ mci->ctl_name = "loongson_edac_ctl";
+ mci->dev_name = "loongson_edac_dev";
+ mci->ctl_page_to_phys = NULL;
+ mci->pdev = &pdev->dev;
+ mci->error_desc.grain = 8;
+ mci->edac_check = edac_check;
+
+ pvt_init(mci, vbase);
+ dimm_config_init(mci);
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ edac_mc_free(mci);
+ return ret;
+ }
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+}
+
+static void edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+ if (mci)
+ edac_mc_free(mci);
+}
+
+static const struct acpi_device_id loongson_edac_acpi_match[] = {
+ {"LOON0010", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, loongson_edac_acpi_match);
+
+static struct platform_driver loongson_edac_driver = {
+ .probe = edac_probe,
+ .remove = edac_remove,
+ .driver = {
+ .name = "loongson-mc-edac",
+ .acpi_match_table = loongson_edac_acpi_match,
+ },
+};
+module_platform_driver(loongson_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhao Qunqin <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("EDAC driver for loongson memory controller");
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 14cfd394b469..29897b21fb8e 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
};
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -600,7 +600,7 @@ static int __init skx_init(void)
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
- u8 mc = 0, src_id, node_id;
+ u8 mc = 0, src_id;
struct skx_dev *d;
edac_dbg(2, "\n");
@@ -650,15 +650,12 @@ static int __init skx_init(void)
rc = skx_get_src_id(d, 0xf0, &src_id);
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
- edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < SKX_NUM_IMC; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 6cf17af7d911..f7bd930e058f 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -19,6 +19,7 @@
#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
+#include <asm/uv/uv.h>
#include "edac_module.h"
#include "skx_common.h"
@@ -221,33 +222,51 @@ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
}
EXPORT_SYMBOL_GPL(skx_set_decode);
-int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
{
- u32 reg;
+ int node;
+ int cpu;
- if (pci_read_config_dword(d->util_all, off, &reg)) {
- skx_printk(KERN_ERR, "Failed to read src id\n");
- return -ENODEV;
+ node = pcibus_to_node(d->util_all->bus);
+ if (numa_valid_node(node)) {
+ for_each_cpu(cpu, cpumask_of_pcibus(d->util_all->bus)) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && cpu_to_node(cpu) == node) {
+ *id = c->topo.pkg_id;
+ return 0;
+ }
+ }
}
- *id = GET_BITFIELD(reg, 12, 14);
- return 0;
+ skx_printk(KERN_ERR, "Failed to get package ID from NUMA information\n");
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(skx_get_src_id);
-int skx_get_node_id(struct skx_dev *d, u8 *id)
+int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
u32 reg;
- if (pci_read_config_dword(d->util_all, 0xf4, &reg)) {
- skx_printk(KERN_ERR, "Failed to read node id\n");
+ /*
+ * The 3-bit source IDs in PCI configuration space registers are limited
+ * to 8 unique IDs, and each ID is local to a UPI/QPI domain.
+ *
+ * Source IDs cannot be used to map devices to sockets on UV systems
+ * because they can exceed 8 sockets and have multiple UPI/QPI domains
+ * with identical, repeating source IDs.
+ */
+ if (is_uv_system())
+ return skx_get_pkg_id(d, id);
+
+ if (pci_read_config_dword(d->util_all, off, &reg)) {
+ skx_printk(KERN_ERR, "Failed to read src id\n");
return -ENODEV;
}
- *id = GET_BITFIELD(reg, 0, 2);
+ *id = GET_BITFIELD(reg, 12, 14);
return 0;
}
-EXPORT_SYMBOL_GPL(skx_get_node_id);
+EXPORT_SYMBOL_GPL(skx_get_src_id);
static int get_width(u32 mtr)
{
@@ -507,7 +526,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
pvt->imc = imc;
mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name,
- imc->node_id, imc->lmc);
+ imc->src_id, imc->lmc);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail0;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 54bba8a62f72..b0845bdd4516 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -103,7 +103,7 @@ struct skx_dev {
bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
- u8 src_id, node_id;
+ u8 src_id;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -244,7 +244,6 @@ void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
-int skx_get_node_id(struct skx_dev *d, u8 *id);
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
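The UV fallback added in skx_common.c derives a socket ID from topology instead of PCI config space: map the device's bus to its NUMA node, then read the package ID of any initialized CPU on that node. A condensed sketch of that lookup, not part of this patch:

    #include <linux/topology.h>

    static int bus_to_pkg_id(struct pci_bus *bus, u8 *id)
    {
            int node = pcibus_to_node(bus);
            int cpu;

            if (!numa_valid_node(node))
                    return -ENODEV;

            for_each_cpu(cpu, cpumask_of_pcibus(bus)) {
                    struct cpuinfo_x86 *c = &cpu_data(cpu);

                    if (c->initialized && cpu_to_node(cpu) == node) {
                            *id = c->topo.pkg_id;
                            return 0;
                    }
            }

            return -ENODEV;
    }
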
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 48b12f81141d..10ea7962323e 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -442,7 +442,7 @@ struct scmi_transport_core_operations {
*/
struct scmi_transport {
struct device *supplier;
- struct scmi_desc *desc;
+ struct scmi_desc desc;
struct scmi_transport_core_operations **core_ops;
};
@@ -468,7 +468,7 @@ static int __tag##_probe(struct platform_device *pdev) \
device_set_of_node_from_dev(&spdev->dev, dev); \
\
strans.supplier = dev; \
- strans.desc = &(__desc); \
+ memcpy(&strans.desc, &(__desc), sizeof(strans.desc)); \
strans.core_ops = &(__core_ops); \
\
ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 1b5fb2c4ce86..60050da54bf2 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
@@ -43,6 +44,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
+#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
+
static DEFINE_IDA(scmi_id);
static DEFINE_XARRAY(scmi_protocols);
@@ -276,6 +279,44 @@ scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
}
static const struct scmi_protocol *
+scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
+{
+ const struct scmi_protocol *proto;
+
+ proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ if (!proto) {
+ int ret;
+
+ pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
+ protocol_id, version->vendor_id);
+
+ /* Note that vendor_id is mandatory for vendor protocols */
+ ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
+ protocol_id, version->vendor_id);
+ if (ret) {
+ pr_warn("Problem loading module for protocol 0x%x\n",
+ protocol_id);
+ return NULL;
+ }
+
+ /* Lookup again, once modules loaded */
+ proto = scmi_vendor_protocol_lookup(protocol_id,
+ version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ }
+
+ if (proto)
+ pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
+ protocol_id, proto->vendor_id ?: "",
+ proto->sub_vendor_id ?: "", proto->impl_ver);
+
+ return proto;
+}
+
+static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
const struct scmi_protocol *proto = NULL;
@@ -283,10 +324,8 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
proto = xa_load(&scmi_protocols, protocol_id);
else
- proto = scmi_vendor_protocol_lookup(protocol_id,
- version->vendor_id,
- version->sub_vendor_id,
- version->impl_ver);
+ proto = scmi_vendor_protocol_get(protocol_id, version);
+
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
@@ -294,11 +333,6 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
- if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
- pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
- protocol_id, proto->vendor_id ?: "",
- proto->sub_vendor_id ?: "", proto->impl_ver);
-
return proto;
}
@@ -366,7 +400,9 @@ int scmi_protocol_register(const struct scmi_protocol *proto)
return ret;
}
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+ pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
+ proto->id, proto->vendor_id, proto->sub_vendor_id,
+ proto->impl_ver);
return 0;
}
@@ -3028,7 +3064,7 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
int ret;
trans = dev_get_platdata(dev);
- if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
+ if (!trans || !trans->supplier || !trans->core_ops)
return NULL;
if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
@@ -3043,33 +3079,33 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
- &trans->desc->max_rx_timeout_ms);
+ &trans->desc.max_rx_timeout_ms);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
- &trans->desc->max_msg_size);
+ &trans->desc.max_msg_size);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
ret = of_property_read_u32(dev->of_node, "arm,max-msg",
- &trans->desc->max_msg);
+ &trans->desc.max_msg);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-msg DT property.\n");
dev_info(dev,
"SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
- trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size,
- trans->desc->max_msg);
+ trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
+ trans->desc.max_msg);
/* System wide atomic threshold for atomic ops .. if any */
if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
- &trans->desc->atomic_threshold))
+ &trans->desc.atomic_threshold))
dev_info(dev,
"SCMI System wide atomic threshold set to %u us\n",
- trans->desc->atomic_threshold);
+ trans->desc.atomic_threshold);
- return trans->desc;
+ return &trans->desc;
}
static int scmi_probe(struct platform_device *pdev)
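The autoloading added above works because a vendor protocol module publishes an alias in the same "scmi-protocol-0x%02x-%s" format that scmi_vendor_protocol_get() feeds to request_module(). A minimal sketch of the two halves, not part of this patch (protocol ID 0x81 and vendor "VENDOR" are illustrative):

    /* Core side: ask userspace to load the module by alias. */
    request_module("scmi-protocol-0x%02x-%s", 0x81, "VENDOR");

    /* Vendor module side: publish the matching alias. */
    MODULE_ALIAS("scmi-protocol-0x81-VENDOR");
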
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index b66df2981456..bd041c99b92b 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -378,6 +378,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
scmi_mailbox_desc, scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c
index f632a62cfb3e..21abb571e4f2 100644
--- a/drivers/firmware/arm_scmi/transports/smc.c
+++ b/drivers/firmware/arm_scmi/transports/smc.c
@@ -301,6 +301,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "qcom,scmi-smc" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc,
scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index 41aea33776a9..cb934db9b2b4 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -921,6 +921,7 @@ static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
{ 0 }
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_scmi_driver = {
.driver.name = "scmi-virtio",
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
index 17799eacf06c..aa176c1a5eef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
@@ -374,10 +374,11 @@ static const struct scmi_protocol scmi_imx_bbm = {
.ops = &scmi_imx_bbm_proto_ops,
.events = &scmi_imx_bbm_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_bbm);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_BBM) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI BBM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a86ab9b35953..83b69fc4fba5 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -309,10 +309,11 @@ static const struct scmi_protocol scmi_imx_misc = {
.ops = &scmi_imx_misc_proto_ops,
.events = &scmi_imx_misc_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_misc);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_MISC) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI MISC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 3ccbe14e4b0c..ee09269c63b5 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -3,3 +3,23 @@
config FW_CS_DSP
tristate
default n
+
+config FW_CS_DSP_KUNIT_TEST_UTILS
+ tristate
+ depends on KUNIT
+ select REGMAP
+ select FW_CS_DSP
+
+config FW_CS_DSP_KUNIT_TEST
+ tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select REGMAP
+ select FW_CS_DSP
+ select FW_CS_DSP_KUNIT_TEST_UTILS
+ help
+ This builds KUnit tests for cs_dsp.
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+ If in doubt, say "N".
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
index b91318ca0ff4..b32dfa869491 100644
--- a/drivers/firmware/cirrus/Makefile
+++ b/drivers/firmware/cirrus/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o
+
+obj-y += test/
diff --git a/drivers/firmware/cirrus/test/Makefile b/drivers/firmware/cirrus/test/Makefile
new file mode 100644
index 000000000000..7a24a6079ddc
--- /dev/null
+++ b/drivers/firmware/cirrus/test/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+
+cs_dsp_test_utils-objs := \
+ cs_dsp_mock_mem_maps.o \
+ cs_dsp_mock_bin.o \
+ cs_dsp_mock_regmap.o \
+ cs_dsp_mock_utils.o \
+ cs_dsp_mock_wmfw.o
+
+cs_dsp_test-objs := \
+ cs_dsp_test_bin.o \
+ cs_dsp_test_bin_error.o \
+ cs_dsp_test_callbacks.o \
+ cs_dsp_test_control_parse.o \
+ cs_dsp_test_control_cache.o \
+ cs_dsp_test_control_rw.o \
+ cs_dsp_test_wmfw.o \
+ cs_dsp_test_wmfw_error.o \
+ cs_dsp_tests.o
+
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST_UTILS) += cs_dsp_test_utils.o
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST) += cs_dsp_test.o
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
new file mode 100644
index 000000000000..49d84f7e59e6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// bin file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_BIN_BUF_SIZE 32768
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+struct cs_dsp_mock_bin_builder {
+ struct cs_dsp_test *test_priv;
+ void *buf;
+ void *write_p;
+ size_t bytes_used;
+};
+
+/**
+ * cs_dsp_mock_bin_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder)
+{
+ struct firmware *fw;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_raw_block() - Add a data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID.
+ * @alg_ver: Algorithm version.
+ * @type: Type of the block.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_coeff_item *item;
+ size_t bytes_needed = struct_size_t(struct wmfw_coeff_item, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_BIN_BUF_SIZE));
+
+ item = builder->write_p;
+
+ item->offset = cpu_to_le16(offset);
+ item->type = cpu_to_le16(type);
+ item->id = cpu_to_le32(alg_id);
+ item->ver = cpu_to_le32(alg_ver << 8);
+ item->len = cpu_to_le32(payload_len_bytes);
+
+ if (payload_len_bytes)
+ memcpy(item->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info, int type)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, strlen(info)); /* tmp is zeroed, so padding stays 0 */
+ info = tmp;
+ }
+
+ cs_dsp_mock_bin_add_raw_block(builder, 0, 0, type, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+
+/**
+ * cs_dsp_mock_bin_add_info() - Add an info block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, info, WMFW_INFO_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_name() - Add a name block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @name: Pointer to name string to be copied into the file.
+ */
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, name, WMFW_NAME_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_name, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_patch() - Add a patch data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID for the patch.
+ * @alg_ver: Algorithm version for the patch.
+ * @mem_region: Memory region for the patch.
+ * @reg_addr_offset: Offset to start of data in register addresses.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_bin_add_raw_block(builder, alg_id, alg_ver,
+ mem_region, reg_addr_offset,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_patch, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_init() - Initialize a struct cs_dsp_mock_bin_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required bin format version.
+ * @fw_version: Firmware version to put in bin file.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_bin_builder.
+ */
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version)
+{
+ struct cs_dsp_mock_bin_builder *builder;
+ struct wmfw_coeff_hdr *hdr;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_BIN_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ /* Create header */
+ hdr = builder->buf;
+ memcpy(hdr->magic, "WMDR", sizeof(hdr->magic));
+ hdr->len = cpu_to_le32(offsetof(struct wmfw_coeff_hdr, data));
+ hdr->ver = cpu_to_le32(fw_version | (format_version << 24));
+ hdr->core_ver = cpu_to_le32(((u32)priv->dsp->type << 24) | priv->dsp->rev);
+
+ builder->write_p = hdr->data;
+ builder->bytes_used = offsetof(struct wmfw_coeff_hdr, data);
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
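Taken together, these helpers let a KUnit test assemble a synthetic bin file in a few calls. A usage sketch against the API introduced above (the alg_id, versions, and payload values are arbitrary; storing the cs_dsp_test pointer in test->priv is an assumption):

    static void example_bin_test(struct kunit *test)
    {
            struct cs_dsp_test *priv = test->priv;
            struct cs_dsp_mock_bin_builder *bin;
            static const u32 payload[2] = { 0x123456, 0x789abc };
            struct firmware *fw;

            /* Format version 1, firmware version 1.0.0 */
            bin = cs_dsp_mock_bin_init(priv, 1, 0x010000);
            cs_dsp_mock_bin_add_name(bin, "example");
            cs_dsp_mock_bin_add_patch(bin, 0xfa, 0x010203, WMFW_ADSP2_YM,
                                      0, payload, sizeof(payload));

            /* fw can now be handed to cs_dsp as the coefficient file */
            fw = cs_dsp_mock_bin_get_firmware(bin);
    }
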
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
new file mode 100644
index 000000000000..161272e47bda
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock DSP memory maps for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/math.h>
+
+const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[] = {
+ { .type = WMFW_HALO_PM_PACKED, .base = 0x3800000 },
+ { .type = WMFW_HALO_XM_PACKED, .base = 0x2000000 },
+ { .type = WMFW_HALO_YM_PACKED, .base = 0x2C00000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x2800000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x3400000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[] = {
+ 0x5000, /* PM_PACKED */
+ 0x6000, /* XM_PACKED */
+ 0x47F4, /* YM_PACKED */
+ 0x8000, /* XM_UNPACKED_24 */
+ 0x5FF8, /* YM_UNPACKED_24 */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x080000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x0a0000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x0c0000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x0e0000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[] = {
+ 0x9000, /* PM */
+ 0xa000, /* ZM */
+ 0x2000, /* XM */
+ 0x2000, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x100000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x180000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x190000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x1a8000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[] = {
+ 0x6000, /* PM */
+ 0x800, /* ZM */
+ 0x800, /* XM */
+ 0x800, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes)
+{
+ int i;
+
+ for (i = 0; region_sizes[i]; ++i)
+ ;
+
+ return i;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_count_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_size_of_region() - Return size of given memory region.
+ *
+ * @dsp: Pointer to struct cs_dsp.
+ * @mem_type: Memory region type.
+ *
+ * Return: Size of region in bytes.
+ */
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type)
+{
+ const unsigned int *sizes;
+ int i;
+
+ if (dsp->mem == cs_dsp_mock_halo_dsp1_regions)
+ sizes = cs_dsp_mock_halo_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_32bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_32bit_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_16bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_16bit_dsp1_region_sizes;
+ else
+ return 0;
+
+ for (i = 0; i < dsp->num_mems; ++i) {
+ if (dsp->mem[i].type == mem_type)
+ return sizes[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_size_of_region, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_base_addr_for_mem() - Base register address for memory region.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Base register address of region.
+ */
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type)
+{
+ int num_mems = priv->dsp->num_mems;
+ const struct cs_dsp_region *region = priv->dsp->mem;
+ int i;
+
+ for (i = 0; i < num_mems; ++i) {
+ if (region[i].type == mem_type)
+ return region[i].base;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected region %d\n", mem_type);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_base_addr_for_mem, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_addr_inc_per_unpacked_word() - Unpacked register address increment per DSP word.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Amount by which register address increments to move to the next
+ * DSP word in unpacked XM/YM/ZM.
+ */
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return 2; /* two 16-bit register indexes per XM/YM/ZM word */
+ case WMFW_HALO:
+ return 4; /* one byte-addressed 32-bit register per XM/YM/ZM word */
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_addr_inc_per_unpacked_word, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_bytes() - Number of bytes in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of bytes in a group of registers forming the
+ * smallest bus access size (including any padding bits). For unpacked
+ * memory this is the number of registers containing one DSP word.
+ * For packed memory this is the number of registers in one packed
+ * access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return 3 * regmap_get_val_bytes(priv->dsp->regmap);
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return sizeof(u32);
+ case WMFW_HALO_PM_PACKED:
+ return 5 * sizeof(u32);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 3 * sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_registers() - Number of registers in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of registers forming the smallest bus access size.
+ * For unpacked memory this is the number of registers containing one
+ * DSP word. For packed memory this is the number of registers in one
+ * packed access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type)
+{
+ return cs_dsp_mock_reg_block_length_bytes(priv, mem_type) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_registers, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_dsp_words() - Number of dsp_words in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of DSP words in a group of registers forming the
+ * smallest bus access size.
+ */
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return regmap_get_val_bytes(priv->dsp->regmap) / 2;
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return 1;
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 4;
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_dsp_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_has_zm() - DSP has ZM
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: True if DSP has ZM.
+ */
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_has_zm, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_packed_to_unpacked_mem_type() - Unpacked region that is
+ * the same memory as a packed region.
+ *
+ * @packed_mem_type: Type of packed memory region.
+ *
+ * Return: unpacked type that is the same memory as packed_mem_type.
+ */
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type)
+{
+ switch (packed_mem_type) {
+ case WMFW_HALO_XM_PACKED:
+ return WMFW_ADSP2_XM;
+ case WMFW_HALO_YM_PACKED:
+ return WMFW_ADSP2_YM;
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_packed_to_unpacked_mem_type, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_num_dsp_words_to_num_packed_regs() - Number of DSP words
+ * to number of packed registers.
+ *
+ * @num_dsp_words: Number of DSP words.
+ *
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ *
+ * Return: Number of packed registers.
+ */
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ /* There are 3 registers for every 4 packed words (4 x 24 bits = 3 x 32 bits) */
+ return (num_dsp_words * 3) / 4;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_num_dsp_words_to_num_packed_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct wmfw_halo_id_hdr cs_dsp_mock_halo_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_HALO << 16),
+ .block_rev = cpu_to_be32(3 << 16),
+ .vendor_id = cpu_to_be32(0x2),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm_base = cpu_to_be32(((sizeof(struct wmfw_halo_id_hdr) +
+ (40 * sizeof(struct wmfw_halo_alg_hdr)))
+ / 4) * 3),
+ .xm_size = cpu_to_be32(0x20),
+
+ /* Allocate a dummy word of YM */
+ .ym_base = cpu_to_be32(0),
+ .ym_size = cpu_to_be32(1),
+
+ .n_algs = 0,
+};
+
+static const struct wmfw_adsp2_id_hdr cs_dsp_mock_adsp2_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_ADSP2 << 16),
+ .core_rev = cpu_to_be32(2 << 16),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm = cpu_to_be32(((sizeof(struct wmfw_adsp2_id_hdr) +
+ (40 * sizeof(struct wmfw_adsp2_alg_hdr)))
+ / 4) * 3),
+
+ .ym = cpu_to_be32(0),
+ .zm = cpu_to_be32(0),
+
+ .n_algs = 0,
+};
+
+/**
+ * cs_dsp_mock_xm_header_get_alg_base_in_words() - Algorithm base offset in DSP words.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @alg_id: Algorithm ID.
+ * @mem_type: Memory region type.
+ *
+ * Lookup an algorithm in the XM header and return the base offset in
+ * DSP words of the algorithm data in the requested memory region.
+ *
+ * Return: Offset in DSP words.
+ */
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_adsp2_alg_hdr adsp2;
+ struct wmfw_halo_alg_hdr halo;
+ } alg;
+ unsigned int alg_hdr_addr;
+ unsigned int val, xm_base = 0, ym_base = 0, zm_base = 0;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ alg_hdr_addr = xm + (sizeof(struct wmfw_adsp2_id_hdr) / 2);
+ for (;; alg_hdr_addr += sizeof(alg.adsp2) / 2) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.adsp2, sizeof(alg.adsp2));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.adsp2.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.adsp2.xm);
+ ym_base = be32_to_cpu(alg.adsp2.ym);
+ zm_base = be32_to_cpu(alg.adsp2.zm);
+ break;
+ }
+ }
+ break;
+ case WMFW_HALO:
+ alg_hdr_addr = xm + sizeof(struct wmfw_halo_id_hdr);
+ for (;; alg_hdr_addr += sizeof(alg.halo)) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.halo, sizeof(alg.halo));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.halo.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.halo.xm_base);
+ ym_base = be32_to_cpu(alg.halo.ym_base);
+ break;
+ }
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type %d\n", priv->dsp->type);
+ return 0;
+ }
+
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_HALO_XM_PACKED:
+ return xm_base;
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_YM_PACKED:
+ return ym_base;
+ case WMFW_ADSP2_ZM:
+ return zm_base;
+ default:
+ KUNIT_FAIL(priv->test, "Bad mem_type\n");
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
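+
+/*
+ * Usage sketch (hypothetical algorithm ID): combined with the base
+ * address and word increment helpers, this gives the register address
+ * of the start of an algorithm's data region, e.g. for YM:
+ *
+ *	alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ *								      0xfafa,
+ *								      WMFW_ADSP2_YM);
+ *	reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) +
+ *		   (alg_base_words *
+ *		    cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ */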
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_id_hdr adsp2;
+ struct wmfw_v3_id_hdr halo;
+ } hdr;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
+ return be32_to_cpu(hdr.adsp2.ver);
+ case WMFW_HALO:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
+ return be32_to_cpu(hdr.halo.ver);
+ default:
+ KUNIT_FAIL(priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
+ "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header)
+{
+ const struct wmfw_id_hdr *adsp2_hdr;
+ const struct wmfw_v3_id_hdr *halo_hdr;
+
+ switch (header->test_priv->dsp->type) {
+ case WMFW_ADSP2:
+ adsp2_hdr = header->blob_data;
+ return be32_to_cpu(adsp2_hdr->ver);
+ case WMFW_HALO:
+ halo_hdr = header->blob_data;
+ return be32_to_cpu(halo_hdr->ver);
+ default:
+ KUNIT_FAIL(header->test_priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_drop_from_regmap_cache() - Drop XM header from regmap cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ */
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ unsigned int bytes;
+ __be32 num_algs_be32;
+ unsigned int num_algs;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ /*
+ * Could be one 32-bit register or two 16-bit registers.
+ * A raw read will read the requested number of bytes.
+ */
+ regmap_raw_read(priv->dsp->regmap,
+ xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32));
+ num_algs = be32_to_cpu(num_algs_be32);
+ bytes = sizeof(struct wmfw_adsp2_id_hdr) +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
+ break;
+ case WMFW_HALO:
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs);
+ bytes = sizeof(struct wmfw_halo_id_hdr) +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + bytes - 4);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_drop_from_regmap_cache, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_xm_header_add_adsp2_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_adsp2_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word, next_free_zm_word;
+
+ next_free_xm_word = be32_to_cpu(hdr->xm);
+ next_free_ym_word = be32_to_cpu(hdr->ym);
+ next_free_zm_word = be32_to_cpu(hdr->zm);
+
+ /* Set num_algs in XM header. */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_adsp2_alg_hdr *alg_info =
+ (struct wmfw_adsp2_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last, alg_zm_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm = cpu_to_be32(algs->xm_base_words);
+ alg_info->ym = cpu_to_be32(algs->ym_base_words);
+ alg_info->zm = cpu_to_be32(algs->zm_base_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm && algs->xm_size_words)
+ alg_info->xm = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym && algs->ym_size_words)
+ alg_info->ym = cpu_to_be32(next_free_ym_word);
+
+ if (!alg_info->zm && algs->zm_size_words)
+ alg_info->zm = cpu_to_be32(next_free_zm_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm) + algs->xm_size_words - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym) + algs->ym_size_words - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+
+ alg_zm_last = be32_to_cpu(alg_info->zm) + algs->zm_size_words - 1;
+ if (alg_zm_last > next_free_zm_word)
+ next_free_zm_word = alg_zm_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+static void cs_dsp_mock_xm_header_add_halo_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_halo_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word;
+
+	/* Assume we're starting with a bare header */
+ next_free_xm_word = be32_to_cpu(hdr->xm_base) + be32_to_cpu(hdr->xm_size) - 1;
+ next_free_ym_word = be32_to_cpu(hdr->ym_base) + be32_to_cpu(hdr->ym_size) - 1;
+
+ /* Set num_algs in XM header */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_halo_alg_hdr *alg_info =
+ (struct wmfw_halo_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm_base = cpu_to_be32(algs->xm_base_words);
+ alg_info->xm_size = cpu_to_be32(algs->xm_size_words);
+ alg_info->ym_base = cpu_to_be32(algs->ym_base_words);
+ alg_info->ym_size = cpu_to_be32(algs->ym_size_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm_base && alg_info->xm_size)
+ alg_info->xm_base = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym_base && alg_info->ym_size)
+ alg_info->ym_base = cpu_to_be32(next_free_ym_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm_base) + be32_to_cpu(alg_info->xm_size) - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym_base) + be32_to_cpu(alg_info->ym_size) - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+/**
+ * cs_dsp_mock_xm_header_write_to_regmap() - Write XM header to regmap.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * The data in header is written to the XM addresses in the regmap.
+ *
+ * Return: 0 on success, else negative error code.
+ */
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header)
+{
+ struct cs_dsp_test *priv = header->test_priv;
+ unsigned int reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+
+ /*
+ * One 32-bit word corresponds to one 32-bit unpacked XM word so the
+ * blob can be written directly to the regmap.
+ */
+ return regmap_raw_write(priv->dsp->regmap, reg_addr,
+ header->blob_data, header->blob_size_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_write_to_regmap, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_create_mock_xm_header() - Create a dummy XM header.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @algs: Pointer to array of struct cs_dsp_mock_alg_def listing the
+ * dummy algorithm entries to include in the XM header.
+ * @num_algs: Number of entries in the algs array.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_xm_header.
+ */
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct cs_dsp_mock_xm_header *builder;
+ size_t total_bytes_required;
+ const void *header;
+ size_t header_size_bytes;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ header = &cs_dsp_mock_adsp2_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_adsp2_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ case WMFW_HALO:
+		header = &cs_dsp_mock_halo_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_halo_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "%s unexpected DSP type %d\n",
+ __func__, priv->dsp->type);
+ return NULL;
+ }
+
+ builder->blob_data = kunit_kzalloc(priv->test, total_bytes_required, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder->blob_data);
+ builder->blob_size_bytes = total_bytes_required;
+
+ memcpy(builder->blob_data, header, header_size_bytes);
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ cs_dsp_mock_xm_header_add_adsp2_algs(builder, algs, num_algs);
+ break;
+ case WMFW_HALO:
+ cs_dsp_mock_xm_header_add_halo_algs(builder, algs, num_algs);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_create_mock_xm_header, "FW_CS_DSP_KUNIT_TEST_UTILS");
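+
+/*
+ * Typical flow (sketch with hypothetical values): build a dummy XM
+ * header describing one algorithm, then write it into the mock regmap
+ * so that cs_dsp can enumerate the algorithm list at power-up:
+ *
+ *	static const struct cs_dsp_mock_alg_def algs[] = {
+ *		{ .id = 0xfafa, .ver = 0x100000, .xm_size_words = 164 },
+ *	};
+ *	struct cs_dsp_mock_xm_header *hdr =
+ *		cs_dsp_create_mock_xm_header(priv, algs, ARRAY_SIZE(algs));
+ *
+ *	KUNIT_ASSERT_EQ(priv->test, cs_dsp_mock_xm_header_write_to_regmap(hdr), 0);
+ */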
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
new file mode 100644
index 000000000000..fb8e4a5d189a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock regmap for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/regmap.h>
+
+static int cs_dsp_mock_regmap_read(void *context, const void *reg_buf,
+ const size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus read @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_gather_write(void *context,
+ const void *reg_buf, size_t reg_size,
+ const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus gather_write @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_write(void *context, const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus write @%#x", *(u32 *)val_buf);
+
+ return -EIO;
+}
+
+static const struct regmap_bus cs_dsp_mock_regmap_bus = {
+ .read = cs_dsp_mock_regmap_read,
+ .write = cs_dsp_mock_regmap_write,
+ .gather_write = cs_dsp_mock_regmap_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct reg_default adsp2_32bit_register_defaults[] = {
+ { 0xffe00, 0x0000 }, /* CONTROL */
+ { 0xffe02, 0x0000 }, /* CLOCKING */
+ { 0xffe04, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+	{ 0xffe30, 0x0000 },	/* WDMA_CONFIG_1 */
+ { 0xffe32, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0xffe34, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0xffe40, 0x0000 }, /* SCRATCH_0_1 */
+ { 0xffe42, 0x0000 }, /* SCRATCH_2_3 */
+};
+
+static const struct regmap_range adsp2_32bit_registers[] = {
+ regmap_reg_range(0x80000, 0x88ffe), /* PM */
+ regmap_reg_range(0xa0000, 0xa9ffe), /* XM */
+ regmap_reg_range(0xc0000, 0xc1ffe), /* YM */
+ regmap_reg_range(0xe0000, 0xe1ffe), /* ZM */
+ regmap_reg_range(0xffe00, 0xffe7c), /* CORE CTRL */
+};
+
+const unsigned int cs_dsp_mock_adsp2_32bit_sysbase = 0xffe00;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_32bit_rw = {
+ .yes_ranges = adsp2_32bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_32bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_32bit = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 2,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_32bit_rw,
+ .rd_table = &adsp2_32bit_rw,
+ .max_register = 0xffe7c,
+ .reg_defaults = adsp2_32bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_32bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default adsp2_16bit_register_defaults[] = {
+ { 0x1100, 0x0000 }, /* CONTROL */
+ { 0x1101, 0x0000 }, /* CLOCKING */
+ { 0x1104, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+	{ 0x1130, 0x0000 },	/* WDMA_CONFIG_1 */
+ { 0x1131, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0x1134, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0x1140, 0x0000 }, /* SCRATCH_0 */
+ { 0x1141, 0x0000 }, /* SCRATCH_1 */
+ { 0x1142, 0x0000 }, /* SCRATCH_2 */
+ { 0x1143, 0x0000 }, /* SCRATCH_3 */
+};
+
+static const struct regmap_range adsp2_16bit_registers[] = {
+ regmap_reg_range(0x001100, 0x001143), /* CORE CTRL */
+ regmap_reg_range(0x100000, 0x105fff), /* PM */
+ regmap_reg_range(0x180000, 0x1807ff), /* ZM */
+ regmap_reg_range(0x190000, 0x1947ff), /* XM */
+ regmap_reg_range(0x1a8000, 0x1a97ff), /* YM */
+};
+
+const unsigned int cs_dsp_mock_adsp2_16bit_sysbase = 0x001100;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_16bit_rw = {
+ .yes_ranges = adsp2_16bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_16bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_16bit = {
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_stride = 1,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_16bit_rw,
+ .rd_table = &adsp2_16bit_rw,
+ .max_register = 0x1a97ff,
+ .reg_defaults = adsp2_16bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_16bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default halo_register_defaults[] = {
+ /* CORE */
+ { 0x2b80010, 0 }, /* HALO_CORE_SOFT_RESET */
+ { 0x2b805c0, 0 }, /* HALO_SCRATCH1 */
+ { 0x2b805c8, 0 }, /* HALO_SCRATCH2 */
+ { 0x2b805d0, 0 }, /* HALO_SCRATCH3 */
+	{ 0x2b805d8, 0 },	/* HALO_SCRATCH4 */
+ { 0x2bc1000, 0 }, /* HALO_CCM_CORE_CONTROL */
+ { 0x2bc7000, 0 }, /* HALO_WDT_CONTROL */
+
+ /* SYSINFO */
+ { 0x25e2040, 0 }, /* HALO_AHBM_WINDOW_DEBUG_0 */
+ { 0x25e2044, 0 }, /* HALO_AHBM_WINDOW_DEBUG_1 */
+};
+
+static const struct regmap_range halo_readable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x25e0000, 0x25e004f), /* SYSINFO */
+ regmap_reg_range(0x25e2000, 0x25e2047), /* SYSINFO */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+static const struct regmap_range halo_writeable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+const unsigned int cs_dsp_mock_halo_core_base = 0x2b80000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_core_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const unsigned int cs_dsp_mock_halo_sysinfo_base = 0x25e0000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_sysinfo_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table halo_readable = {
+ .yes_ranges = halo_readable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_readable_registers),
+};
+
+static const struct regmap_access_table halo_writeable = {
+ .yes_ranges = halo_writeable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_writeable_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_halo = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &halo_writeable,
+ .rd_table = &halo_readable,
+ .max_register = 0x3804ffc,
+ .reg_defaults = halo_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(halo_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+/**
+ * cs_dsp_mock_regmap_drop_range() - drop a range of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @last_reg: Address of last register to drop.
+ */
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg)
+{
+ regcache_drop_region(priv->dsp->regmap, first_reg, last_reg);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_range, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_regs() - drop a number of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_regs: Number of registers to drop.
+ */
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs)
+{
+ int stride = regmap_get_reg_stride(priv->dsp->regmap);
+ unsigned int last = first_reg + (stride * (num_regs - 1));
+
+ cs_dsp_mock_regmap_drop_range(priv, first_reg, last);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_bytes() - drop a number of bytes from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_bytes: Number of bytes to drop from the cache. Will be rounded
+ * down to a whole number of registers. Trailing bytes that
+ * are not a multiple of the register size will not be dropped.
+ * (This is intended to help detect math errors in test code.)
+ */
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes)
+{
+ size_t num_regs = num_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+
+ cs_dsp_mock_regmap_drop_regs(priv, first_reg, num_regs);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
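+
+/*
+ * Example of the rounding behaviour (illustrative): with 32-bit
+ * registers, dropping 10 bytes drops 10 / 4 = 2 whole registers. The
+ * 2 trailing bytes stay cached, so if a test miscalculated a length
+ * the leftover dirty register will be caught by
+ * cs_dsp_mock_regmap_is_dirty().
+ */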
+
+/**
+ * cs_dsp_mock_regmap_drop_system_regs() - Drop DSP system registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ *
+ * Drops all DSP system registers from the regmap cache.
+ */
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x7c);
+ }
+ return;
+ case WMFW_HALO:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x47000);
+ }
+
+ /* sysinfo registers are read-only so don't drop them */
+ return;
+ default:
+ return;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_system_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_is_dirty() - Test for dirty registers in the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @drop_system_regs: If true the DSP system regs will be dropped from
+ * the cache before checking for dirty.
+ *
+ * All registers that are expected to be written must have been dropped
+ * from the cache (DSP system registers can be dropped by passing
+ * drop_system_regs == true). If any unexpected registers were written
+ * there will still be dirty entries in the cache and a cache sync will
+ * cause a write.
+ *
+ * Return: true if there were dirty entries, false if not.
+ */
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs)
+{
+ if (drop_system_regs)
+ cs_dsp_mock_regmap_drop_system_regs(priv);
+
+ priv->saw_bus_write = false;
+ regcache_cache_only(priv->dsp->regmap, false);
+ regcache_sync(priv->dsp->regmap);
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return priv->saw_bus_write;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_is_dirty, "FW_CS_DSP_KUNIT_TEST_UTILS");
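+
+/*
+ * Sketch of the intended pattern (hypothetical test body): after the
+ * operation under test has run, drop every register it was expected to
+ * write, then assert that nothing else is dirty:
+ *
+ *	cs_dsp_mock_regmap_drop_regs(priv, expected_reg, expected_num_regs);
+ *	KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+ */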
+
+/**
+ * cs_dsp_mock_regmap_init() - Initialize a mock regmap.
+ *
+ * @priv: Pointer to struct cs_dsp_test object. This must have a
+ * valid pointer to a struct cs_dsp in which the type and
+ * rev fields are set to the type of DSP to be simulated.
+ *
+ * On success the priv->dsp->regmap will point to the created
+ * regmap instance.
+ *
+ * Return: zero on success, else negative error code.
+ */
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv)
+{
+ const struct regmap_config *config;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_HALO:
+ config = &cs_dsp_mock_regmap_halo;
+ break;
+ case WMFW_ADSP2:
+ if (priv->dsp->rev == 0)
+ config = &cs_dsp_mock_regmap_adsp2_16bit;
+ else
+ config = &cs_dsp_mock_regmap_adsp2_32bit;
+ break;
+ default:
+ config = NULL;
+ break;
+ }
+
+ priv->dsp->regmap = devm_regmap_init(priv->dsp->dev,
+ &cs_dsp_mock_regmap_bus,
+ priv,
+ config);
+ if (IS_ERR(priv->dsp->regmap)) {
+ ret = PTR_ERR(priv->dsp->regmap);
+ kunit_err(priv->test, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ /* Put regmap in cache-only so it accumulates the writes done by cs_dsp */
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
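+
+/*
+ * Minimal setup sketch (assumes the test case has already created
+ * priv->dsp and chosen the DSP to simulate):
+ *
+ *	priv->dsp->type = WMFW_HALO;
+ *	KUNIT_ASSERT_EQ(priv->test, cs_dsp_mock_regmap_init(priv), 0);
+ *	// all cs_dsp register writes now land in the regmap cache
+ */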
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
new file mode 100644
index 000000000000..cbd0bf72b7de
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Utilities for Cirrus Logic DSP driver testing");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
new file mode 100644
index 000000000000..5a3ac03ac37f
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// wmfw file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_WMFW_BUF_SIZE 131072
+
+struct cs_dsp_mock_wmfw_builder {
+ struct cs_dsp_test *test_priv;
+ int format_version;
+ void *buf;
+ size_t buf_size_bytes;
+ void *write_p;
+ size_t bytes_used;
+
+ void *alg_data_header;
+ unsigned int num_coeffs;
+};
+
+struct wmfw_adsp2_halo_header {
+ struct wmfw_header header;
+ struct wmfw_adsp2_sizes sizes;
+ struct wmfw_footer footer;
+} __packed;
+
+struct wmfw_long_string {
+ __le16 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+struct wmfw_short_string {
+ u8 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+/**
+ * cs_dsp_mock_wmfw_format_version() - Return format version.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Format version.
+ */
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ return builder->format_version;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_format_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct firmware *fw;
+
+ if (!builder)
+ return NULL;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_raw_block() - Add a block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @block_type: Block type.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data,
+ * or NULL if no data.
+ * @payload_len_bytes: Length of payload data in bytes, or zero.
+ */
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int block_type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_region *header = builder->write_p;
+ unsigned int bytes_needed = struct_size_t(struct wmfw_region, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ header->offset = cpu_to_le32(offset | (block_type << 24));
+ header->len = cpu_to_le32(payload_len_bytes);
+ if (payload_len_bytes > 0)
+ memcpy(header->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_info() - Add an info block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+	if (info_len % 4) {
+		/* Create a zero-padded copy with length a multiple of 4 */
+		tmp = kunit_kzalloc(builder->test_priv->test,
+				    round_up(info_len, 4), GFP_KERNEL);
+		KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+		/* Copy only strlen(info) bytes so we don't read past info */
+		memcpy(tmp, info, info_len);
+		info = tmp;
+		info_len = round_up(info_len, 4);
+	}
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
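+
+/*
+ * Padding example (illustrative): strlen("Hello") is 5, so the string
+ * is copied into an 8-byte zero-filled buffer and the info block
+ * payload length written to the file is 8.
+ */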
+
+/**
+ * cs_dsp_mock_wmfw_add_data_block() - Add a data block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @mem_region: Memory region for the block.
+ * @mem_offset_dsp_words: Offset to start of destination in DSP words.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Blob payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, mem_region, mem_offset_dsp_words,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_data_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
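+/**
+ * cs_dsp_mock_wmfw_start_alg_info_block() - Start an algorithm information
+ *					     data block.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @alg_id: Algorithm ID.
+ * @name: Algorithm name, or NULL.
+ * @description: Algorithm description, or NULL.
+ *
+ * Starts an algorithm information block. Coefficient descriptors can
+ * then be added with cs_dsp_mock_wmfw_add_coeff_desc() and the block is
+ * completed by cs_dsp_mock_wmfw_end_alg_info_block().
+ */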
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description)
+{
+ struct wmfw_region *rgn = builder->write_p;
+ struct wmfw_adsp_alg_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, name_len, description_len;
+ int offset;
+
+ /* Bytes needed for region header */
+ bytes_needed = offsetof(struct wmfw_region, data);
+
+ builder->alg_data_header = builder->write_p;
+ builder->num_coeffs = 0;
+
+ switch (builder->format_version) {
+ case 0:
+ KUNIT_FAIL(builder->test_priv->test, "wmfwV0 does not have alg blocks\n");
+ return;
+ case 1:
+ bytes_needed += offsetof(struct wmfw_adsp_alg_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->id = cpu_to_le32(alg_id);
+ if (name)
+ strscpy(v1->name, name, sizeof(v1->name));
+
+ if (description)
+ strscpy(v1->descr, description, sizeof(v1->descr));
+ break;
+ default:
+ name_len = 0;
+ description_len = 0;
+
+ if (name)
+ name_len = strlen(name);
+
+ if (description)
+ description_len = strlen(description);
+
+ bytes_needed += sizeof(__le32); /* alg id */
+ bytes_needed += round_up(name_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32); /* coeff count */
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ *(__force __le32 *)&rgn->data[0] = cpu_to_le32(alg_id);
+
+ shortstring = (struct wmfw_short_string *)&rgn->data[4];
+ shortstring->len = name_len;
+
+ if (name_len)
+ memcpy(shortstring->data, name, name_len);
+
+ /* Round up to next __le32 */
+ offset = round_up(4 + struct_size_t(struct wmfw_short_string, data, name_len),
+ sizeof(__le32));
+
+ longstring = (struct wmfw_long_string *)&rgn->data[offset];
+ longstring->len = cpu_to_le16(description_len);
+
+ if (description_len)
+ memcpy(longstring->data, description, description_len);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_start_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
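+/**
+ * cs_dsp_mock_wmfw_add_coeff_desc() - Add a coefficient descriptor.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @def: Pointer to struct cs_dsp_mock_coeff_def describing the coefficient.
+ *
+ * Appends a coefficient descriptor to the algorithm information block
+ * started by cs_dsp_mock_wmfw_start_alg_info_block().
+ */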
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def)
+{
+ struct wmfw_adsp_coeff_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, shortname_len, fullname_len, description_len;
+ __le32 *ple32;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, builder->alg_data_header);
+
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ bytes_needed = offsetof(struct wmfw_adsp_coeff_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ v1 = (struct wmfw_adsp_coeff_data *)builder->write_p;
+ memset(v1, 0, sizeof(*v1));
+ v1->hdr.offset = cpu_to_le16(def->offset_dsp_words);
+ v1->hdr.type = cpu_to_le16(def->mem_type);
+ v1->hdr.size = cpu_to_le32(bytes_needed - sizeof(v1->hdr));
+ v1->ctl_type = cpu_to_le16(def->type);
+ v1->flags = cpu_to_le16(def->flags);
+ v1->len = cpu_to_le32(def->length_bytes);
+
+ if (def->fullname)
+ strscpy(v1->name, def->fullname, sizeof(v1->name));
+
+ if (def->description)
+ strscpy(v1->descr, def->description, sizeof(v1->descr));
+ break;
+ default:
+ fullname_len = 0;
+ description_len = 0;
+ shortname_len = strlen(def->shortname);
+
+ if (def->fullname)
+ fullname_len = strlen(def->fullname);
+
+ if (def->description)
+ description_len = strlen(def->description);
+
+		bytes_needed = sizeof(__le32) * 2; /* (mem type | offset) and size words */
+ bytes_needed += round_up(shortname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(fullname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+		bytes_needed += sizeof(__le32) * 2; /* (flags | ctl type) and length words */
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ ple32 = (__force __le32 *)builder->write_p;
+ *ple32++ = cpu_to_le32(def->offset_dsp_words | (def->mem_type << 16));
+ *ple32++ = cpu_to_le32(bytes_needed - sizeof(__le32) - sizeof(__le32));
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = shortname_len;
+ memcpy(shortstring->data, def->shortname, shortname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, shortname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = fullname_len;
+ memcpy(shortstring->data, def->fullname, fullname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, fullname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ longstring = (__force struct wmfw_long_string *)ple32;
+ longstring->len = cpu_to_le16(description_len);
+ memcpy(longstring->data, def->description, description_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_long_string, data, description_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ *ple32++ = cpu_to_le32(def->type | (def->flags << 16));
+ *ple32 = cpu_to_le32(def->length_bytes);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+ builder->num_coeffs++;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_coeff_desc, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
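+/**
+ * cs_dsp_mock_wmfw_end_alg_info_block() - Complete an algorithm information
+ *					   data block.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Fills in the block length and coefficient count of the algorithm
+ * information block started by cs_dsp_mock_wmfw_start_alg_info_block().
+ */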
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_region *rgn = builder->alg_data_header;
+ struct wmfw_adsp_alg_data *v1;
+ const struct wmfw_short_string *shortstring;
+ const struct wmfw_long_string *longstring;
+ size_t offset;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, rgn);
+
+ /* Fill in data size */
+ rgn->len = cpu_to_le32((u8 *)builder->write_p - (u8 *)rgn->data);
+
+ /* Fill in coefficient count */
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->ncoeff = cpu_to_le32(builder->num_coeffs);
+ break;
+ default:
+ offset = 4; /* skip alg id */
+
+ /* Get name length and round up to __le32 multiple */
+ shortstring = (const struct wmfw_short_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_short_string, data, shortstring->len),
+ sizeof(__le32));
+
+ /* Get description length and round up to __le32 multiple */
+ longstring = (const struct wmfw_long_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_long_string, data,
+ le16_to_cpu(longstring->len)),
+ sizeof(__le32));
+
+ *(__force __le32 *)&rgn->data[offset] = cpu_to_le32(builder->num_coeffs);
+ break;
+ }
+
+ builder->alg_data_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_end_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_init_adsp2_halo_wmfw(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_adsp2_halo_header *hdr = builder->buf;
+ const struct cs_dsp *dsp = builder->test_priv->dsp;
+
+ memcpy(hdr->header.magic, "WMFW", sizeof(hdr->header.magic));
+ hdr->header.len = cpu_to_le32(sizeof(*hdr));
+ hdr->header.ver = builder->format_version;
+ hdr->header.core = dsp->type;
+ hdr->header.rev = cpu_to_le16(dsp->rev);
+
+ hdr->sizes.pm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_PM));
+ hdr->sizes.xm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_XM));
+ hdr->sizes.ym = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_YM));
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ hdr->sizes.zm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_ZM));
+ break;
+ default:
+ break;
+ }
+
+ builder->write_p = &hdr[1];
+ builder->bytes_used += sizeof(*hdr);
+}
+
+/**
+ * cs_dsp_mock_wmfw_init() - Initialize a struct cs_dsp_mock_wmfw_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required wmfw format version.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_wmfw_builder.
+ */
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version)
+{
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ /* If format version isn't given use the default for the target core */
+ if (format_version < 0) {
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ format_version = 2;
+ break;
+ default:
+ format_version = 3;
+ break;
+ }
+ }
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+
+ builder->test_priv = priv;
+ builder->format_version = format_version;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_WMFW_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ builder->buf_size_bytes = CS_DSP_MOCK_WMFW_BUF_SIZE;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ case WMFW_HALO:
+ cs_dsp_init_adsp2_halo_wmfw(builder);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
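+
+/*
+ * Builder usage sketch (hypothetical info string): create a builder
+ * using the default format version for the core, add content, then
+ * wrap the result for passing to cs_dsp:
+ *
+ *	struct cs_dsp_mock_wmfw_builder *builder =
+ *		cs_dsp_mock_wmfw_init(priv, -1);
+ *	struct firmware *fw;
+ *
+ *	cs_dsp_mock_wmfw_add_info(builder, "test firmware");
+ *	fw = cs_dsp_mock_wmfw_get_firmware(builder);
+ *	// fw can now be given to cs_dsp_power_up()
+ */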
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
new file mode 100644
index 000000000000..1e161bbc5b4a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a XM header with an algorithm list in the cached regmap.
+ * 3) Create dummy wmfw file to satisfy cs_dsp.
+ * 4) Create bin file content.
+ * 5) Call cs_dsp_power_up() with the bin file.
+ * 6) Readback the cached value of registers that should have been written and
+ * check they have the correct value.
+ * 7) All the registers that are expected to have been written are dropped from
+ * the cache (including the XM header). This should leave the cache clean.
+ * 8) If the cache is still dirty there have been unexpected writes.
+ *
+ * There are multiple different schemes used for addressing across
+ * ADSP2 and Halo Core DSPs:
+ *
+ * dsp words: The addressing scheme used by the DSP; pointers and lengths
+ * in DSP memory use this. A memory region (XM, YM, ZM) is
+ * also required to create a unique DSP memory address.
+ * registers: Addresses in the register map. Older ADSP2 devices have
+ * 16-bit registers with an address stride of 1. Newer ADSP2
+ * devices have 32-bit registers with an address stride of 2.
+ * Halo Core devices have 32-bit registers with a stride of 4.
+ * unpacked: Registers that have a 1:1 mapping to DSP words
+ * packed: Registers that pack multiple DSP words more efficiently into
+ * multiple 32-bit registers. Because of this the relationship
+ * between a packed _register_ address and the corresponding
+ * _dsp word_ address is different from unpacked registers.
+ * Packed registers can only be accessed as a group of
+ * multiple registers, therefore can only read/write a group
+ * of multiple DSP words.
+ * Packed registers only exist on Halo Core DSPs.
+ *
+ * Addresses can also be relative to the start of an algorithm, and this
+ * can be expressed in dsp words, register addresses, or bytes.
+ */
+
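+/*
+ * Worked addressing example (illustrative, Halo core): unpacked XM maps
+ * one DSP word per 32-bit register with a stride of 4, so DSP word 2 is
+ * at XM base + (2 * 4). In packed XM each group of 3 registers holds 4
+ * DSP words, so the register containing the start of DSP word 2 is
+ * packed base + (((2 * 3) / 4) * 4) = packed base + 4.
+ */
+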
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+};
+
+struct bin_test_param {
+ const char *name;
+ int mem_type;
+ unsigned int offset_words;
+ int alg_idx;
+};
+
+static const struct cs_dsp_mock_alg_def bin_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xfbfb,
+ .ver = 0x100000,
+ .xm_size_words = 99,
+ .ym_size_words = 99,
+ .zm_size_words = 99,
+ },
+ {
+ .id = 0xc321,
+ .ver = 0x100000,
+ .xm_size_words = 120,
+ .ym_size_words = 120,
+ .zm_size_words = 120,
+ },
+ {
+ .id = 0xb123,
+ .ver = 0x100000,
+ .xm_size_words = 96,
+ .ym_size_words = 96,
+ .zm_size_words = 96,
+ },
+};
+
+/*
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ * There are 3 registers for every 4 packed words.
+ */
+static unsigned int _num_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ return (num_dsp_words * 3) / 4;
+}
+
+/* bin file that patches a single DSP word */
+static void bin_patch_one_word(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with a single payload that patches consecutive words */
+static void bin_patch_one_multiword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple one-word payloads that patch consecutive words */
+static void bin_patch_multi_oneword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(payload_data); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + i) * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads that patch a block of consecutive
+ * words but the payloads are not in address order.
+ */
+static void bin_patch_multi_oneword_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ static const u8 word_order[] = { 10, 2, 12, 4, 0, 11, 6, 1, 3, 15, 5, 13, 8, 7, 9, 14 };
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+ static_assert(ARRAY_SIZE(word_order) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_order); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + word_order[i]) *
+ reg_inc_per_word,
+ &payload_data[word_order[i]], sizeof(payload_data[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads. The payloads are not in address
+ * order and collectively do not patch a contiguous block of memory.
+ */
+static void bin_patch_multi_oneword_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ static const u8 word_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ u32 payload_data[44];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ word_offsets[i] * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + word_offsets[i]) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &reg_val, &payload_data[i], sizeof(reg_val));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_one_word_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ unsigned int alg_xm_base_words, alg_ym_base_words, alg_zm_base_words;
+ unsigned int reg_addr;
+ u32 payload_data[3];
+ struct firmware *fw;
+ u32 reg_val;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_XM);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_YM);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_zm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_ZM);
+ } else {
+ alg_zm_base_words = 0;
+ }
+
+ /* Add words to XM, YM and ZM */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_XM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[0], sizeof(payload_data[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_YM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[1], sizeof(payload_data[1]));
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_ZM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[2], sizeof(payload_data[2]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) +
+ ((alg_xm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[0]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) +
+ ((alg_ym_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[1]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM) +
+ ((alg_zm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[2]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
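+/*
+ * Illustrative note: not every supported core has a ZM region, which
+ * is why the ZM patch and its readback above are guarded by
+ * cs_dsp_mock_has_zm(). When ZM is absent the bin file simply carries
+ * one fewer data block, and alg_zm_base_words stays 0 because it is
+ * never used.
+ */
+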
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ */
+static void bin_patch_one_word_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_one_word_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
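+/*
+ * Illustrative note: each bin data block carries its own algorithm id
+ * and offset, so the order of blocks in the file does not affect
+ * where the data lands. With alg_order = { 3, 0, 2, 1 },
+ * payload_data[0] is written into algorithm 3's region,
+ * payload_data[1] into algorithm 0's, and so on; the readback loop
+ * above walks alg_order the same way to find each value.
+ */
+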
+/* bin file that patches a single packed block of DSP words */
+static void bin_patch_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
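+/*
+ * Worked example of the packed addressing above (illustrative,
+ * hypothetical numbers): packed memory stores four 24-bit DSP words
+ * in three 32-bit registers, so _num_words_to_num_packed_regs(n) is
+ * n * 3 / 4 and patch positions are rounded up to 4-word boundaries.
+ * If alg_base_words = 2 and offset_words = 3 then
+ * patch_pos_words = round_up(5, 4) = 8,
+ * patch_pos_in_packed_regs = 8 * 3 / 4 = 6, and the payload is
+ * written at mem_base + (6 * 4) bytes.
+ */
+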
+/*
+ * Patch data that is one word longer than a packed block using one
+ * packed block followed by one unpacked word.
+ */
+static void bin_patch_1_packed_1_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked word following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
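+/*
+ * Illustrative note: packed memory can only be written in whole
+ * 4-word groups, so payload lengths that are not a multiple of 4
+ * words leave remainder words past the last packed boundary. The
+ * trailing-word tests patch those words through the equivalent
+ * unpacked memory region, where word (patch_pos_words + 4) of the
+ * algorithm sits at byte offset
+ * ((patch_pos_words + 4) - alg_base_words) * 4.
+ */
+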
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by two blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_2_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by three blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_3_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 6) - alg_base_words) * 4,
+ &unpacked_payloads[2], sizeof(unpacked_payloads[2]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by a block of two unpacked words.
+ */
+static void bin_patch_1_packed_2_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by a block of three unpacked words.
+ */
+static void bin_patch_1_packed_3_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts one word before a packed boundary using one
+ * unpacked word followed by one packed block.
+ */
+static void bin_patch_1_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked word */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 1) * 4;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
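+/*
+ * Illustrative note: the leading-word tests mirror the trailing-word
+ * tests. The packed patch is pushed one packed group further on
+ * (round_up(...) + 4) so there is room in front of it, and the words
+ * immediately before that boundary are patched through unpacked
+ * memory, e.g. word (packed_patch_pos_words - 1) at byte offset
+ * ((packed_patch_pos_words - 1) - alg_base_words) * 4.
+ */
+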
+/*
+ * Patch data that starts two words before a packed boundary using two
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_2_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using one
+ * block of two unpacked words followed by one packed block.
+ */
+static void bin_patch_2_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using three
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_3_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[2], sizeof(unpacked_payload[2]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using one
+ * block of three unpacked words followed by one packed block.
+ */
+static void bin_patch_3_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple payloads that each patch one packed block. */
+static void bin_patch_multi_onepacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(packed_payloads); ++i) {
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words + (i * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i], sizeof(packed_payloads[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
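+/*
+ * Illustrative note: the eight payloads above sit at consecutive
+ * 4-word boundaries, so in register space they form one contiguous
+ * run of 8 * 3 packed registers. A single regmap_raw_read() of
+ * sizeof(readback) bytes can therefore capture all of them for one
+ * KUNIT_EXPECT_MEMEQ() against packed_payloads.
+ */
+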
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order.
+ */
+static void bin_patch_multi_onepacked_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 payload_order[] = { 4, 3, 6, 1, 0, 7, 5, 2 };
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(payload_order) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(payload_order); ++i) {
+ patch_pos_in_packed_regs =
+ _num_words_to_num_packed_regs(patch_pos_words + (payload_order[i] * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[payload_order[i]],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content in registers should match the order of data in packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order. The patched memory is not contiguous.
+ */
+static void bin_patch_multi_onepacked_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 word_offsets[] = { 60, 24, 76, 4, 40, 52, 48, 36, 12 };
+ u32 packed_payloads[9][3], readback[3];
+ unsigned int alg_base_words, alg_base_in_packed_regs;
+ unsigned int patch_pos_words, patch_pos_in_packed_regs, payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads[0]));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads[i], sizeof(packed_payloads[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_1_packed_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_xm_payload[3], packed_ym_payload[3], readback[3];
+ unsigned int alg_xm_base_words, alg_ym_base_words;
+ unsigned int xm_patch_pos_words, ym_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_xm_payload));
+ static_assert(sizeof(readback) == sizeof(packed_ym_payload));
+
+ get_random_bytes(packed_xm_payload, sizeof(packed_xm_payload));
+ get_random_bytes(packed_ym_payload, sizeof(packed_ym_payload));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_XM_PACKED);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_YM_PACKED);
+
+ /* Round patch start word up to a packed boundary */
+ xm_patch_pos_words = round_up(alg_xm_base_words + param->offset_words, 4);
+ ym_patch_pos_words = round_up(alg_ym_base_words + param->offset_words, 4);
+
+ /* Add XM and YM patches */
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_xm_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_XM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_xm_payload, sizeof(packed_xm_payload));
+
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_ym_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_YM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_ym_payload, sizeof(packed_ym_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed XM registers should match packed_xm_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_XM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_xm_payload, sizeof(packed_xm_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_xm_payload));
+
+ /* Content of packed YM registers should match packed_ym_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_YM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_ym_payload, sizeof(packed_ym_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_ym_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ */
+static void bin_patch_1_packed_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /* For each algorithm patch one DSP word to a value from packed_payload */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values into the expected location in readback[] so that
+ * the content of readback[] should match packed_payload[]
+ */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_1_packed_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /*
+ * For each algorithm index in alg_order[] patch one DSP word in
+ * that algorithm to a value from packed_payload.
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values at their expected locations in readback[] so that
+ * readback[] should match packed_payload[].
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that contains a mix of packed and unpacked words.
+ * Payloads are in random offset order. Offsets that fall on a packed
+ * boundary are written as a packed block; offsets that do not are
+ * written as a single unpacked word.
+ */
+static void bin_patch_mixed_packed_unpacked_random(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
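+ /*
+ * offset_words[] is a random permutation of the word offsets 0..79,
+ * so every word in an 80-word window is patched exactly once, in
+ * shuffled order.
+ */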
+ static const u8 offset_words[] = {
+ 58, 68, 50, 10, 44, 17, 74, 36, 8, 7, 49, 11, 78, 57, 65, 2,
+ 48, 38, 22, 70, 77, 21, 61, 56, 75, 34, 27, 3, 31, 20, 43, 63,
+ 5, 30, 32, 25, 33, 79, 29, 0, 37, 60, 69, 52, 13, 12, 24, 26,
+ 4, 51, 76, 72, 16, 6, 39, 62, 15, 41, 28, 73, 53, 40, 45, 54,
+ 14, 55, 46, 66, 64, 59, 23, 9, 67, 47, 19, 71, 35, 18, 42, 1,
+ };
+ struct {
+ u32 packed[80][3];
+ u32 unpacked[80];
+ } *payload;
+ u32 readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ struct firmware *fw;
+ int i;
+
+ payload = kunit_kmalloc(test, sizeof(*payload), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, payload);
+
+ get_random_bytes(payload->packed, sizeof(payload->packed));
+ get_random_bytes(payload->unpacked, sizeof(payload->unpacked));
+
+ /* Create a patch entry for every offset in offset_words[] */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ /*
+ * If the offset is on a packed boundary use a packed payload,
+ * else use a single unpacked word.
+ */
+ patch_pos_words = alg_base_words + offset_words[i];
+ if ((patch_pos_words % 4) == 0) {
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ param->mem_type,
+ payload_offset,
+ payload->packed[i],
+ sizeof(payload->packed[i]));
+ } else {
+ payload_offset = offset_words[i] * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ unpacked_mem_type,
+ payload_offset,
+ &payload->unpacked[i],
+ sizeof(payload->unpacked[i]));
+ }
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /*
+ * Read back each packed block that should have been written and
+ * check that it matches the corresponding entry in
+ * payload->packed[].
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is not on a packed boundary */
+ if ((patch_pos_words % 4) != 0)
+ continue;
+
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload->packed[i], sizeof(payload->packed[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->packed[i]));
+ }
+
+ /*
+ * Read back each unpacked word that should have been written and
+ * check that it matches the corresponding entry in
+ * payload->unpacked[].
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ unpacked_mem_type);
+
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is on a packed boundary */
+ if ((patch_pos_words % 4) == 0)
+ continue;
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words * 4);
+
+ readback[0] = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[0], sizeof(readback[0])),
+ 0);
+ KUNIT_EXPECT_EQ(test, readback[0], payload->unpacked[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->unpacked[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Bin file with a name block and multiple info blocks. These should be
+ * skipped over and the patch still applied.
+ */
+static void bin_patch_name_and_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ char *infobuf;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ WMFW_ADSP2_YM);
+
+ /* Add a name block and info block */
+ cs_dsp_mock_bin_add_name(priv->local->bin_builder, "The name");
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, "Some info");
+
+ /* Add a big block of info */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
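+ /* Repeat the marker string until the 512-byte buffer is full */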
+ while (strlcat(infobuf, "Waffle{Blah}\n", 512) < 512)
+ ;
+
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, infobuf);
+
+ /* Add a patch */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM,
+ 0,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg_addr += alg_base_words * reg_inc_per_word;
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+}
+
+static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_mock_xm_header *xm_hdr;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!priv->local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /* Create an XM header */
+ xm_hdr = cs_dsp_create_mock_xm_header(priv,
+ bin_test_mock_algs,
+ ARRAY_SIZE(bin_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_hdr);
+ ret = cs_dsp_mock_xm_header_write_to_regmap(xm_hdr);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv->local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
+
+ /* We must provide a dummy wmfw to load */
+ priv->local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, -1);
+ priv->local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when the test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+/* Parameterize on choice of XM or YM with a range of word offsets */
+static const struct bin_test_param x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 20 },
+
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 20 },
+};
+
+/* Parameterize on ZM with a range of word offsets */
+static const struct bin_test_param z_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 20 },
+};
+
+/*
+ * Parameterize on choice of packed XM or YM with a range of word offsets.
+ * Offsets are multiples of 4 because packed blocks start on 4-word
+ * boundaries.
+ */
+static const struct bin_test_param packed_x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 12 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 12 },
+};
+
+static void x_or_y_or_z_and_offset_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s@%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_offset,
+ x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(z_and_offset,
+ z_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_offset,
+ packed_x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+/* Parameterize on choice of packed XM or YM */
+static const struct bin_test_param packed_x_or_y_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+};
+
+static void x_or_y_or_z_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(packed_x_or_y, packed_x_or_y_param_cases, x_or_y_or_z_param_desc);
+
+static const struct bin_test_param offset_param_cases[] = {
+ { .offset_words = 0 },
+ { .offset_words = 1 },
+ { .offset_words = 2 },
+ { .offset_words = 3 },
+ { .offset_words = 4 },
+ { .offset_words = 23 },
+ { .offset_words = 22 },
+ { .offset_words = 21 },
+ { .offset_words = 20 },
+};
+
+static void offset_param_desc(const struct bin_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "@%u", param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(offset, offset_param_cases, offset_param_desc);
+
+static const struct bin_test_param alg_param_cases[] = {
+ { .alg_idx = 0 },
+ { .alg_idx = 1 },
+ { .alg_idx = 2 },
+ { .alg_idx = 3 },
+};
+
+static void alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg[%u] (%#x)",
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(alg, alg_param_cases, alg_param_desc);
+
+static const struct bin_test_param x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 3 },
+
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 3 },
+};
+
+static void x_or_y_or_z_and_alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s alg[%u] (%#x)",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_alg, x_or_y_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param z_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(z_and_alg, z_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param packed_x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 3 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_alg, packed_x_or_y_and_alg_param_cases,
+ x_or_y_or_z_and_alg_param_desc);
+
+static struct kunit_case cs_dsp_bin_test_cases_halo[] = {
+ /* Unpacked memory */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* Packed memory */
+ KUNIT_CASE_PARAM(bin_patch_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_1_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_sparse_unordered,
+ packed_x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_mixed_packed_unpacked_random,
+ packed_x_or_y_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = {
+ /* XM and YM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* ZM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, z_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, z_and_offset_gen_params),
+
+ /* Other */
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_test_halo = {
+ .name = "cs_dsp_bin_halo",
+ .init = cs_dsp_bin_test_halo_init,
+ .test_cases = cs_dsp_bin_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_adsp2_32bit",
+ .init = cs_dsp_bin_test_adsp2_32bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_adsp2_16bit",
+ .init = cs_dsp_bin_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_test_halo,
+ &cs_dsp_bin_test_adsp2_32bit,
+ &cs_dsp_bin_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
new file mode 100644
index 000000000000..5dcf62f19faf
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+ int wmfw_version;
+};
+
+struct cs_dsp_bin_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_bin_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/* Load a bin containing unknown blocks. They should be skipped. */
+static void bin_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the bin */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf500, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xc300, 0,
+ random_data, sizeof(random_data));
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a bin that doesn't have a valid magic marker. */
+static void bin_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+
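+ /*
+ * A valid bin starts with the magic string "WMDR". Corrupt each
+ * character in turn and check that the file is rejected.
+ */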
+ memcpy((void *)bin->data, "WMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "xMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WxDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMxR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMDx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memset((void *)bin->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Load a bin that is too short for a valid header. */
+static void bin_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ do {
+ bin->size--;
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ } while (bin->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void bin_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ unsigned int real_len, len;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ real_len = le32_to_cpu(header->len);
+
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
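+ /*
+ * Lengths slightly larger than the real header length should also
+ * be rejected, since block parsing would then start at a bogus
+ * offset within the file.
+ */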
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Wrong core type in header. */
+static void bin_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+
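+ /* Any core_ver that doesn't match the target DSP core type is invalid */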
+ header->core_ver = cpu_to_le32(0);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(priv->dsp->type + 1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(0xff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void bin_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int header_length;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
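+ /* Measure the size of a bare header by generating an empty bin */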
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header_length = bin->size;
+ kunit_kfree(test, bin);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ NULL, 0);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_ASSERT_GT(test, bin->size, header_length);
+
+ for (bin->size--; bin->size > header_length; bin->size--) {
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* File too short to contain the block payload */
+static void bin_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ static const u8 payload[256] = { };
+ int i;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
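+ /* Add a block with a 256-byte payload; the file is truncated below */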
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ for (i = 0; i < sizeof(payload); i++) {
+ bin->size--;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void bin_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ struct wmfw_coeff_item *block;
+ u32 payload = 0;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ &payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ block = (struct wmfw_coeff_item *)&bin->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the bin */
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(block->type), param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(block->len), sizeof(payload));
+
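+ /* A range of lengths that exceed the remaining file size must be rejected */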
+ block->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+static void cs_dsp_bin_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate-limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_bin_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_bin_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when the test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_bin_err_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 1);
+}
+
+static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
+
+ { } /* terminator */
+};
+
+static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+/* Some block types to test against, including illegal types */
+static const struct cs_dsp_bin_test_param bin_test_block_types_cases[] = {
+ { .block_type = WMFW_INFO_TEXT << 8 },
+ { .block_type = WMFW_METADATA << 8 },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_XM },
+ { .block_type = 0x33 },
+ { .block_type = 0xf500 },
+ { .block_type = 0xc000 },
+};
+
+KUNIT_ARRAY_PARAM(bin_test_block_types,
+ bin_test_block_types_cases,
+ cs_dsp_bin_err_block_types_desc);
+
+static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+ KUNIT_CASE(bin_load_with_unknown_blocks),
+ KUNIT_CASE(bin_err_wrong_magic),
+ KUNIT_CASE(bin_err_too_short_for_header),
+ KUNIT_CASE(bin_err_bad_header_length),
+ KUNIT_CASE(bin_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_halo = {
+ .name = "cs_dsp_bin_err_halo",
+ .init = cs_dsp_bin_err_test_halo_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_err_adsp2_32bit",
+ .init = cs_dsp_bin_err_test_adsp2_32bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_err_adsp2_16bit",
+ .init = cs_dsp_bin_err_test_adsp2_16bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_err_test_halo,
+ &cs_dsp_bin_err_test_adsp2_32bit,
+ &cs_dsp_bin_err_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
new file mode 100644
index 000000000000..8a9b66a3b7d3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
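+/* Local copies of ADSP2 register definitions used to fake a watchdog timeout */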
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+
+ int num_control_add;
+ int num_control_remove;
+ int num_pre_run;
+ int num_post_run;
+ int num_pre_stop;
+ int num_post_stop;
+ int num_watchdog_expired;
+
+ struct cs_dsp_coeff_ctl *passed_ctl[16];
+ struct cs_dsp *passed_dsp;
+};
+
+struct cs_dsp_callbacks_test_param {
+ const struct cs_dsp_client_ops *ops;
+ const char *case_name;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_callbacks_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+static int cs_dsp_test_control_add_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_add] = ctl;
+ local->num_control_add++;
+
+ return 0;
+}
+
+static void cs_dsp_test_control_remove_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_remove] = ctl;
+ local->num_control_remove++;
+}
+
+static int cs_dsp_test_pre_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_run++;
+
+ return 0;
+}
+
+static int cs_dsp_test_post_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_run++;
+
+ return 0;
+}
+
+static void cs_dsp_test_pre_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_stop++;
+}
+
+static void cs_dsp_test_post_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_stop++;
+}
+
+static void cs_dsp_test_watchdog_expired_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_watchdog_expired++;
+}
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_client_ops = {
+ .control_add = cs_dsp_test_control_add_callback,
+ .control_remove = cs_dsp_test_control_remove_callback,
+ .pre_run = cs_dsp_test_pre_run_callback,
+ .post_run = cs_dsp_test_post_run_callback,
+ .pre_stop = cs_dsp_test_pre_stop_callback,
+ .post_stop = cs_dsp_test_post_stop_callback,
+ .watchdog_expired = cs_dsp_test_watchdog_expired_callback,
+};
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_empty_client_ops = {
+ /* No entries */
+};
+
+static void cs_dsp_test_run_stop_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
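+ /* A second run/stop cycle should invoke the callbacks again */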
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 2);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+}
+
+static void cs_dsp_test_ctl_v1_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Add a control for each memory */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ def.shortname = "zm";
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "ym";
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "xm";
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
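+ /*
+ * Controls are added at the head of ctl_list, so walking the list
+ * in reverse visits them in the order they were added.
+ */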
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 3);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_ctl_v2_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char name[2] = { };
+ int i;
+
+ /* Add some controls */
+ def.shortname = name;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ for (i = 0; i < ARRAY_SIZE(local->passed_ctl); ++i) {
+ name[0] = 'A' + i;
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, ARRAY_SIZE(local->passed_ctl));
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+
+ /* Add a control */
+ def.shortname = "A";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Run a sequence of ops that would invoke callbacks */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ cs_dsp_stop(priv->dsp);
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ /* Something went very wrong if any of our callbacks were called */
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 0);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static void cs_dsp_test_halo_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_halo_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static int cs_dsp_callbacks_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ const struct cs_dsp_callbacks_test_param *param = test->param_value;
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ struct cs_dsp_mock_xm_header *xm_header;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+
+ /* Create a dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
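+ /* Hold a reference on the device; released by the deferred put_device action */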
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_callbacks_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_callbacks_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ xm_header->blob_data,
+ xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = param->ops;
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when the test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_callbacks_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_callbacks_test_adsp2_32bit_init(struct kunit *test, int rev)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = rev;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v2_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v1_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_callbacks_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 1);
+}
+
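+/* Build the KUnit description string for a parameterized test case */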
+static void cs_dsp_callbacks_param_desc(const struct cs_dsp_callbacks_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", param->case_name);
+}
+
+/* Parameterize on different client callback ops tables */
+static const struct cs_dsp_callbacks_test_param cs_dsp_callbacks_ops_cases[] = {
+ { .ops = &cs_dsp_callback_test_client_ops, .case_name = "all ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
+ cs_dsp_callbacks_ops_cases,
+ cs_dsp_callbacks_param_desc);
+
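+/* Parameterize on an empty ops table to verify that all callbacks are optional */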
+static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
+ { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
+ cs_dsp_no_callbacks_cases,
+ cs_dsp_callbacks_param_desc);
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv1_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v1_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_adsp2v2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_halo = {
+ .name = "cs_dsp_callbacks_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_callbacks_halo_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v2_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v2_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v1_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v1_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v1_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2_16bit = {
+ .name = "cs_dsp_callbacks_adsp2_16bit_wmfwv1",
+ .init = cs_dsp_callbacks_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv1_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_adsp2v2_32bit = {
+ .name = "cs_dsp_watchdog_adsp2v2_32bit",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_watchdog_adsp2v2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_halo_32bit = {
+ .name = "cs_dsp_watchdog_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_watchdog_halo_test_cases,
+};
+
+kunit_test_suites(&cs_dsp_callbacks_test_halo,
+ &cs_dsp_callbacks_test_adsp2v2_32bit,
+ &cs_dsp_callbacks_test_adsp2v1_32bit,
+ &cs_dsp_callbacks_test_adsp2_16bit,
+ &cs_dsp_watchdog_test_adsp2v2_32bit,
+ &cs_dsp_watchdog_test_halo_32bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
new file mode 100644
index 000000000000..83386cc978e3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
@@ -0,0 +1,3282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
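+/* Deferred-cleanup wrappers for use with kunit_add_action() */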
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
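+/* Parameters for a single cached-control test case */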
+struct cs_dsp_ctl_cache_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
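+/* Mock algorithm entries spanning several ids, versions and memory layouts */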
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_cache_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
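+/* Firmware names used by tests that download multiple firmwares */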
+static const char * const cs_dsp_ctl_cache_test_fw_names[] = {
+ "misc", "mbc/vss", "haps",
+};
+
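+/* Return the index into cs_dsp_ctl_cache_test_algs[] of the entry with the given id */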
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_cache_test_algs); ++i) {
+ if (cs_dsp_ctl_cache_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ return i;
+}
+
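+/* Return the base, in DSP words, of an algorithm's region in the given memory */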
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
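+/* Build a minimal wmfw containing only the dummy XM header */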
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * The memory allocated for the control cache must be large enough.
+ * This creates multiple controls of different sizes, so it only works
+ * on wmfw V2 and later.
+ */
+static void cs_dsp_ctl_v2_cache_alloc(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_size_bytes;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char ctl_name[4];
+ u32 *reg_vals;
+ int num_ctls;
+
+ /* Create some DSP data to initialize the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ alg_size_bytes = cs_dsp_ctl_cache_test_algs[0].ym_size_words *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_vals = kunit_kzalloc(test, alg_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, alg_size_bytes);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls of different sizes */
+ def.mem_type = WMFW_ADSP2_YM;
+ def.shortname = ctl_name;
+ num_ctls = 0;
+ for (def.length_bytes = 4; def.length_bytes <= 64; def.length_bytes += 4) {
+ snprintf(ctl_name, ARRAY_SIZE(ctl_name), "%x", def.length_bytes);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ num_ctls++;
+ def.offset_dsp_words += def.length_bytes / sizeof(u32);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&dsp->ctl_list), num_ctls);
+
+ /* Check that the block allocated for the cache is large enough */
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ KUNIT_EXPECT_GE(test, ksize(ctl->cache), ctl->len);
+}
+
+/*
+ * Content of registers backing a control should be read into the
+ * control cache when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The data should have been populated into the control cache
+ * so it should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * For a non-volatile write-only control the cache should be zero-filled
+ * when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init_write_only(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *readback, *zeros;
+
+ zeros = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, zeros);
+
+ readback = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create a non-volatile write-only control */
+ def.flags = param->flags & ~WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The control cache should have been zero-filled, so it should be
+ * readable through the control.
+ */
+ get_random_bytes(readback, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, zeros, param->len_bytes);
+}
+
+/*
+ * Multiple different firmwares with identical controls.
+ * This is legal because different firmwares could contain the same
+ * algorithm.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fw_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware, create random content in the register backing
+ * the control. Then download, start, stop and power down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[0]) == 0)
+ ctl[0] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[1]) == 0)
+ ctl[1] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[2]) == 0)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so it should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Multiple different firmwares with controls identical except for alg id.
+ * This is legal because the controls are qualified by algorithm id.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware but with different alg id */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware, create random content in the register backing
+ * the control. Then download, start, stop and power down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (cs_dsp_ctl_cache_test_algs[0].id == walkctl->alg_region.alg)
+ ctl[0] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[1].id == walkctl->alg_region.alg)
+ ctl[1] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[2].id == walkctl->alg_region.alg)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so it should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls at the same position in different memories.
+ * The control cache should be initialized with content from the
+ * correct memory region.
+ */
+static void cs_dsp_ctl_cache_init_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for memory region */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_XM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_ZM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+ }
+
+ /* Download, run, stop and power down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 2 or 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list),
+ cs_dsp_mock_has_zm(priv) ? 3 : 2);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.type == WMFW_ADSP2_YM)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_XM)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_ZM)
+ ctl[2] = walkctl;
+ }
+
+ /*
+ * The data should have been populated into the control cache
+ * so it should be readable through the control.
+ */
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+ }
+}
+
+/*
+ * Firmware with controls at the same position in different algorithms.
+ * The control cache should be initialized with content from the
+ * memory of the algorithm it points to.
+ */
+static void cs_dsp_ctl_cache_init_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create controls identical except for algorithm */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ /* Create random content in the registers backing each control */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ }
+
+ /* Download, run, stop and power down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[0].id)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[1].id)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[2].id)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so it should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls in the same algorithm and memory but at
+ * different offsets.
+ * The control cache should be initialized with content from the
+ * correct offset.
+ * Only for wmfw format V2 and later. V1 only supports one control per
+ * memory per algorithm.
+ */
+static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_base_reg;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for offset */
+ def.length_bytes = 8;
+ def.offset_dsp_words = 0;
+ def.shortname = "CtlA";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 5;
+ def.shortname = "CtlB";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 8;
+ def.shortname = "CtlC";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ alg_base_reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ alg_base_reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ reg = alg_base_reg;
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+ reg = alg_base_reg + (5 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+ reg = alg_base_reg + (8 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+
+ /* Download, run, stop and power down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->offset == 0)
+ ctl[0] = walkctl;
+ if (walkctl->offset == 5)
+ ctl[1] = walkctl;
+ if (walkctl->offset == 8)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so it should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Read from a cached control before the firmware is started.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP but don't start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been stopped.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the DSP has been powered-up and
+ * then powered-down without running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP then power down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been run and
+ * stopped, then the DSP has been powered-down.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power down the DSP then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP then power down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Power up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with non-zero flags while the firmware is
+ * running.
+ * Should return the data from the cache, not from the registers.
+ */
+static void cs_dsp_ctl_cache_read_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create data in the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(init_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Change the values in the registers backing the control then drop
+ * them from the regmap cache. This allows checking that the control
+ * read is returning values from the control cache and not accessing
+ * the registers.
+ */
+ KUNIT_ASSERT_EQ(test,
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes),
+ 0);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Control should read back the original data from its cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Control should readback from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with flags == 0 while the firmware is
+ * running.
+ * Should behave as volatile and read from the registers.
+ * (This is for backwards compatibility with old firmware versions)
+ */
+static void cs_dsp_ctl_cache_read_running_zero_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = 0;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Change the values in the registers backing the control */
+ get_random_bytes(new_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Change the values in the registers backing the control */
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Control should readback from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * This should be a writethrough operation, writing to the cache and
+ * the registers.
+ */
+static void cs_dsp_ctl_cache_writethrough(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
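+	/* Power-up DSP */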
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
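+	/* Get the control */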
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control, it should be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is running.
+ * The control write should return 0 to indicate that the content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_writethrough_unchanged(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
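+	/* Power-up DSP */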
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
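+	/* Get the control */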
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * If the control is write-only the cache will have been zero-initialized
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is not started.
+ * The control write should return 0 to indicate that the cache content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_write_unchanged_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
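+	/* Power-up DSP but don't start firmware */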
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
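+	/* Get the control */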
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * If the control is write-only the cache will have been zero-initialized
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is loaded but not
+ * started.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started and stopped.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded
+ * and the DSP has been powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started and stopped, and the DSP has been powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+	 * It should be possible to write new data to the control from
+	 * the first firmware, but it should not be written to the
+	 * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently running firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+	 * It should be possible to write new data to the control from
+	 * the first firmware, but it should not be written to the
+	 * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control before running the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_before_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * The value written should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_while_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *ctl_vals, *readback;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ ctl_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP and start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Stop firmware and zero the registers backing the control */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after stopping the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_after_stop(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control the next time the firmware containing the
+ * control is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_not_current_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Write new data to the control, it should not be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Power-down DSP then power-up with the original firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be synced out to the registers
+ * backing the control every time the firmware containing the control
+ * is run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_every_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and reset the registers */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Start the firmware again and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained if the same
+ * firmware is downloaded again. It should be synced out to the
+ * registers backing the control after the firmware containing the
+ * control is downloaded again and run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_reload(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the firmware again, the cache content should not change */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained after a different
+ * firmware is downloaded.
+ * When the firmware containing the control is downloaded and run,
+ * the value in the control cache should be synced out to the registers
+ * backing the control.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_swap(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download and run a different firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the original firmware again */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_EXPECT_TRUE(test, ctl->set);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
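+/*
+ * Common initialization shared by all the test variants: allocate the
+ * test priv structures, register a dummy struct device, create the
+ * mock regmap, mock XM header and wmfw builder, init the cs_dsp
+ * instance for the DSP type under test, and register a cleanup action
+ * so cs_dsp_remove() is called when the test case ends.
+ */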
+static int cs_dsp_ctl_cache_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_cache_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_cache_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_cache_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_cache_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control, except flags==0
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_nonzero_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_nonzero_flags,
+ all_pop_nonvol_readable_nonzero_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile write-only control of varying lengths
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_write_only_length_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_write_only_length,
+ all_pop_nonvol_write_only_length_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v1[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v2[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_halo = {
+ .name = "cs_dsp_ctl_cache_wmfwV3_halo",
+ .init = cs_dsp_ctl_cache_test_halo_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_ctl_cache_test_halo,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
new file mode 100644
index 000000000000..cb90964740ea
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -0,0 +1,1851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
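+/* Wrappers to allow these functions to be used as KUnit deferred actions */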
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_parse_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offset;
+ unsigned int length;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_parse_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_size_words = 8,
+ .ym_size_words = 8,
+ .zm_size_words = 8,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* An algorithm info block without controls should load */
+static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored.
+ */
+static void cs_dsp_ctl_parse_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Dummy";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored. Test with a zero-length name string.
+ */
+static void cs_dsp_ctl_parse_empty_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "\0";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored. Test with a maximum-length name string.
+ */
+static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *name;
+
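+	/* Build a maximum-length (255 character) name */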
+ name = kunit_kzalloc(test, 256, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+ def.fullname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* The short name from the coeff descriptor should be used as the control name. */
+static void cs_dsp_ctl_parse_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The short name from the coeff descriptor should be used as the control name.
+ * Test with a short name that is a single character.
+ */
+static void cs_dsp_ctl_parse_min_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.shortname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 1);
+ KUNIT_EXPECT_EQ(test, ctl->subname[0], 'Q');
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The short name from the coeff descriptor should be used as the control name.
+ * Test with a maximum-length name.
+ */
+static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char *name;
+ struct firmware *wmfw;
+
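+	/* Build a maximum-length (255 character) short name */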
+ name = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+
+ def.shortname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The full name from the coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character full name.
+ */
+static void cs_dsp_ctl_parse_with_min_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The full name from the coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum-length full name.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The description from the coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character description.
+ */
+static void cs_dsp_ctl_parse_with_min_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.description = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The description from the coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum-length description.
+ */
+static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *description;
+
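+	/* Build a maximum-length (65535 character) description */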
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The full name and description in the coeff descriptor are variable-length
+ * fields, so they affect the position of subsequent fields.
+ * Test with a maximum-length full name and description.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname, *description;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+static const char * const cs_dsp_ctl_alignment_test_names[] = {
+ "1", "12", "123", "1234", "12345", "123456", "1234567",
+ "12345678", "123456789", "123456789A", "123456789AB",
+ "123456789ABC", "123456789ABCD", "123456789ABCDE",
+ "123456789ABCDEF",
+};
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of short name.
+ */
+static void cs_dsp_ctl_shortname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ def.shortname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_alignment_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, i + 1);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_alignment_test_names[i],
+ ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of full name.
+ */
+static void cs_dsp_ctl_fullname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.fullname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of description.
+ */
+static void cs_dsp_ctl_description_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.description = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
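+/* Names for lookup tests, including similar strings to catch partial matches */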
+static const char * const cs_dsp_get_ctl_test_names[] = {
+ "Up", "Down", "Switch", "Mute",
+ "Left Up", "Left Down", "Right Up", "Right Down",
+ "Left Mute", "Right Mute",
+ "_trunc_1", "_trunc_2", " trunc",
+};
+
+/* Test using cs_dsp_get_ctl() to look up various controls. */
+static void cs_dsp_get_ctl_test(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
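+	/* Create one control per name, each at a unique offset for verification */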
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_get_ctl_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(cs_dsp_get_ctl_test_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_get_ctl_test_names[i],
+ ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->offset, i);
+ }
+}
+
+/*
+ * cs_dsp_get_ctl() searches for the control in the currently loaded
+ * firmware, so create identical controls in multiple firmware files and
+ * test that the correct one is found.
+ */
+static void cs_dsp_get_ctl_test_multiple_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ def.shortname = "_A_CONTROL";
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
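+	/* Give the second firmware the same XM header as the main builder */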
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control of the same name */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* A lookup should return the control for the current firmware */
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 2);
+
+ /* Re-load the 'misc' firmware and a lookup should return its control */
+ cs_dsp_power_down(priv->dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 1);
+}
+
+/* Test that the value of the memory type field is parsed correctly. */
+static void cs_dsp_ctl_parse_memory_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+	/* kunit_skip() would mark the test skipped for every param, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the algorithm id from the parent alg-info block is
+ * correctly stored in the cs_dsp_coeff_ctl.
+ */
+static void cs_dsp_ctl_parse_alg_id(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, def.mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the (alg id, memory type) tuple is parsed correctly.
+ * The alg id is parsed from the alg-info block, but the memory type is
+ * parsed from the coefficient info descriptor.
+ */
+static void cs_dsp_ctl_parse_alg_mem(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+	/* kunit_skip() would mark the test skipped for every param, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+}
+
+/* Test that the value of the offset field is parsed correctly. */
+static void cs_dsp_ctl_parse_offset(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.offset_dsp_words = param->offset;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, param->offset);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the length field is parsed correctly. */
+static void cs_dsp_ctl_parse_length(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.length_bytes = param->length;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, def.offset_dsp_words);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, param->length);
+}
+
+/* Test that the value of the control type field is parsed correctly. */
+static void cs_dsp_ctl_parse_ctl_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, param->ctl_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the flags field is parsed correctly. */
+static void cs_dsp_ctl_parse_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, param->flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that invalid combinations of (control type, flags) are rejected. */
+static void cs_dsp_ctl_illegal_type_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_LT(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/* Test that the correct firmware name is entered in the cs_dsp_coeff_ctl. */
+static void cs_dsp_ctl_parse_fw_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* Both controls should be in the list (order not guaranteed) */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = NULL;
+ ctl2 = NULL;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, "misc") == 0)
+ ctl1 = walkctl;
+ else if (strcmp(walkctl->fw_name, "mbc/vss") == 0)
+ ctl2 = walkctl;
+ }
+
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, 1);
+ KUNIT_EXPECT_EQ(test, ctl2->offset, 2);
+}
+
+/* Controls are unique if the algorithm ID is different */
+static void cs_dsp_ctl_alg_id_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ /* Create an algorithm containing the control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create a different algorithm containing an identical control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[1].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if the memory region is different */
+static void cs_dsp_ctl_mem_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ /* Create control in XM */
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ /* Create control in YM */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if they are in different firmware */
+static void cs_dsp_ctl_fw_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with the same control */
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2",
+ NULL, NULL, "mbc/vss"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STRNEQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded the controls are not added again.
+ * This creates multiple algorithms with one control each, which will
+ * work on both V1 format and >=V2 format controls.
+ */
+static void cs_dsp_ctl_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Create some algorithms with a control */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_parse_test_algs); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[i].id,
+ "dummyalg", NULL);
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
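+
+/*
+ * Editorial sketch (an assumption about cs_dsp internals, not stated in
+ * this patch): the squashing verified above implies that, when a wmfw is
+ * parsed, each coefficient is first matched against the existing
+ * ctl_list on an identity tuple along the lines of
+ *
+ *   same fw_name && same alg_region.alg && same alg_region.type &&
+ *   same subname (full-length compare)
+ *
+ * and only unmatched coefficients allocate a new cs_dsp_coeff_ctl. That
+ * is why KUNIT_ASSERT_PTR_EQ() sees the identical objects after reload.
+ */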
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded the controls are not added again.
+ * This tests >=V2 firmware that can have multiple named controls in
+ * the same algorithm.
+ */
+static void cs_dsp_ctl_v2_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_get_ctl_test_names)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create some controls */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ if (i & BIT(0))
+ def.mem_type = WMFW_ADSP2_XM;
+ else
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+static const char * const cs_dsp_ctl_v2_compare_len_names[] = {
+ "LEFT",
+ "LEFT_",
+ "LEFT_SPK",
+ "LEFT_SPK_V",
+ "LEFT_SPK_VOL",
+ "LEFT_SPK_MUTE",
+ "LEFT_SPK_1",
+ "LEFT_X",
+ "LEFT2",
+};
+
+/*
+ * When comparing shortnames the full length of both strings is
+ * considered, not only the characters of the shorter string, so
+ * that "LEFT" is not the same as "LEFT2".
+ * This is specifically to test for the bug that was fixed by commit
+ * 7ac1102b227b ("firmware: cs_dsp: Fix new control name check").
+ */
+static void cs_dsp_ctl_v2_compare_len(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ def.shortname = cs_dsp_ctl_v2_compare_len_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_v2_compare_len_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len,
+ strlen(cs_dsp_ctl_v2_compare_len_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_v2_compare_len_names[i],
+ ctl->subname_len);
+ }
+}
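+
+/*
+ * Illustrative sketch (editorial, not from this patch): the bug fixed by
+ * commit 7ac1102b227b amounted to comparing only the common prefix,
+ * roughly
+ *
+ *   strncmp(a, b, min(a_len, b_len)) == 0   (so "LEFT" matches "LEFT2")
+ *
+ * whereas a correct match must also require equal lengths:
+ *
+ *   a_len == b_len && memcmp(a, b, a_len) == 0
+ *
+ * which is exactly what the name list above is constructed to exercise.
+ */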
+
+static int cs_dsp_ctl_parse_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_parse_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header blob to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_parse_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 2);
+}
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_mem_type_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM },
+ { .mem_type = WMFW_ADSP2_YM },
+ { .mem_type = WMFW_ADSP2_ZM },
+};
+
+static void cs_dsp_ctl_mem_type_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s",
+ cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_mem_type,
+ cs_dsp_ctl_mem_type_param_cases,
+ cs_dsp_ctl_mem_type_desc);
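+
+/*
+ * Editorial note: KUNIT_ARRAY_PARAM(name, array, desc_fn) defines a
+ * name##_gen_params() parameter generator, which is where identifiers
+ * such as cs_dsp_ctl_mem_type_gen_params, referenced by
+ * KUNIT_CASE_PARAM() in the test case tables below, come from.
+ */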
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_alg_id_param_cases[] = {
+ { .alg_id = 0xb },
+ { .alg_id = 0xfafa },
+ { .alg_id = 0x9f1234 },
+ { .alg_id = 0xff00ff },
+};
+
+static void cs_dsp_ctl_alg_id_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg_id:%#x", param->alg_id);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_alg_id,
+ cs_dsp_ctl_alg_id_param_cases,
+ cs_dsp_ctl_alg_id_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_offset_param_cases[] = {
+ { .offset = 0x0 },
+ { .offset = 0x1 },
+ { .offset = 0x2 },
+ { .offset = 0x3 },
+ { .offset = 0x4 },
+ { .offset = 0x5 },
+ { .offset = 0x6 },
+ { .offset = 0x7 },
+ { .offset = 0xe0 },
+ { .offset = 0xf1 },
+ { .offset = 0xfffe },
+ { .offset = 0xffff },
+};
+
+static void cs_dsp_ctl_offset_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "offset:%#x", param->offset);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_offset,
+ cs_dsp_ctl_offset_param_cases,
+ cs_dsp_ctl_offset_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_length_param_cases[] = {
+ { .length = 0x4 },
+ { .length = 0x8 },
+ { .length = 0x18 },
+ { .length = 0xf000 },
+};
+
+static void cs_dsp_ctl_length_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "length:%#x", param->length);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_length,
+ cs_dsp_ctl_length_param_cases,
+ cs_dsp_ctl_length_desc);
+
+/* Note: some control types mandate specific flag settings */
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_type_param_cases[] = {
+ { .ctl_type = WMFW_CTL_TYPE_BYTES,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+};
+
+static void cs_dsp_ctl_type_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "ctl_type:%#x flags:%#x",
+ param->ctl_type, param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_type,
+ cs_dsp_ctl_type_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_flags_param_cases[] = {
+ { .flags = 0 },
+ { .flags = WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+static void cs_dsp_ctl_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags:%#x", param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_flags,
+ cs_dsp_ctl_flags_param_cases,
+ cs_dsp_ctl_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_illegal_type_flags_param_cases[] = {
+ /* ACKED control must be volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* HOSTEVENT must be system + volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* FWEVENT rules same as HOSTEVENT */
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /*
+ * HOST_BUFFER must be system + volatile + readable, or
+ * system + volatile + readable + writeable
+ */
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE},
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+};
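+
+/*
+ * Editorial summary of the legality rules the cases above exercise:
+ *
+ *   ACKED       -> VOLATILE + READABLE + WRITEABLE
+ *   HOSTEVENT   -> SYS + VOLATILE + READABLE + WRITEABLE
+ *   FWEVENT     -> SYS + VOLATILE + READABLE + WRITEABLE
+ *   HOST_BUFFER -> SYS + VOLATILE + READABLE (WRITEABLE optional)
+ *
+ * Any other flag combination for these types is expected to make
+ * cs_dsp_power_up() fail, as asserted by cs_dsp_ctl_illegal_type_flags().
+ */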
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v1[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_empty_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_v1_name),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v2_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_min_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname_and_description),
+ KUNIT_CASE(cs_dsp_ctl_shortname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_fullname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_description_alignment),
+ KUNIT_CASE(cs_dsp_get_ctl_test),
+ KUNIT_CASE(cs_dsp_get_ctl_test_multiple_wmfw),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_compare_len),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_halo = {
+ .name = "cs_dsp_ctl_parse_wmfwV3_halo",
+ .init = cs_dsp_ctl_parse_test_halo_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+kunit_test_suites(&cs_dsp_ctl_parse_test_halo,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
new file mode 100644
index 000000000000..bda00a95d4f9
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
@@ -0,0 +1,2669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_rw_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_rw_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_rw_test_algs); ++i) {
+ if (cs_dsp_ctl_rw_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Write to a control while the firmware is running.
+ * This should write to the underlying registers.
+ */
+static void cs_dsp_ctl_write_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Write new data to the control, it should be written to the registers
+ * and cs_dsp_coeff_lock_and_write_ctrl() should return 1 to indicate
+ * that the control content changed.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
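+
+/*
+ * Usage sketch (editorial, based only on the semantics exercised here):
+ * a cs_dsp client typically calls the locked helpers rather than taking
+ * pwr_lock itself, e.g.
+ *
+ *   ret = cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, buf, len);
+ *   if (ret < 0)
+ *           return ret;        (error)
+ *   changed = (ret > 0);       (1 means the value changed)
+ *
+ * with cs_dsp_coeff_lock_and_read_ctrl() as the symmetric read helper.
+ */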
+
+/*
+ * Read from a volatile control while the firmware is running.
+ * This should return the current state of the underlying registers.
+ */
+static void cs_dsp_ctl_read_volatile_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control, it should return the current register content */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /*
+ * Change the register content and read the control, it should return
+ * the new register content
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes), 0);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
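+
+/*
+ * Editorial note: the two reads above show that a volatile control is
+ * never served from a cached copy; each
+ * cs_dsp_coeff_lock_and_read_ctrl() call reaches the underlying
+ * registers, so the second read observes the value written directly via
+ * regmap after the firmware started.
+ */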
+
+/*
+ * Read from a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * running.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Write to a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power down the DSP then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power down the DSP then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from an offset into the control data. Should return only the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_read_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
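+ /* 48 bytes == 12 u32 DSP words, giving a range of seek positions to test */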
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
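+ /* Step through every non-zero word offset and read to the end of the control */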
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read from an offset into the control cache. Should return only the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_read_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a control. Should return
+ * only the requested number of bytes.
+ */
+static void cs_dsp_ctl_read_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
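+ /* Bytes past the requested length must not have been copied into the buffer */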
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a cached control.
+ * Should return only the requested number of bytes.
+ * Same as cs_dsp_ctl_read_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control data. Should only change the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_write_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
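+ /* A return value of 1 indicates the write changed the control value */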
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control cache. Should only change the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_write_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so that writes and reads use the control cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the cache to the test data */
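+ /* This accepts 0 or 1 because the cached value may already match reg_vals */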
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a control. Should only
+ * change the requested number of bytes.
+ */
+static void cs_dsp_ctl_write_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
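+ /* Registers beyond the requested write length must be unchanged */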
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a cached control.
+ * Should only change the requested number of bytes.
+ * Same as cs_dsp_ctl_write_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so that writes and reads use the control cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read from an offset that is beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
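+ /* A seek of the full control length in words is one word past the end */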
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
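+ /* kunit_release_action() runs the deferred cs_dsp_stop() now and drops it from cleanup */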
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Read more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+}
+
+/*
+ * Read with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Read full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Write to an offset that is beyond the end of the control data.
+ * Should return an error without touching any registers.
+ */
+static void cs_dsp_ctl_write_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /*
+ * Write full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a write-only control. This is legal because controls can
+ * always be read. The write-only flag only indicates that it is not
+ * useful to populate the cache from the DSP memory.
+ */
+static void cs_dsp_ctl_read_from_writeonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *ctl_vals, *readback;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ ctl_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write some test data to the control */
+ get_random_bytes(ctl_vals, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, def.length_bytes),
+ 1);
+
+ /* Read back the data */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+ }
+}
+
+/*
+ * Write to a read-only control.
+ * This should return an error without writing registers.
+ */
+static void cs_dsp_ctl_write_to_readonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+static int cs_dsp_ctl_rw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_rw_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_rw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
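+ /* Halo firmware uses wmfw format version 3 */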
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_rw_test_param *param,
+ char *desc)
+{
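+ /*
+ * Renders e.g. "alg:0xfafa YM@1 len:4 flags:0x6", where the region
+ * name comes from cs_dsp_mem_region_name().
+ */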
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
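+
+/*
+ * KUNIT_ARRAY_PARAM() defines a parameter generator named after the array
+ * with a _gen_params suffix (here all_pop_varying_len_gen_params), which is
+ * what the KUNIT_CASE_PARAM() entries in the test case tables reference.
+ */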
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_rw_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readable_flags,
+ all_pop_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * read-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readonly_flags,
+ all_pop_readonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeable_flags,
+ all_pop_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * write-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeonly_flags,
+ all_pop_writeonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_readable_flags,
+ all_pop_volatile_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile writeable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_writeable_flags,
+ all_pop_volatile_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_adsp[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
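+/* As the ADSP2 table above, minus the ZM cases: Halo cores have no ZM region */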
+static struct kunit_case cs_dsp_ctl_rw_test_cases_halo[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_halo = {
+ .name = "cs_dsp_ctl_rw_wmfwV3_halo",
+ .init = cs_dsp_ctl_rw_test_halo_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+kunit_test_suites(&cs_dsp_ctl_rw_test_halo,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
new file mode 100644
index 000000000000..9e997c4ee2d6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
@@ -0,0 +1,2211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a dummy wmfw file.
+ * 3) Call cs_dsp_power_up() with the wmfw file.
+ * 4) Read back the cached value of registers that should have been written and
+ * check they have the correct value.
+ * 5) All the registers that are expected to have been written are dropped from
+ * the cache. This should leave the cache clean.
+ * 6) If the cache is still dirty there have been unexpected writes.
+ */
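+
+/*
+ * A minimal sketch of that flow, using the helpers exactly as the tests
+ * in this file use them:
+ *
+ *   wmfw = cs_dsp_mock_wmfw_get_firmware(builder);
+ *   cs_dsp_power_up(dsp, wmfw, "mock_wmfw", NULL, NULL, "misc");
+ *   regmap_raw_read(dsp->regmap, reg_addr, readback, len);  (check payload)
+ *   cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, len);     (drop expected)
+ *   cs_dsp_mock_regmap_is_dirty(priv, true);                (must be false)
+ */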
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_vfree_wrapper, vfree, void *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
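+
+/*
+ * KUNIT_DEFINE_ACTION_WRAPPER() adapts each function to a kunit_action_t
+ * so it can be registered with kunit_add_action_or_reset() and run
+ * automatically when the test exits.
+ */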
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ unsigned int num_blocks;
+ int mem_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/*
+ * wmfw that writes the XM header.
+ * cs_dsp always reads this back from unpacked XM.
+ */
+static void wmfw_write_xm_header_unpacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ unsigned int reg_addr;
+ u8 *readback;
+
+ /* XM header payload was added to the wmfw by the test case init function */
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Read raw so endianness and register width don't matter */
+ readback = kunit_kzalloc(test, local->xm_header->blob_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ local->xm_header->blob_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write one payload of param->num_blocks register blocks */
+static void wmfw_write_one_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type, mem_offset_dsp_words,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write several smallest possible payloads for the given memory type */
+static void wmfw_write_multiple_oneblock_payloads(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = 0; i < num_payloads; ++i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write several smallest possible payloads of the given memory type
+ * in reverse address order
+ */
+static void wmfw_write_multiple_oneblock_payloads_reverse(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = num_payloads - 1; i >= 0; --i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write multiple payloads of param->num_blocks register blocks each.
+ * The payloads are not in address order and collectively do not patch
+ * a contiguous block of memory.
+ */
+static void wmfw_write_multiple_payloads_sparse_unordered(struct kunit *test)
+{
+ static const unsigned int random_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const int num_payloads = ARRAY_SIZE(random_offsets);
+ int i;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ payload_size_dsp_words = param->num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads at "random", non-contiguous locations */
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset = random_offsets[i] * payload_size_dsp_words;
+
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + offset,
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
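+ /*
+ * Read back each payload from its "random" offset, converting the byte
+ * offset into a register address using the regmap value size and stride,
+ * then drop the expected bytes from the cache.
+ */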
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset_num_regs = (random_offsets[i] * payload_size_bytes) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[i * payload_size_bytes],
+ payload_size_bytes),
+ 0);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single unpacked payload */
+static void wmfw_write_all_unpacked_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_ADSP2_PM);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_PM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_PM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single packed payload */
+static void wmfw_write_all_packed_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_HALO_PM_PACKED);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_HALO_PM_PACKED, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_PM_PACKED);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
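+ * For example, { WMFW_ADSP2_PM, 11, 60 } patches 60 blocks of PM starting
+ * 11 blocks from the start of the region.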
+ */
+static void wmfw_write_multiple_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_ADSP2_PM, 11, 60 },
+ { WMFW_ADSP2_ZM, 69, 8 },
+ { WMFW_ADSP2_YM, 32, 74 },
+ { WMFW_ADSP2_XM, 70, 38 },
+ { WMFW_ADSP2_PM, 84, 48 },
+ { WMFW_ADSP2_XM, 46, 18 },
+ { WMFW_ADSP2_PM, 0, 8 },
+ { WMFW_ADSP2_YM, 0, 30 },
+ { WMFW_ADSP2_PM, 160, 50 },
+ { WMFW_ADSP2_ZM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks, payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various packed and unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_packed_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_HALO_PM_PACKED, 11, 60 },
+ { WMFW_ADSP2_YM, 69, 8 },
+ { WMFW_HALO_YM_PACKED, 32, 74 },
+ { WMFW_HALO_XM_PACKED, 70, 38 },
+ { WMFW_HALO_PM_PACKED, 84, 48 },
+ { WMFW_HALO_XM_PACKED, 46, 18 },
+ { WMFW_HALO_PM_PACKED, 0, 8 },
+ { WMFW_HALO_YM_PACKED, 0, 30 },
+ { WMFW_HALO_PM_PACKED, 160, 50 },
+ { WMFW_ADSP2_XM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * using one packed payload followed by one unpacked word.
+ */
+static void wmfw_write_packed_1_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of one unpacked word to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache. The unpacked payload is offset within
+ * unpacked register space by the number of DSP words that were
+ * written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by one payload of two unpacked words.
+ */
+static void wmfw_write_packed_2_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of two unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by one payload of three unpacked words.
+ */
+static void wmfw_write_packed_3_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of three unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by two payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_2_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add two unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by three payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_3_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add three unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 2,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one unpacked word
+ * followed by a packed payload.
+ */
+static void wmfw_write_packed_1_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for an unaligned word before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 1;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /* Add a single unpacked word right before the first word of packed data */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked word. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 1) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
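+
+/*
+ * The "leading" tests mirror the "trailing" tests above. With the same
+ * illustrative figures (4 DSP words per packed block, 1 DSP word per
+ * unpacked register): if the XM header ends at DSP word 10, reserving one
+ * word and rounding up gives roundup(11, 4) == 12, so the unpacked word
+ * sits at DSP word 11 and the packed payload starts at word 12.
+ */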
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of two unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_2_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of three unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_3_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use two payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_2_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as two payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use three payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_3_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as three payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Load a wmfw containing multiple info blocks */
+static void wmfw_load_with_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ char *infobuf;
+ const unsigned int payload_size_bytes = 48;
+ int ret;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add a couple of info blocks at the start of the wmfw */
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is a timestamp");
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is some more info");
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ /* Add a bigger info block, then another small one */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
+ while (strlcat(infobuf, "Waffle{Blah}\n", 512) < 512)
+ ;
+
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, infobuf);
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "Another block of info");
+
+ /* Add another payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 64,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ ret = cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc");
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "cs_dsp_power_up failed: %d\n", ret);
+
+ /* Check first payload was written */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Check second payload was written */
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * 64;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
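+
+/*
+ * The second-payload address above is derived from its DSP word offset:
+ * on a hypothetical memory with 4 register addresses per unpacked DSP
+ * word, a payload at DSP word 64 would start at base + (64 * 4); the
+ * actual increment comes from cs_dsp_mock_reg_addr_inc_per_unpacked_word().
+ */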
+
+static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so
+ * create a dummy one that tests can use, and extract it to a data
+ * payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_mem_param_desc(const struct cs_dsp_wmfw_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s num_blocks:%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->num_blocks);
+}
+
+static const struct cs_dsp_wmfw_test_param adsp2_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(adsp2_all_num_blocks,
+ adsp2_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
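+
+/*
+ * KUNIT_ARRAY_PARAM(name, cases, desc_fn) defines a name_gen_params()
+ * generator that iterates over the cases array. The generators defined
+ * here are referenced by the test lists below, e.g.
+ * KUNIT_CASE_PARAM(wmfw_write_one_payload, adsp2_all_num_blocks_gen_params).
+ */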
+
+static const struct cs_dsp_wmfw_test_param halo_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(halo_all_num_blocks,
+ halo_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param packed_xy_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(packed_xy_num_blocks,
+ packed_xy_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static struct kunit_case cs_dsp_wmfw_test_cases_halo[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ halo_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_packed_pm),
+ KUNIT_CASE(wmfw_write_multiple_packed_unpacked_mem),
+
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ adsp2_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_unpacked_pm),
+ KUNIT_CASE(wmfw_write_multiple_unpacked_mem),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_halo = {
+ .name = "cs_dsp_wmfwV3_halo",
+ .init = cs_dsp_wmfw_test_halo_init,
+ .test_cases = cs_dsp_wmfw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_test_halo,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
new file mode 100644
index 000000000000..c309843261d7
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
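+
+/*
+ * KUNIT_DEFINE_ACTION_WRAPPER(wrapper, func, arg_type) generates a
+ * deferred-action wrapper around func(). _cs_dsp_remove_wrapper, for
+ * example, lets kunit_add_action_or_reset() invoke cs_dsp_remove() on the
+ * struct cs_dsp automatically when a test case exits.
+ */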
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Load a wmfw containing unknown blocks. They should be skipped. */
+static void wmfw_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the wmfw */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xc0, 0, random_data,
+ sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0x33, 0, NULL, 0);
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a wmfw that doesn't have a valid magic marker. */
+static void wmfw_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ memcpy((void *)wmfw->data, "WMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "xMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WxFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMxW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMFx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memset((void *)wmfw->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* Load a wmfw that is too short for a valid header. */
+static void wmfw_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ do {
+ wmfw->size--;
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ } while (wmfw->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void wmfw_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ unsigned int real_len, len;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ real_len = le32_to_cpu(header->len);
+
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* Wrong core type in header. */
+static void wmfw_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+
+ header->core = 0;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = priv->dsp->type + 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 0xff;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void wmfw_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+ u32 dummy_payload = 0;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ /* Add the block. A block must have at least 4 bytes of payload */
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &dummy_payload, sizeof(dummy_payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* File too short to contain the block payload */
+static void wmfw_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ static const u8 payload[256] = { };
+ int i;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (i = 0; i < sizeof(payload); i++) {
+ wmfw->size--;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void wmfw_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ u32 payload = 0;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the wmfw */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->offset) >> 24, param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->len), sizeof(payload));
+
+ region->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* File too short to contain an algorithm header */
+static void wmfw_too_short_for_alg_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ NULL, NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* V1 algorithm name does not have NUL terminator */
+static void wmfw_v1_alg_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Write a string to the alg name that overflows the array */
+ memset(alg_data->descr, 0, sizeof(alg_data->descr));
+ memset(alg_data->name, 'A', sizeof(alg_data->name));
+ memset(alg_data->descr, 'A', sizeof(alg_data->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow alg_data->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(alg_data->name[]), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(alg_data->name, sizeof(alg_data->name)),
+ sizeof(alg_data->name));
+
+ /*
+ * The alg name isn't stored, but cs_dsp parses the name field.
+ * It should load the file successfully and create the control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp string length walks past end of alg name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ algorithm name exceeds length of containing block */
+static void wmfw_v2_alg_name_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[1]), 3 | ('a' << 8) | ('b' << 16) | ('c' << 24));
+ KUNIT_ASSERT_EQ(test, *(u8 *)&alg_data[1], 3);
+
+ /* Set name string length longer than available space */
+ *(u8 *)&alg_data[1] = 4;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 7;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0x80;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0xff;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ algorithm description exceeds length of containing block */
+static void wmfw_v2_alg_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc[desc_len]de
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[2]), 2 | ('d' << 16) | ('e' << 24));
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(*(__le16 *)&alg_data[2]), 2);
+
+ /* Set description string length longer than available space */
+ *(__le16 *)&alg_data[2] = cpu_to_le16(4);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(7);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x80);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coefficient count exceeds length of containing block */
+static void wmfw_v1_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Add one to the coefficient count */
+ alg_data->ncoeff = cpu_to_le32(le32_to_cpu(alg_data->ncoeff) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ alg_data->ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient count exceeds length of containing block */
+static void wmfw_v2_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *ncoeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ ncoeff = (__force __le32 *)&alg_data[3];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(*ncoeff), 1);
+
+ /* Add one to the coefficient count */
+ *ncoeff = cpu_to_le32(2);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ *ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient block size exceeds length of containing block */
+static void wmfw_v2_coeff_block_size_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the block size */
+ coeff[1] = cpu_to_le32(le32_to_cpu(coeff[1]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the block size garbage */
+ coeff[1] = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coeff name does not have NUL terminator */
+static void wmfw_v1_coeff_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct wmfw_adsp_coeff_data *coeff;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->ncoeff), 1);
+
+ coeff = (void *)alg_data->data;
+
+ /* Write a string to the coeff name that overflows the array */
+ memset(coeff->descr, 0, sizeof(coeff->descr));
+ memset(coeff->name, 'A', sizeof(coeff->name));
+ memset(coeff->descr, 'A', sizeof(coeff->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow coeff->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(coeff->name[]), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(coeff->name, sizeof(coeff->name)),
+ sizeof(coeff->name));
+
+ /*
+ * V1 controls do not have names, but cs_dsp parses the name
+ * field. It should load the file successfully and create the
+ * control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp string length walks past end of coeff name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ coefficient shortname exceeds length of coeff block */
+static void wmfw_v2_coeff_shortname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the shortname length */
+ coeff[2] = cpu_to_le32(le32_to_cpu(coeff[2]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum shortname length */
+ coeff[2] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient fullname exceeds length of coeff block */
+static void wmfw_v2_coeff_fullname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname;
+ size_t shortlen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Fullname follows the shortname rounded up to a __le32 boundary */
+ shortlen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (shortlen / sizeof(*coeff));
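+ /*
+ * Worked example under the mock template above: "Dummy Coeff" is 11
+ * chars, so its length byte plus string fill round_up(11, 4) = 12
+ * bytes (3 words) from coeff[2], putting fullname at coeff[5].
+ */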
+
+ /* Fullname increases in blocks of __le32 so increase past the current __le32 */
+ fullname[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum fullname length */
+ fullname[0] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient description exceeds length of coeff block */
+static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname, *description;
+ size_t namelen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Description follows the shortname and fullname rounded up to __le32 boundaries */
+ namelen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (namelen / sizeof(*coeff));
+ namelen = round_up(le32_to_cpu(fullname[0]) & 0xff, sizeof(__le32));
+ description = fullname + (namelen / sizeof(*fullname));
+
+ /* Description increases in blocks of __le32 so increase past the current __le32 */
+ description[0] = cpu_to_le32(round_up(le32_to_cpu(description[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum description length */
+ description[0] = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+static void cs_dsp_wmfw_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_adsp2_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_adsp2,
+ wmfw_valid_block_types_adsp2_cases,
+ cs_dsp_wmfw_err_block_types_desc);
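+
+/* Each KUNIT_ARRAY_PARAM() emits a <name>_gen_params() generator consumed by KUNIT_CASE_PARAM() below */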
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_halo_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_HALO_PM_PACKED },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_halo,
+ wmfw_valid_block_types_halo_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_invalid_block_types_cases[] = {
+ { .block_type = 0x33 },
+ { .block_type = 0xf5 },
+ { .block_type = 0xc0 },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_invalid_block_types,
+ wmfw_invalid_block_types_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v0[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v1[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v1_alg_name_unterminated),
+ KUNIT_CASE(wmfw_v1_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v1_coeff_name_unterminated),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v2[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v3[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_halo = {
+ .name = "cs_dsp_wmfwV3_err_halo",
+ .init = cs_dsp_wmfw_err_test_halo_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_err_test_halo,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c
new file mode 100644
index 000000000000..7b829a03ca52
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("KUnit tests for Cirrus Logic DSP driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
+MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 60c64b81d2c3..8296bf985d1d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -148,9 +148,6 @@ static ssize_t systab_show(struct kobject *kobj,
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (IS_ENABLED(CONFIG_X86))
- str = efi_systab_show_arch(str);
-
return str - buf;
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ed4e8ddbe76a..1141cd06011f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-y := $(KBUILD_CFLAGS)
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index c0c81ca4237e..fd6dc790c5a8 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -47,9 +47,10 @@ bool __pure __efi_soft_reserve_enabled(void)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- size_t len;
+ char *buf __free(efi_pool) = NULL;
efi_status_t status;
- char *str, *buf;
+ size_t len;
+ char *str;
if (!cmdline)
return EFI_SUCCESS;
@@ -102,7 +103,6 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_parse_option_graphics(val + strlen("efifb:"));
}
}
- efi_bs_call(free_pool, buf);
return EFI_SUCCESS;
}
@@ -250,7 +250,7 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
u64, const union efistub_event *);
struct { u32 hash_log_extend_event; } mixed_mode;
} method;
- struct efistub_measured_event *evt;
+ struct efistub_measured_event *evt __free(efi_pool) = NULL;
int size = struct_size(evt, tagged_event.tagged_event_data,
events[event].event_data_len);
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
@@ -312,7 +312,6 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
status = efi_fn_call(&method, hash_log_extend_event, protocol, 0,
load_addr, load_size, &evt->event_data);
- efi_bs_call(free_pool, evt);
if (status == EFI_SUCCESS)
return EFI_SUCCESS;
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 382b54f40603..874f63b4a383 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -10,6 +10,7 @@
*/
#include <linux/efi.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -53,25 +54,16 @@ void __weak free_screen_info(struct screen_info *si)
static struct screen_info *setup_graphics(void)
{
- efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- struct screen_info *si = NULL;
+ struct screen_info *si, tmp = {};
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info();
- if (!si)
- return NULL;
- status = efi_setup_gop(si, &gop_proto, size);
- if (status != EFI_SUCCESS) {
- free_screen_info(si);
- return NULL;
- }
- }
+ if (efi_setup_gop(&tmp) != EFI_SUCCESS)
+ return NULL;
+
+ si = alloc_screen_info();
+ if (!si)
+ return NULL;
+
+ *si = tmp;
return si;
}
@@ -112,8 +104,8 @@ static u32 get_supported_rt_services(void)
efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
{
+ char *cmdline __free(efi_pool) = NULL;
efi_status_t status;
- char *cmdline;
/*
* Get the command line from EFI, using the LOADED_IMAGE
@@ -128,25 +120,24 @@ efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
status = efi_parse_options(cmdline);
- if (status != EFI_SUCCESS)
- goto fail_free_cmdline;
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse EFI load options\n");
+ return status;
+ }
}
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
cmdline[0] == 0) {
status = efi_parse_options(CONFIG_CMDLINE);
- if (status != EFI_SUCCESS)
- goto fail_free_cmdline;
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse built-in command line\n");
+ return status;
+ }
}
- *cmdline_ptr = cmdline;
+ *cmdline_ptr = no_free_ptr(cmdline);
return EFI_SUCCESS;
-
-fail_free_cmdline:
- efi_err("Failed to parse options\n");
- efi_bs_call(free_pool, cmdline);
- return status;
}
efi_status_t efi_stub_common(efi_handle_t handle,
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 76e44c185f29..d96d4494070d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -4,6 +4,7 @@
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/kern_levels.h>
@@ -122,11 +123,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
#define efi_get_handle_num(size) \
((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
-#define for_each_efi_handle(handle, array, size, i) \
- for (i = 0; \
- i < efi_get_handle_num(size) && \
- ((handle = efi_get_handle_at((array), i)) || true); \
- i++)
+#define for_each_efi_handle(handle, array, num) \
+ for (int __i = 0; __i < (num) && \
+ ((handle = efi_get_handle_at((array), __i)) || true); \
+ __i++)
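+
+/* e.g. for_each_efi_handle(handle, buf, num) { ... } visits num handles in buf */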
static inline
void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
@@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
* the EFI memory map. Other related structures, e.g. x86 e820ext, need
* to factor in this headroom requirement as well.
*/
-#define EFI_MMAP_NR_SLACK_SLOTS 8
+#define EFI_MMAP_NR_SLACK_SLOTS 32
typedef struct efi_generic_dev_path efi_device_path_protocol_t;
@@ -314,7 +314,9 @@ union efi_boot_services {
void *close_protocol;
void *open_protocol_information;
void *protocols_per_handle;
- void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_handle_buffer)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t **);
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
@@ -1053,6 +1055,7 @@ void efi_puts(const char *str);
__printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
+DEFINE_FREE(efi_pool, void *, if (_T) efi_bs_call(free_pool, _T));
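+/* Pointers declared with __free(efi_pool) are released via free_pool when they go out of scope */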
void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
@@ -1082,8 +1085,7 @@ efi_status_t efi_parse_options(char const *cmdline);
void efi_parse_option_graphics(char *option);
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
+efi_status_t efi_setup_gop(struct screen_info *si);
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
const efi_char16_t *optstr,
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index ea5da307d542..3785fb4986b4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -133,13 +133,11 @@ void efi_parse_option_graphics(char *option)
static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
unsigned long info_size;
-
u32 max_mode, cur_mode;
+ efi_status_t status;
int pf;
mode = efi_table_attr(gop, mode);
@@ -154,17 +152,13 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cur_mode;
}
- status = efi_call_proto(gop, query_mode, cmdline.mode,
- &info_size, &info);
+ status = efi_call_proto(gop, query_mode, cmdline.mode, &info_size, &info);
if (status != EFI_SUCCESS) {
efi_err("Couldn't get mode information\n");
return cur_mode;
}
pf = info->pixel_format;
-
- efi_bs_call(free_pool, info);
-
if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
efi_err("Invalid PixelFormat\n");
return cur_mode;
@@ -173,6 +167,28 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cmdline.mode;
}
+static u32 choose_mode(efi_graphics_output_protocol_t *gop,
+ bool (*match)(const efi_graphics_output_mode_info_t *, u32, void *),
+ void *ctx)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
+
+ for (u32 m = 0; m < max_mode; m++) {
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
+ unsigned long info_size;
+ efi_status_t status;
+
+ status = efi_call_proto(gop, query_mode, m, &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (match(info, m, ctx))
+ return m;
+ }
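+ /* No mode matched: return ctx, which res/list callers use to pass a fallback mode */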
+ return (unsigned long)ctx;
+}
+
static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
{
if (pixel_format == PIXEL_BIT_MASK) {
@@ -185,192 +201,117 @@ static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
return 32;
}
-static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+static bool match_res(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
{
- efi_status_t status;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- mode = efi_table_attr(gop, mode);
+ return cmdline.res.width == info->horizontal_resolution &&
+ cmdline.res.height == info->vertical_resolution &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi));
+}
- cur_mode = efi_table_attr(mode, mode);
- info = efi_table_attr(mode, info);
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ if (match_res(efi_table_attr(mode, info), cur_mode, NULL))
return cur_mode;
- max_mode = efi_table_attr(mode, max_mode);
-
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return choose_mode(gop, match_res, (void *)cur_mode);
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+struct match {
+ u32 mode;
+ u32 area;
+ u8 depth;
+};
- efi_bs_call(free_pool, info);
+static bool match_auto(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ u32 area = info->horizontal_resolution * info->vertical_resolution;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
+ u8 depth = pixel_bpp(pf, pi);
+ struct match *m = ctx;
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
- return m;
- }
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- efi_err("Couldn't find requested mode\n");
+ if (area > m->area || (area == m->area && depth > m->depth))
+ *m = (struct match){ mode, area, depth };
- return cur_mode;
+ return false;
}
static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode, best_mode, area;
- u8 depth;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h, a;
- u8 d;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ struct match match = {};
- info = efi_table_attr(mode, info);
-
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- best_mode = cur_mode;
- area = w * h;
- depth = pixel_bpp(pf, pi);
+ choose_mode(gop, match_auto, &match);
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return match.mode;
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static bool match_list(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ u32 cur_mode = (unsigned long)ctx;
+ int pf = info->pixel_format;
+ const char *dstr;
+ u8 depth = 0;
+ bool valid;
- efi_bs_call(free_pool, info);
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- a = w * h;
- if (a < area)
- continue;
- d = pixel_bpp(pf, pi);
- if (a > area || d > depth) {
- best_mode = m;
- area = a;
- depth = d;
- }
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ depth = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
}
- return best_mode;
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ mode,
+ (mode == cur_mode) ? '*' : ' ',
+ !valid ? '-' : ' ',
+ info->horizontal_resolution,
+ info->vertical_resolution,
+ dstr, depth);
+
+ return false;
}
static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
- u8 d;
- const char *dstr;
- bool valid;
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
efi_input_key_t key;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ efi_status_t status;
efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
efi_puts(" * = current mode\n"
" - = unusable mode\n");
- for (m = 0; m < max_mode; m++) {
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- efi_bs_call(free_pool, info);
-
- valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- d = 0;
- switch (pf) {
- case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
- dstr = "rgb";
- break;
- case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
- dstr = "bgr";
- break;
- case PIXEL_BIT_MASK:
- dstr = "";
- d = pixel_bpp(pf, pi);
- break;
- case PIXEL_BLT_ONLY:
- dstr = "blt";
- break;
- default:
- dstr = "xxx";
- break;
- }
-
- efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
- m,
- m == cur_mode ? '*' : ' ',
- !valid ? '-' : ' ',
- w, h, dstr, d);
- }
+ choose_mode(gop, match_list, (void *)cur_mode);
efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
@@ -461,26 +402,25 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_graphics_output_protocol_t *
-find_gop(efi_guid_t *proto, unsigned long size, void **handles)
+static efi_graphics_output_protocol_t *find_gop(unsigned long num,
+ const efi_handle_t handles[])
{
efi_graphics_output_protocol_t *first_gop;
efi_handle_t h;
- int i;
first_gop = NULL;
- for_each_efi_handle(h, handles, size, i) {
+ for_each_efi_handle(h, handles, num) {
efi_status_t status;
efi_graphics_output_protocol_t *gop;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
-
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
void *dummy = NULL;
- status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID,
+ (void **)&gop);
if (status != EFI_SUCCESS)
continue;
@@ -500,7 +440,8 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_CONSOLE_OUT_DEVICE_GUID, &dummy);
if (status == EFI_SUCCESS)
return gop;
@@ -511,16 +452,22 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
return first_gop;
}
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size, void **handles)
+efi_status_t efi_setup_gop(struct screen_info *si)
{
- efi_graphics_output_protocol_t *gop;
+ efi_handle_t *handles __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
+ efi_graphics_output_protocol_t *gop;
+ efi_status_t status;
+ unsigned long num;
- gop = find_gop(proto, size, handles);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID, NULL, &num,
+ &handles);
+ if (status != EFI_SUCCESS)
+ return status;
- /* Did we find any GOPs? */
+ gop = find_gop(num, handles);
if (!gop)
return EFI_NOT_FOUND;
@@ -552,29 +499,3 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
return EFI_SUCCESS;
}
-
-/*
- * See if we have Graphics Output Protocol
- */
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size)
-{
- efi_status_t status;
- void **gop_handle = NULL;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&gop_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
- &size, gop_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- status = setup_gop(si, proto, size, gop_handle);
-
-free_handle:
- efi_bs_call(free_pool, gop_handle);
- return status;
-}
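
The gop.c conversion above, and the kaslr.c, mem.c, randomalloc.c and relocate.c hunks that follow, all move from manual efi_bs_call(free_pool, ...) calls to scope-based cleanup. A minimal sketch of the idiom, assuming an efi_pool helper declared along the lines of include/linux/cleanup.h's DEFINE_FREE() (the exact libstub declaration may differ, and query_example() is illustrative):

	#include <linux/cleanup.h>

	/* Assumed: free a boot-services pool allocation when the annotated
	 * variable goes out of scope; a NULL pointer is ignored. */
	DEFINE_FREE(efi_pool, void *, if (_T) efi_bs_call(free_pool, _T))

	static efi_status_t query_example(efi_graphics_output_protocol_t *gop, u32 m)
	{
		efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
		unsigned long info_size;
		efi_status_t status;

		status = efi_call_proto(gop, query_mode, m, &info_size, &info);
		if (status != EFI_SUCCESS)
			return status;		/* info is still NULL, nothing freed */

		/* ... inspect info ... */
		return EFI_SUCCESS;		/* info freed automatically here */
	}

Because every return path frees the allocation implicitly, the goto-label error chains in the original code collapse into plain early returns.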
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 6318c40bda38..4bc963e999eb 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -57,7 +57,7 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
*/
static bool check_image_region(u64 base, u64 size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
bool ret = false;
int map_offset;
@@ -80,8 +80,6 @@ static bool check_image_region(u64 base, u64 size)
}
}
- efi_bs_call(free_pool, map);
-
return ret;
}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index 4f1fa302234d..9c82259eea81 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -20,10 +20,10 @@
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl)
{
+ struct efi_boot_memmap tmp, *m __free(efi_pool) = NULL;
int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
: EFI_LOADER_DATA;
efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
- struct efi_boot_memmap *m, tmp;
efi_status_t status;
unsigned long size;
@@ -48,24 +48,20 @@ efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
*/
status = efi_bs_call(install_configuration_table, &tbl_guid, m);
if (status != EFI_SUCCESS)
- goto free_map;
+ return status;
}
m->buff_size = m->map_size = size;
status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
&m->desc_size, &m->desc_ver);
- if (status != EFI_SUCCESS)
- goto uninstall_table;
+ if (status != EFI_SUCCESS) {
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+ return status;
+ }
- *map = m;
+ *map = no_free_ptr(m);
return EFI_SUCCESS;
-
-uninstall_table:
- if (install_cfg_tbl)
- efi_bs_call(install_configuration_table, &tbl_guid, NULL);
-free_map:
- efi_bs_call(free_pool, m);
- return status;
}
/**
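
efi_get_memory_map() above must hand its allocation back to the caller, so the success path disarms the scoped cleanup with no_free_ptr(), which returns the pointer and resets the annotated variable to NULL. A sketch of the ownership transfer (alloc_for_caller() is illustrative, same assumed efi_pool helper as above):

	static efi_status_t alloc_for_caller(void **out, unsigned long size)
	{
		void *buf __free(efi_pool) = NULL;
		efi_status_t status;

		status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &buf);
		if (status != EFI_SUCCESS)
			return status;		/* buf is NULL, cleanup is a no-op */

		*out = no_free_ptr(buf);	/* caller now owns the buffer */
		return EFI_SUCCESS;		/* cleanup sees NULL, frees nothing */
	}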
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
index 99fb25d2bcf5..1dccf77958d3 100644
--- a/drivers/firmware/efi/libstub/pci.c
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -16,37 +16,20 @@
void efi_pci_disable_bridge_busmaster(void)
{
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long pci_handle_size = 0;
- efi_handle_t *pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
+ unsigned long pci_handle_num;
efi_handle_t handle;
efi_status_t status;
u16 class, command;
- int i;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL) {
- if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- efi_err("Failed to locate PCI I/O handles'\n");
- return;
- }
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
- (void **)&pci_handle);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &pci_handle_num, &pci_handle);
if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to locate PCI I/O handles\n");
return;
}
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, pci_handle);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to locate PCI I/O handles'\n");
- goto free_handle;
- }
-
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
unsigned long segment_nr, bus_nr, device_nr, func_nr;
@@ -82,7 +65,7 @@ void efi_pci_disable_bridge_busmaster(void)
efi_bs_call(disconnect_controller, handle, NULL, NULL);
}
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
status = efi_bs_call(handle_protocol, handle, &pci_proto,
@@ -108,7 +91,4 @@ void efi_pci_disable_bridge_busmaster(void)
if (status != EFI_SUCCESS)
efi_err("Failed to disable PCI busmastering\n");
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
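
The pci.c hunk replaces the classic three-step LocateHandle dance (probe for the size, allocate, locate again) with a single LocateHandleBuffer call that allocates the handle array itself. Combined with __free(efi_pool) and the count-based for_each_efi_handle() iterator, the lookup becomes a handful of lines. A sketch (walk_pci_handles() is illustrative):

	static void walk_pci_handles(void)
	{
		efi_guid_t guid = EFI_PCI_IO_PROTOCOL_GUID;
		efi_handle_t *handles __free(efi_pool) = NULL;
		unsigned long num;
		efi_handle_t h;
		efi_status_t status;

		status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
				     &guid, NULL, &num, &handles);
		if (status != EFI_SUCCESS)
			return;

		/* the iterator now takes an element count, not a byte size */
		for_each_efi_handle(h, handles, num) {
			/* inspect protocols on h */
		}
	}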
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index c41e7b2091cd..e5872e38d9a4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -59,9 +59,9 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long alloc_min,
unsigned long alloc_max)
{
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
- struct efi_boot_memmap *map;
efi_status_t status;
int map_offset;
@@ -130,7 +130,5 @@ efi_status_t efi_random_alloc(unsigned long size,
break;
}
- efi_bs_call(free_pool, map);
-
return status;
}
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index d694bcfa1074..99b45d1cd624 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -23,14 +23,14 @@
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
unsigned long nr_pages;
int i;
status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
/*
* Enforce minimum alignment that EFI or Linux requires when
@@ -79,11 +79,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
}
if (i == map->map_size / map->desc_size)
- status = EFI_NOT_FOUND;
+ return EFI_NOT_FOUND;
- efi_bs_call(free_pool, map);
-fail:
- return status;
+ return EFI_SUCCESS;
}
/**
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 188c8000d245..863910e9eefc 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -42,7 +42,7 @@ union sev_memory_acceptance_protocol {
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
- struct pci_setup_rom *rom = NULL;
+ struct pci_setup_rom *rom __free(efi_pool) = NULL;
efi_status_t status;
unsigned long size;
uint64_t romsize;
@@ -75,14 +75,13 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
rom->data.len = size - sizeof(struct setup_data);
rom->data.next = 0;
rom->pcilen = romsize;
- *__rom = rom;
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
PCI_VENDOR_ID, 1, &rom->vendor);
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->vendor\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
@@ -90,21 +89,18 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->devid\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
&rom->device, &rom->function);
if (status != EFI_SUCCESS)
- goto free_struct;
+ return status;
memcpy(rom->romdata, romimage, romsize);
- return status;
-
-free_struct:
- efi_bs_call(free_pool, rom);
- return status;
+ *__rom = no_free_ptr(rom);
+ return EFI_SUCCESS;
}
/*
@@ -119,38 +115,23 @@ free_struct:
static void setup_efi_pci(struct boot_params *params)
{
efi_status_t status;
- void **pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long size = 0;
struct setup_data *data;
+ unsigned long num;
efi_handle_t h;
- int i;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
-
- if (status == EFI_BUFFER_TOO_SMALL) {
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&pci_handle);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
- return;
- }
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
- }
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &num, &pci_handle);
if (status != EFI_SUCCESS)
- goto free_handle;
+ return;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
- for_each_efi_handle(h, pci_handle, size, i) {
+ for_each_efi_handle(h, pci_handle, num) {
efi_pci_io_protocol_t *pci = NULL;
struct pci_setup_rom *rom;
@@ -170,9 +151,6 @@ static void setup_efi_pci(struct boot_params *params)
data = (struct setup_data *)rom;
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
static void retrieve_apple_device_properties(struct boot_params *boot_params)
@@ -405,116 +383,13 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t
-setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
- efi_uga_draw_protocol_t *uga = NULL, *first_uga;
- efi_handle_t handle;
- int i;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- first_uga = NULL;
- for_each_efi_handle(handle, uga_handle, size, i) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
-
- status = efi_bs_call(handle_protocol, handle, uga_proto,
- (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- pciio = NULL;
- efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- width = w;
- height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
-
- if (!width && !height)
- goto free_handle;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
-
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
-
-free_handle:
- efi_bs_call(free_pool, uga_handle);
-
- return status;
-}
-
static void setup_graphics(struct boot_params *boot_params)
{
- efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- struct screen_info *si;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- void **uga_handle = NULL;
-
- si = &boot_params->screen_info;
- memset(si, 0, sizeof(*si));
+ struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si));
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &graphics_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- status = efi_setup_gop(si, &graphics_proto, size);
-
- if (status != EFI_SUCCESS) {
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &uga_proto, NULL, &size, uga_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- setup_uga(si, &uga_proto, size);
- }
+ efi_setup_gop(si);
}
-
static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
{
efi_bs_call(exit, handle, status, 0, NULL);
@@ -737,7 +612,7 @@ static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
__u32 nr_desc;
@@ -751,13 +626,14 @@ static efi_status_t allocate_e820(struct boot_params *params,
EFI_MMAP_NR_SLACK_SLOTS;
status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
}
- if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS)
- status = allocate_unaccepted_bitmap(nr_desc, map);
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ return allocate_unaccepted_bitmap(nr_desc, map);
- efi_bs_call(free_pool, map);
- return status;
+ return EFI_SUCCESS;
}
struct exit_boot_struct {
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index cc807ed35aed..1e509595ac03 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
_ret_; \
})
+#ifdef CONFIG_EFI
static int __init efifb_set_system(const struct dmi_system_id *id)
{
struct efifb_dmi_info *info = id->driver_data;
@@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = {
.add_links = efifb_add_links,
};
-#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
__init void sysfb_apply_efi_quirks(void)
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 66042160b361..773d05078e0a 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -30,7 +30,7 @@ static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj)
}
static ssize_t mem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -40,7 +40,7 @@ static ssize_t mem_read(struct file *filp, struct kobject *kobj,
}
static ssize_t mem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -53,7 +53,7 @@ static ssize_t mem_write(struct file *filp, struct kobject *kobj,
memcpy(entry->mem_file_buf + pos, buf, count);
return count;
}
-static BIN_ATTR_ADMIN_RW(mem, 0);
+static const BIN_ATTR_ADMIN_RW(mem, 0);
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -79,14 +79,14 @@ static struct attribute *attrs[] = {
NULL,
};
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_mem,
NULL,
};
static const struct attribute_group cbmem_entry_group = {
.attrs = attrs,
- .bin_attrs = bin_attrs,
+ .bin_attrs_new = bin_attrs,
};
static const struct attribute_group *dev_groups[] = {
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 24e666d5c3d1..e8fb00dcaf65 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -488,7 +488,7 @@ static const struct efivar_operations efivar_ops = {
#endif /* CONFIG_EFI */
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct gsmi_set_eventlog_param param = {
@@ -528,9 +528,9 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
}
-static struct bin_attribute eventlog_bin_attr = {
+static const struct bin_attribute eventlog_bin_attr = {
.attr = {.name = "append_to_eventlog", .mode = 0200},
- .write = eventlog_write,
+ .write_new = eventlog_write,
};
static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index b9d99fe1ff0f..d957af6f9349 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -14,7 +14,7 @@
#include "memconsole.h"
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
@@ -28,7 +28,7 @@ static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
static struct bin_attribute memconsole_bin_attr = {
.attr = {.name = "log", .mode = 0444},
- .read = memconsole_read,
+ .read_new = memconsole_read,
};
void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1749529f63d4..254ac6545d68 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -56,7 +56,7 @@ static struct vpd_section ro_vpd;
static struct vpd_section rw_vpd;
static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_attrib_info *info = bin_attr->private;
@@ -121,7 +121,7 @@ static int vpd_section_attrib_add(const u8 *key, u32 key_len,
info->bin_attr.attr.name = info->key;
info->bin_attr.attr.mode = 0444;
info->bin_attr.size = value_len;
- info->bin_attr.read = vpd_attrib_read;
+ info->bin_attr.read_new = vpd_attrib_read;
info->bin_attr.private = info;
info->value = value;
@@ -156,7 +156,7 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
}
static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_section *sec = bin_attr->private;
@@ -201,7 +201,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->bin_attr.attr.name = sec->raw_name;
sec->bin_attr.attr.mode = 0444;
sec->bin_attr.size = size;
- sec->bin_attr.read = vpd_section_read;
+ sec->bin_attr.read_new = vpd_section_read;
sec->bin_attr.private = sec;
err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
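
The cbmem, gsmi, memconsole and vpd changes above are all one sysfs migration: binary attribute callbacks now take a const struct bin_attribute, and are hooked up through the transitional _new members (read_new, write_new, bin_attrs_new) so const and non-const implementations can coexist while the tree converts. A sketch of the read-only shape (log_read() and log_attr are illustrative):

	static ssize_t log_read(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *bin_attr,
				char *buf, loff_t pos, size_t count)
	{
		/* copy up to count bytes starting at pos into buf and
		 * return how many were produced */
		return 0;
	}

	static const struct bin_attribute log_attr = {
		.attr = { .name = "log", .mode = 0444 },
		.read_new = log_read,
	};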
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index 2b4c2826f572..574930729ddd 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -152,7 +152,6 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
- struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
int arglen = desc->arginfo & 0xf;
int i, ret;
void *args_virt __free(qcom_tzmem) = NULL;
@@ -173,6 +172,11 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
+
+ if (!mempool)
+ return -EINVAL;
+
args_virt = qcom_tzmem_alloc(mempool,
SCM_SMC_N_EXT_ARGS * sizeof(u64),
flag);
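
Arguments that do not fit in SMC registers, like the key material in the wrapped-key calls below, cross into TrustZone through shared memory: allocate from the tzmem pool, copy in, pass physical addresses, copy results out, and wipe the buffers. A minimal sketch of that bounce-buffer pattern (scm_roundtrip() is illustrative; the real callers build a qcom_scm_desc and issue qcom_scm_call()):

	static int scm_roundtrip(struct qcom_tzmem_pool *pool,
				 const u8 *in, size_t in_size,
				 u8 *out, size_t out_size)
	{
		/* __free(qcom_tzmem) releases both buffers at end of scope */
		void *in_buf __free(qcom_tzmem) =
			qcom_tzmem_alloc(pool, in_size, GFP_KERNEL);
		void *out_buf __free(qcom_tzmem) =
			qcom_tzmem_alloc(pool, out_size, GFP_KERNEL);
		int ret;

		if (!in_buf || !out_buf)
			return -ENOMEM;

		memcpy(in_buf, in, in_size);
		/* pass qcom_tzmem_to_phys(in_buf) / qcom_tzmem_to_phys(out_buf)
		 * plus the sizes to the secure world here */
		ret = 0;
		if (!ret)
			memcpy(out, out_buf, out_size);

		/* wipe secrets before the pool memory is recycled */
		memzero_explicit(in_buf, in_size);
		memzero_explicit(out_buf, out_size);
		return ret;
	}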
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 72bf87ddcd96..f0569bb9411f 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -217,7 +217,10 @@ static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
- return __scm ? __scm->mempool : NULL;
+ if (!qcom_scm_is_available())
+ return NULL;
+
+ return __scm->mempool;
}
static enum qcom_scm_convention __get_convention(void)
@@ -1279,6 +1282,220 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
+bool qcom_scm_has_wrapped_key_support(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_DERIVE_SW_SECRET) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_GENERATE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_PREPARE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_IMPORT_ICE_KEY);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);
+
+/**
+ * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ * @sw_secret_size: size of the software secret to derive in bytes
+ *
+ * Derive a software secret from an ephemerally-wrapped key for software crypto
+ * operations. This is done by calling into the secure execution environment,
+ * which then calls into the hardware to unwrap and derive the secret.
+ *
+ * For more information on sw_secret, see the "Hardware-wrapped keys" section of
+ * Documentation/block/inline-encryption.rst.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ sw_secret_size,
+ GFP_KERNEL);
+ if (!sw_secret_buf)
+ return -ENOMEM;
+
+ memcpy(eph_key_buf, eph_key, eph_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[1] = eph_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
+ desc.args[3] = sw_secret_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(sw_secret, sw_secret_buf, sw_secret_size);
+
+ memzero_explicit(eph_key_buf, eph_key_size);
+ memzero_explicit(sw_secret_buf, sw_secret_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);
+
+/**
+ * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Generate a key using the built-in HW module in the SoC. The resulting key is
+ * returned wrapped with the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);
+
+/**
+ * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
+ * added protection. The resulting key will only be valid for the current boot.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ memcpy(lt_key_buf, lt_key, lt_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[3] = eph_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(eph_key, eph_key_buf, eph_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ memzero_explicit(eph_key_buf, eph_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);
+
+/**
+ * qcom_scm_import_ice_key() - Import key for storage encryption
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
+ * wrap the raw key using the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ raw_key_size,
+ GFP_KERNEL);
+ if (!raw_key_buf)
+ return -ENOMEM;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ memcpy(raw_key_buf, raw_key, raw_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
+ desc.args[1] = raw_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[3] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(raw_key_buf, raw_key_size);
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
+
/**
* qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
*
@@ -1768,18 +1985,23 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
* any potential issues with this, only allow validated machines for now.
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "asus,vivobook-s15" },
{ .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,omnibook-x14" },
+ { .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "lenovo,yoga-slim7x" },
{ .compatible = "microsoft,arcata", },
+ { .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
{ .compatible = "qcom,x1e80100-qcp" },
+ { .compatible = "qcom,x1p42100-crd" },
{ }
};
@@ -1867,7 +2089,8 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
*/
bool qcom_scm_is_available(void)
{
- return !!READ_ONCE(__scm);
+ /* Paired with smp_store_release() in qcom_scm_probe */
+ return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
@@ -2024,18 +2247,22 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Let all above stores be available after this */
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
smp_store_release(&__scm, scm);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- if (irq != -ENXIO)
- return irq;
+ if (irq != -ENXIO) {
+ ret = irq;
+ goto err;
+ }
} else {
ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
IRQF_ONESHOT, "qcom-scm", __scm);
- if (ret < 0)
- return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ if (ret < 0) {
+ dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ goto err;
+ }
}
__get_convention();
@@ -2054,14 +2281,18 @@ static int qcom_scm_probe(struct platform_device *pdev)
qcom_scm_disable_sdi();
ret = of_reserved_mem_device_init(__scm->dev);
- if (ret && ret != -ENODEV)
- return dev_err_probe(__scm->dev, ret,
- "Failed to setup the reserved memory region for TZ mem\n");
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+ goto err;
+ }
ret = qcom_tzmem_enable(__scm->dev);
- if (ret)
- return dev_err_probe(__scm->dev, ret,
- "Failed to enable the TrustZone memory allocator\n");
+ if (ret) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+ goto err;
+ }
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = 0;
@@ -2069,9 +2300,11 @@ static int qcom_scm_probe(struct platform_device *pdev)
pool_config.max_size = SZ_256K;
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
- if (IS_ERR(__scm->mempool))
- return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ if (IS_ERR(__scm->mempool)) {
+ dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
+ goto err;
+ }
/*
* Initialize the QSEECOM interface.
@@ -2087,6 +2320,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
return 0;
+
+err:
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
+ smp_store_release(&__scm, NULL);
+
+ return ret;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
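
The availability change above is about ordering, not atomicity. smp_store_release() in probe guarantees that every earlier initialization store is visible before the __scm pointer itself becomes visible; smp_load_acquire() on the reader side guarantees that no later access to *__scm is reordered before the pointer load. The new error path unpublishes with the same release store of NULL. A sketch of the pairing (publish() and get_pool() are illustrative):

	static struct qcom_scm *__scm;

	static void publish(struct qcom_scm *scm)
	{
		/* all initialization stores to *scm are ordered before
		 * the pointer becomes visible to readers */
		smp_store_release(&__scm, scm);
	}

	static struct qcom_tzmem_pool *get_pool(void)
	{
		/* pairs with the release above: seeing the pointer
		 * implies seeing the fully initialized object */
		struct qcom_scm *scm = smp_load_acquire(&__scm);

		return scm ? scm->mempool : NULL;
	}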
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index e36b2f67607f..097369d38b84 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -128,6 +128,10 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
+#define QCOM_SCM_ES_DERIVE_SW_SECRET 0x07
+#define QCOM_SCM_ES_GENERATE_ICE_KEY 0x08
+#define QCOM_SCM_ES_PREPARE_ICE_KEY 0x09
+#define QCOM_SCM_ES_IMPORT_ICE_KEY 0x0a
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 93ee3aa092f8..add5ad29a673 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -529,9 +529,9 @@ config GPIO_OCTEON
family of SOCs.
config GPIO_OMAP
- tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST
+ tristate "TI OMAP GPIO support"
+ depends on ARCH_OMAP || COMPILE_TEST
default y if ARCH_OMAP
- depends on ARM
select GENERIC_IRQ_CHIP
select GPIOLIB_IRQCHIP
help
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 73e660c5e38a..17ab039c7413 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -32,14 +32,12 @@
* will be blocked until the current one completes.
* @interrupt_trigger : specifies the hardware configured IRQ trigger type
* (rising, falling, both, high)
-* @mapped_irq : kernel mapped irq number.
*/
struct altera_gpio_chip {
struct gpio_chip gc;
void __iomem *regs;
raw_spinlock_t gpio_lock;
int interrupt_trigger;
- int mapped_irq;
};
static void altera_gpio_irq_unmask(struct irq_data *d)
@@ -235,6 +233,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
int reg, ret;
struct altera_gpio_chip *altera_gc;
struct gpio_irq_chip *girq;
+ int mapped_irq;
altera_gc = devm_kzalloc(&pdev->dev, sizeof(*altera_gc), GFP_KERNEL);
if (!altera_gc)
@@ -271,8 +270,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
if (IS_ERR(altera_gc->regs))
return dev_err_probe(dev, PTR_ERR(altera_gc->regs), "failed to ioremap memory resource\n");
- altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
- if (altera_gc->mapped_irq < 0)
+ mapped_irq = platform_get_irq_optional(pdev, 0);
+ if (mapped_irq < 0)
goto skip_irq;
if (device_property_read_u32(dev, "altr,interrupt-type", &reg)) {
@@ -296,7 +295,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
- girq->parents[0] = altera_gc->mapped_irq;
+ girq->parents[0] = mapped_irq;
skip_irq:
ret = devm_gpiochip_add_data(dev, &altera_gc->gc, altera_gc);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 24417c3247b0..0cd4c36ae8aa 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -285,6 +285,7 @@ static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
};
static const struct of_device_id mpc8xxx_gpio_ids[] = {
+ { .compatible = "fsl,mpc8314-gpio", },
{ .compatible = "fsl,mpc8349-gpio", },
{ .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
{ .compatible = "fsl,mpc8610-gpio", },
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 272febc3230e..be4c9981ebc4 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1088,7 +1088,8 @@ static int pca953x_probe(struct i2c_client *client)
*/
reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset_gpio))
- return PTR_ERR(reset_gpio);
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to get reset gpio\n");
}
chip->client = client;
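
dev_err_probe() in the pca953x hunk folds logging and error propagation into one statement, and for -EPROBE_DEFER it skips the error-level message and records the reason instead (readable from the devices_deferred file in debugfs). The shape:

	reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset_gpio))
		return dev_err_probe(dev, PTR_ERR(reset_gpio),
				     "Failed to get reset gpio\n");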
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 71684dee2ca5..05f8781b5204 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -262,6 +262,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
chip->set = gpio_regmap_set_with_clear;
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index cd1f17041f8c..526640c39a11 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -15,8 +15,6 @@
#define TPS65219_GPIO0_DIR_MASK BIT(3)
#define TPS65219_GPIO0_OFFSET 2
#define TPS65219_GPIO0_IDX 0
-#define TPS65219_GPIO_DIR_IN 1
-#define TPS65219_GPIO_DIR_OUT 0
struct tps65219_gpio {
struct gpio_chip gpio_chip;
@@ -61,7 +59,7 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
* status bit.
*/
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return -ENOTSUPP;
return ret;
@@ -124,10 +122,10 @@ static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offs
return -ENOTSUPP;
}
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_IN)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_IN)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_IN);
+ return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_IN);
}
static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
@@ -136,10 +134,10 @@ static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int off
if (offset != TPS65219_GPIO0_IDX)
return 0;
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_OUT);
+ return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_OUT);
}
static const struct gpio_chip tps65219_template_chip = {
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 5e26eb3adabb..18f523a15b3c 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -29,18 +29,22 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
-#define TQMX86_GPII_NONE 0
-#define TQMX86_GPII_FALLING BIT(0)
-#define TQMX86_GPII_RISING BIT(1)
-/* Stored in irq_type as a trigger type, but not actually valid as a register
- * value, so the name doesn't use "GPII"
+/*
+ * NONE, FALLING and RISING use the same bit patterns that can be programmed to
+ * the GPII register (after passing them to the TQMX86_GPII_ macros to shift
+ * them to the right position)
*/
-#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
-#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
-#define TQMX86_GPII_BITS 2
+#define TQMX86_INT_TRIG_NONE 0
+#define TQMX86_INT_TRIG_FALLING BIT(0)
+#define TQMX86_INT_TRIG_RISING BIT(1)
+#define TQMX86_INT_TRIG_BOTH (BIT(0) | BIT(1))
+#define TQMX86_INT_TRIG_MASK (BIT(0) | BIT(1))
/* Stored in irq_type with GPII bits */
#define TQMX86_INT_UNMASKED BIT(2)
+#define TQMX86_GPIIC_CONFIG(i, v) ((v) << (2 * (i)))
+#define TQMX86_GPIIC_MASK(i) TQMX86_GPIIC_CONFIG(i, TQMX86_INT_TRIG_MASK)
+
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
@@ -48,7 +52,7 @@ struct tqmx86_gpio_data {
/* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
DECLARE_BITMAP(output, TQMX86_NGPIO);
- u8 irq_type[TQMX86_NGPI];
+ u8 irq_type[TQMX86_NGPIO];
};
static u8 tqmx86_gpio_read(struct tqmx86_gpio_data *gd, unsigned int reg)
@@ -62,6 +66,18 @@ static void tqmx86_gpio_write(struct tqmx86_gpio_data *gd, u8 val,
iowrite8(val, gd->io_base + reg);
}
+static void tqmx86_gpio_clrsetbits(struct tqmx86_gpio_data *gpio,
+ u8 clr, u8 set, unsigned int reg)
+ __must_hold(&gpio->spinlock)
+{
+ u8 val = tqmx86_gpio_read(gpio, reg);
+
+ val &= ~clr;
+ val |= set;
+
+ tqmx86_gpio_write(gpio, val, reg);
+}
+
static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
@@ -69,127 +85,137 @@ static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(tqmx86_gpio_read(gpio, TQMX86_GPIOD) & BIT(offset));
}
+static void _tqmx86_gpio_set(struct tqmx86_gpio_data *gpio, unsigned int offset,
+ int value)
+ __must_hold(&gpio->spinlock)
+{
+ __assign_bit(offset, gpio->output, value);
+ tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
+}
+
static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- __assign_bit(offset, gpio->output, value);
- tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
}
static int tqmx86_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
- /* Direction cannot be changed. Validate is an input. */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return 0;
- else
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ tqmx86_gpio_clrsetbits(gpio, BIT(offset), 0, TQMX86_GPIODD);
+
+ return 0;
}
static int tqmx86_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset,
int value)
{
- /* Direction cannot be changed, validate is an output */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
+ tqmx86_gpio_clrsetbits(gpio, 0, BIT(offset), TQMX86_GPIODD);
- tqmx86_gpio_set(chip, offset, value);
return 0;
}
static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- if (TQMX86_DIR_INPUT_MASK & BIT(offset))
- return GPIO_LINE_DIRECTION_IN;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ u8 val;
+
+ val = tqmx86_gpio_read(gpio, TQMX86_GPIODD);
+
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
-static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
+static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int hwirq)
__must_hold(&gpio->spinlock)
{
- u8 type = TQMX86_GPII_NONE, gpiic;
+ u8 type = TQMX86_INT_TRIG_NONE;
+ int gpiic_irq = hwirq - TQMX86_NGPO;
- if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
- type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
+ if (gpio->irq_type[hwirq] & TQMX86_INT_UNMASKED) {
+ type = gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK;
- if (type == TQMX86_INT_BOTH)
- type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
- ? TQMX86_GPII_FALLING
- : TQMX86_GPII_RISING;
+ if (type == TQMX86_INT_TRIG_BOTH)
+ type = tqmx86_gpio_get(&gpio->chip, hwirq)
+ ? TQMX86_INT_TRIG_FALLING
+ : TQMX86_INT_TRIG_RISING;
}
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
- gpiic |= type << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ tqmx86_gpio_clrsetbits(gpio,
+ TQMX86_GPIIC_MASK(gpiic_irq),
+ TQMX86_GPIIC_CONFIG(gpiic_irq, type),
+ TQMX86_GPIIC);
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
+ }
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
static void tqmx86_gpio_irq_unmask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] |= TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
}
static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
- unsigned long flags;
u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
- new_type = TQMX86_GPII_RISING;
+ new_type = TQMX86_INT_TRIG_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- new_type = TQMX86_GPII_FALLING;
+ new_type = TQMX86_INT_TRIG_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- new_type = TQMX86_INT_BOTH;
+ new_type = TQMX86_INT_TRIG_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
- gpio->irq_type[offset] |= new_type;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_TRIG_MASK;
+ gpio->irq_type[data->hwirq] |= new_type;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
return 0;
}
@@ -199,8 +225,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned long irq_bits, flags;
- int i;
+ unsigned long irq_bits;
+ int i, hwirq;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -210,32 +236,38 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
irq_bits = irq_status;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
- /*
- * Edge-both triggers are implemented by flipping the edge
- * trigger after each interrupt, as the controller only supports
- * either rising or falling edge triggers, but not both.
- *
- * Internally, the TQMx86 GPIO controller has separate status
- * registers for rising and falling edge interrupts. GPIIC
- * configures which bits from which register are visible in the
- * interrupt status register GPIIS and defines what triggers the
- * parent IRQ line. Writing to GPIIS always clears both rising
- * and falling interrupt flags internally, regardless of the
- * currently configured trigger.
- *
- * In consequence, we can cleanly implement the edge-both
- * trigger in software by first clearing the interrupt and then
- * setting the new trigger based on the current GPIO input in
- * tqmx86_gpio_irq_config() - even if an edge arrives between
- * reading the input and setting the trigger, we will have a new
- * interrupt pending.
- */
- if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
- tqmx86_gpio_irq_config(gpio, i);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
+ hwirq = i + TQMX86_NGPO;
+
+ /*
+ * Edge-both triggers are implemented by flipping the
+ * edge trigger after each interrupt, as the controller
+ * only supports either rising or falling edge triggers,
+ * but not both.
+ *
+ * Internally, the TQMx86 GPIO controller has separate
+ * status registers for rising and falling edge
+ * interrupts. GPIIC configures which bits from which
+ * register are visible in the interrupt status register
+ * GPIIS and defines what triggers the parent IRQ line.
+ * Writing to GPIIS always clears both rising and
+ * falling interrupt flags internally, regardless of the
+ * currently configured trigger.
+ *
+ * In consequence, we can cleanly implement the
+ * edge-both trigger in software by first clearing the
+ * interrupt and then setting the new trigger based on
+ * the current GPIO input in tqmx86_gpio_irq_config() -
+ * even if an edge arrives between reading the input and
+ * setting the trigger, we will have a new interrupt
+ * pending.
+ */
+ if ((gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK) ==
+ TQMX86_INT_TRIG_BOTH)
+ tqmx86_gpio_irq_config(gpio, hwirq);
+ }
}
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
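
The tqmx86 rework swaps explicit raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs for lock guards from include/linux/cleanup.h: guard() holds the lock until the end of the enclosing function scope, scoped_guard() until the end of its own block, so no return path can leak a held lock. A minimal sketch (function names illustrative):

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static void unmask_example(struct tqmx86_gpio_data *gpio, int hwirq)
	{
		/* dropped automatically when the function returns,
		 * irq flags restored implicitly */
		guard(raw_spinlock_irqsave)(&gpio->spinlock);

		gpio->irq_type[hwirq] |= TQMX86_INT_UNMASKED;
		tqmx86_gpio_irq_config(gpio, hwirq);
	}

	static void mask_example(struct tqmx86_gpio_data *gpio, int hwirq)
	{
		scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
			gpio->irq_type[hwirq] &= ~TQMX86_INT_UNMASKED;
			tqmx86_gpio_irq_config(gpio, hwirq);
		}
		/* lock already released here, safe to call into gpiolib */
	}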
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 6c3fbf382dba..b9171bf66168 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -22,7 +22,7 @@
static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret = 0;
ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);
@@ -46,7 +46,7 @@ static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret;
u8 gpoctl;
@@ -91,7 +91,7 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
twl6040gpo_chip.parent = &pdev->dev;
- ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, twl6040);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
twl6040gpo_chip.ngpio = 0;
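
The twl6040 fix uses the standard association between a gpio_chip and its driver data: register the private pointer as the data argument of devm_gpiochip_add_data() and recover it in callbacks with gpiochip_get_data(), instead of assuming a particular parent-device hierarchy. A sketch (gpo_get() and gpo_probe() are illustrative):

	static int gpo_get(struct gpio_chip *chip, unsigned int offset)
	{
		/* returns whatever probe registered below */
		struct twl6040 *twl6040 = gpiochip_get_data(chip);
		int ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);

		return ret < 0 ? ret : !!(ret & BIT(offset));
	}

	static int gpo_probe(struct platform_device *pdev)
	{
		struct twl6040 *twl6040 = dev_get_drvdata(pdev->dev.parent);

		/* the third argument is what gpiochip_get_data() hands back */
		return devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, twl6040);
	}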
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index c6a8f2c82680..792d94c49077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -65,7 +65,7 @@ struct xgpio_instance {
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
- spinlock_t gpio_lock; /* For serializing operations */
+ raw_spinlock_t gpio_lock; /* For serializing operations */
int irq;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
@@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
__assign_bit(bit, chip->state, val);
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
@@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
__set_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
__assign_bit(bit, chip->state, val);
@@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
__clear_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
int bit = xgpio_to_bit(chip, irq_offset);
u32 mask = BIT(bit / 32), temp;
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
@@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
gpiochip_disable_irq(&chip->gc, irq_offset);
}
@@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
gpiochip_enable_irq(&chip->gc, irq_offset);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__set_bit(bit, chip->enable);
@@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
- spin_lock(&chip->gpio_lock);
+ raw_spin_lock(&chip->gpio_lock);
xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
@@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
bitmap_copy(chip->last_irq_read, all, 64);
bitmap_or(all, rising, falling, 64);
- spin_unlock(&chip->gpio_lock);
+ raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
@@ -620,7 +620,7 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_set(chip->hw_map, 0, width[0]);
bitmap_set(chip->hw_map, 32, width[1]);
- spin_lock_init(&chip->gpio_lock);
+ raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
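The spinlock_t to raw_spinlock_t conversion above matters on PREEMPT_RT: there, spinlock_t becomes a sleeping lock, which must not be taken from the irqchip callbacks (xgpio_irq_mask/unmask) or the chained handler, while raw_spinlock_t always spins. A sketch of the pattern, using the real locking primitives around a hypothetical shadow-register update:

	#include <linux/spinlock.h>

	struct my_chip {
		raw_spinlock_t lock;	/* init with raw_spin_lock_init() in probe */
		u32 shadow;
	};

	static void my_update(struct my_chip *chip, u32 bit, bool set)
	{
		unsigned long flags;

		/* Safe in hard-IRQ context even on PREEMPT_RT. */
		raw_spin_lock_irqsave(&chip->lock, flags);
		if (set)
			chip->shadow |= BIT(bit);
		else
			chip->shadow &= ~BIT(bit);
		my_write_reg(chip, chip->shadow);	/* hypothetical MMIO write */
		raw_spin_unlock_irqrestore(&chip->lock, flags);
	}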
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 772fc7625639..2f51546b0b88 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -103,10 +103,15 @@ config DRM_KMS_HELPER
help
CRTC helpers for KMS drivers.
+config DRM_DRAW
+ bool
+ depends on DRM
+
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
depends on DRM
select FONT_SUPPORT
+ select DRM_DRAW
help
Enable a drm panic handler, which will display a user-friendly message
when a kernel panic occurs. It's useful when using a user-space
@@ -218,77 +223,7 @@ config DRM_CLIENT
option. Drivers that support the default clients should
select DRM_CLIENT_SELECTION instead.
-config DRM_CLIENT_LIB
- tristate
- depends on DRM
- select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
- select FB_CORE if DRM_FBDEV_EMULATION
- help
- This option enables the DRM client library and selects all
- modules and components according to the enabled clients.
-
-config DRM_CLIENT_SELECTION
- tristate
- depends on DRM
- select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
- help
- Drivers that support in-kernel DRM clients have to select this
- option.
-
-config DRM_CLIENT_SETUP
- bool
- depends on DRM_CLIENT_SELECTION
- help
- Enables the DRM client selection. DRM drivers that support the
- default clients should select DRM_CLIENT_SELECTION instead.
-
-menu "Supported DRM clients"
- depends on DRM_CLIENT_SELECTION
-
-config DRM_FBDEV_EMULATION
- bool "Enable legacy fbdev support for your modesetting driver"
- depends on DRM_CLIENT_SELECTION
- select DRM_CLIENT
- select DRM_CLIENT_SETUP
- select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- default FB
- help
- Choose this option if you have a need for the legacy fbdev
- support. Note that this support also provides the linux console
- support on top of your modesetting driver.
-
- If in doubt, say "Y".
-
-config DRM_FBDEV_OVERALLOC
- int "Overallocation of the fbdev buffer"
- depends on DRM_FBDEV_EMULATION
- default 100
- help
- Defines the fbdev buffer overallocation in percent. Default
- is 100. Typical values for double buffering will be 200,
- triple buffering 300.
-
-config DRM_FBDEV_LEAK_PHYS_SMEM
- bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
- depends on DRM_FBDEV_EMULATION && EXPERT
- default n
- help
- In order to keep user-space compatibility, we want in certain
- use-cases to keep leaking the fbdev physical address to the
- user-space program handling the fbdev buffer.
- This affects, not only, Amlogic, Allwinner or Rockchip devices
- with ARM Mali GPUs using an userspace Blob.
- This option is not supported by upstream developers and should be
- removed as soon as possible and be considered as a broken and
- legacy behaviour from a modern fbdev device driver.
-
- Please send any bug reports when using this to your proprietary
- software vendor that requires this.
-
- If in doubt, say "N" or spread the word to your closed source
- library vendor.
-
-endmenu
+source "drivers/gpu/drm/clients/Kconfig"
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
@@ -530,6 +465,10 @@ config DRM_HYPERV
config DRM_EXPORT_FOR_TESTS
bool
+# Separate option as not all DRM drivers use it
+config DRM_PANEL_BACKLIGHT_QUIRKS
+ tristate
+
config DRM_LIB_RANDOM
bool
default n
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 463afad1b5ca..19fb370fbc56 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -91,10 +91,12 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
drm-$(CONFIG_DRM_PANIC) += drm_panic.o
+drm-$(CONFIG_DRM_DRAW) += drm_draw.o
drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
+obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
# Memory-management helpers
@@ -149,14 +151,6 @@ drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
#
-# DRM clients
-#
-
-drm_client_lib-y := drm_client_setup.o
-drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
-obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
-
-#
# Drivers and the rest
#
@@ -165,6 +159,7 @@ obj-y += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-y += arm/
+obj-y += clients/
obj-y += display/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 41fa3377d9cf..1a11cab741ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -26,6 +26,7 @@ config DRM_AMDGPU
select DRM_BUDDY
select DRM_SUBALLOC_HELPER
select DRM_EXEC
+ select DRM_PANEL_BACKLIGHT_QUIRKS
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7b18c52825d..5b21674b07fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -105,7 +105,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o
+ umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o umc_v8_14.o
# add IH block
amdgpu-y += \
@@ -200,6 +200,7 @@ amdgpu-y += \
vcn_v4_0_3.o \
vcn_v4_0_5.o \
vcn_v5_0_0.o \
+ vcn_v5_0_1.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
@@ -208,7 +209,8 @@ amdgpu-y += \
jpeg_v4_0.o \
jpeg_v4_0_3.o \
jpeg_v4_0_5.o \
- jpeg_v5_0_0.o
+ jpeg_v5_0_0.o \
+ jpeg_v5_0_1.o
# add VPE block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index f44de9d4b6a1..e13fbd974141 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -334,6 +334,8 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
AMDGPU_INIT_LEVEL_RESET_RECOVERY);
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
+ /* TBD: Ideally should clear only GFX, SDMA blocks */
+ amdgpu_ras_clear_err_state(tmp_adev);
r = aldebaran_mode2_restore_ip(tmp_adev);
if (r)
goto end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4653a8d2823a..69895fccb474 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -880,6 +880,7 @@ struct amdgpu_device {
bool need_swiotlb;
bool accel_working;
struct notifier_block acpi_nb;
+ struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
struct debugfs_blob_wrapper debugfs_discovery_blob;
@@ -1174,7 +1175,6 @@ struct amdgpu_device {
struct work_struct reset_work;
- bool job_hang;
bool dc_enabled;
/* Mask of active clusters */
uint32_t aid_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 5ef6b745f222..f3289d289913 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -71,6 +71,11 @@ struct ras_query_context;
#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE)
#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index ec5e0dcf8613..deb0785350e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -140,7 +140,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -157,7 +157,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
* 2. turn on acp clock
* 3. power on acp tiles
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -236,7 +236,7 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
} else if (r) {
return r;
@@ -508,7 +508,7 @@ static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
/* return early if no ACP */
if (!adev->acp.acp_genpd) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -565,7 +565,7 @@ static int acp_suspend(struct amdgpu_ip_block *ip_block)
/* power up on suspend */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -575,7 +575,7 @@ static int acp_resume(struct amdgpu_ip_block *ip_block)
/* power down again on resume */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -584,19 +584,19 @@ static bool acp_is_idle(void *handle)
return true;
}
-static int acp_set_clockgating_state(void *handle,
+static int acp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int acp_set_powergating_state(void *handle,
+static int acp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable, 0);
return 0;
}
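Two mechanical conversions repeat across the amdgpu hunks in this series: the clockgating/powergating callbacks now take the owning struct amdgpu_ip_block * instead of an opaque void *handle, and amdgpu_dpm_set_powergating_by_smu() grows a trailing instance index (0 here). A sketch of a converted callback; the types and the DPM call are the real amdgpu ones, the function itself is illustrative:

	static int my_block_set_powergating_state(struct amdgpu_ip_block *ip_block,
						  enum amd_powergating_state state)
	{
		/* The device is now reached through the block, no cast needed. */
		struct amdgpu_device *adev = ip_block->adev;
		bool enable = (state == AMD_PG_STATE_GATE);

		/* Final 0 is the new per-instance argument added by this series. */
		return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP,
							 enable, 0);
	}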
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3afcd1e8aa54..2c1b38c5cfc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -368,7 +368,7 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
- amdgpu_bo_reserve(*bo, true);
+ (void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(*bo);
@@ -715,8 +715,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
@@ -724,7 +725,9 @@ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
/* Disable GFXOFF and PG. Temporary workaround
* to fix some compute applications issue on GFX9.
*/
- adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
+ struct amdgpu_ip_block *gfx_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (gfx_block != NULL)
+ gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state);
}
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
@@ -834,7 +837,7 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- if (!kiq_ring->sched.ready || adev->job_hang)
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
return 0;
ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
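The hunk above also fixes a subtle indexing bug: adev->ip_blocks[] is ordered by discovery, not by AMD_IP_BLOCK_TYPE_* value, so indexing it with a block type is wrong. The safe pattern is to look the block up by type and NULL-check, sketched here (amdgpu_device_ip_get_ip_block() is the real helper; the cast matches the hunk, since the callback signature change lands elsewhere in the series):

	struct amdgpu_ip_block *gfx_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

	/* The block may be absent on some ASICs, so check before dereferencing. */
	if (gfx_block && gfx_block->version->funcs->set_powergating_state)
		gfx_block->version->funcs->set_powergating_state((void *)gfx_block,
								 state);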
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 4b80ad860639..8af67f18500a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -433,6 +433,9 @@ void kgd2kfd_unlock_kfd(void);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault);
+
#else
static inline int kgd2kfd_init(void)
{
@@ -518,5 +521,12 @@ static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
}
+
+static inline bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ return false;
+}
+
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
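kgd2kfd_vmfault_fast_path() follows the header's existing convention: a real prototype when KFD is built in, and a static inline stub returning a safe default otherwise, so call sites need no #ifdefs. The general idiom, with a hypothetical feature:

	/* Hypothetical illustration of the CONFIG-gated stub idiom. */
	#if IS_ENABLED(CONFIG_MY_FEATURE)
	bool my_feature_handle_fault(struct my_dev *dev, bool retry);
	#else
	static inline bool my_feature_handle_fault(struct my_dev *dev, bool retry)
	{
		/* Compiled out: report "not handled" so callers take the slow path. */
		return false;
	}
	#endif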
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index cc66ebb7bae1..441568163e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1131,6 +1131,9 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
@@ -1179,6 +1182,9 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t low, high, pipe_reset_data = 0;
uint64_t queue_addr = 0;
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f30548f4c3b3..1e998f972c30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -730,7 +730,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
return;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
@@ -779,7 +779,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
}
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -989,7 +989,7 @@ unwind:
if (!attachment[i])
continue;
if (attachment[i]->bo_va) {
- amdgpu_bo_reserve(bo[i], true);
+ (void)amdgpu_bo_reserve(bo[i], true);
if (--attachment[i]->bo_va->ref_count == 0)
amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
amdgpu_bo_unreserve(bo[i]);
@@ -1259,11 +1259,11 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
return -EBUSY;
}
- amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
- amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+ (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update);
return 0;
}
@@ -2352,7 +2352,7 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
struct amdgpu_bo *bo = mem->bo;
- amdgpu_bo_reserve(bo, true);
+ (void)amdgpu_bo_reserve(bo, true);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
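The repeated (void) casts in this file are not behavioral changes: on these unmap/teardown paths there is no sensible recovery from a failed reserve or validate, so the cast documents that the return value is dropped on purpose and quiets must-check warnings. The shape of the pattern, as used above:

	/* Teardown: failure here cannot be handled, so discard explicitly. */
	(void)amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);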
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 45affc02548c..423fd2eebe1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -47,35 +47,37 @@
/* Check if current bios is an ATOM BIOS.
* Return true if it is ATOM BIOS. Otherwise, return false.
*/
-static bool check_atom_bios(uint8_t *bios, size_t size)
+static bool check_atom_bios(struct amdgpu_device *adev, size_t size)
{
uint16_t tmp, bios_header_start;
+ uint8_t *bios = adev->bios;
if (!bios || size < 0x49) {
- DRM_INFO("vbios mem is null or mem size is wrong\n");
+ dev_dbg(adev->dev, "VBIOS mem is null or mem size is wrong\n");
return false;
}
if (!AMD_IS_VALID_VBIOS(bios)) {
- DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
+ dev_dbg(adev->dev, "VBIOS signature incorrect %x %x\n", bios[0],
+ bios[1]);
return false;
}
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
- DRM_INFO("Can't locate bios header\n");
+ dev_dbg(adev->dev, "Can't locate VBIOS header\n");
return false;
}
tmp = bios_header_start + 4;
if (size < tmp) {
- DRM_INFO("BIOS header is broken\n");
+ dev_dbg(adev->dev, "VBIOS header is broken\n");
return false;
}
if (!memcmp(bios + tmp, "ATOM", 4) ||
!memcmp(bios + tmp, "MOTA", 4)) {
- DRM_DEBUG("ATOMBIOS detected\n");
+ dev_dbg(adev->dev, "ATOMBIOS detected\n");
return true;
}
@@ -118,7 +120,7 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -146,7 +148,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -186,7 +188,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
/* read complete BIOS */
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
- if (!check_atom_bios(adev->bios, len)) {
+ if (!check_atom_bios(adev, len)) {
kfree(adev->bios);
return false;
}
@@ -216,7 +218,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, romlen);
iounmap(bios);
- if (!check_atom_bios(adev->bios, romlen))
+ if (!check_atom_bios(adev, romlen))
goto free_bios;
adev->bios_size = romlen;
@@ -324,7 +326,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
break;
}
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -389,7 +391,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
vhdr->ImageLength,
GFP_KERNEL);
- if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
+ if (!check_atom_bios(adev, vhdr->ImageLength)) {
kfree(adev->bios);
return false;
}
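The check_atom_bios() rework bundles two logging changes: taking the device so messages can use dev_dbg(), which prefixes the device name (valuable on multi-GPU systems), and demoting the chatter from info to debug level. A sketch of the pattern with a hypothetical validator:

	/* Sketch: device-aware, debug-level validation messages. */
	static bool my_validate_image(struct device *dev, const u8 *img, size_t size)
	{
		if (!img || size < MY_MIN_SIZE) {	/* MY_MIN_SIZE is hypothetical */
			/* Tagged with the device name; only emitted when dynamic
			 * debug (or DEBUG) enables it, unlike the old DRM_INFO(). */
			dev_dbg(dev, "image is null or too small (%zu)\n", size);
			return false;
		}
		return true;
	}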
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 16153d275d7a..68bce6a6d09d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -414,7 +414,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw,
+ AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5df21529b3b1..5cc5f59e3018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1105,7 +1105,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
* We can't use gang submit on with reserved VMIDs when the VM changes
* can't be invalidated by more than one engine at the same time.
*/
- if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+ if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
for (i = 0; i < p->gang_size; ++i) {
struct drm_sched_entity *entity = p->entities[i];
struct drm_gpu_scheduler *sched = entity->rq->sched;
@@ -1189,7 +1189,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a68338cb7b4a..49ca8c814455 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2095,6 +2095,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
+ amdgpu_debugfs_vcn_sched_mask_init(adev);
amdgpu_debugfs_jpeg_sched_mask_init(adev);
amdgpu_debugfs_gfx_sched_mask_init(adev);
amdgpu_debugfs_compute_sched_mask_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cd4fac120834..36053b3d48b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -199,14 +199,16 @@ void amdgpu_set_init_level(struct amdgpu_device *adev,
}
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data);
/**
* DOC: pcie_replay_count
*
* The amdgpu driver provides a sysfs API for reporting the total number
- * of PCIe replays (NAKs)
+ * of PCIe replays (NAKs).
* The file pcie_replay_count is used for this and returns the total
- * number of replays as a sum of the NAKs generated and NAKs received
+ * number of replays as a sum of the NAKs generated and NAKs received.
*/
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
@@ -432,8 +434,8 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
* @dev: drm_device pointer
*
* Return:
- * 1 if the device supporte BACO;
- * 3 if the device support MACO (only works if BACO is supported)
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
int amdgpu_device_supports_baco(struct drm_device *dev)
@@ -580,7 +582,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
}
/**
- * amdgpu_device_aper_access - access vram by vram aperature
+ * amdgpu_device_aper_access - access vram by vram aperture
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
@@ -671,7 +673,7 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
* here is that the GPU reset is not running on another thread in parallel.
*
* For this we trylock the read side of the reset semaphore, if that succeeds
- * we know that the reset is not running in paralell.
+ * we know that the reset is not running in parallel.
*
* If the trylock fails we assert that we are either already holding the read
* side of the lock or are the reset thread itself and hold the write side of
@@ -1402,6 +1404,7 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
@@ -1736,7 +1739,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
uint32_t fw_ver;
err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
- /* force vPost if error occured */
+ /* force vPost if error occurred */
if (err)
return true;
@@ -2168,7 +2171,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2202,7 +2205,7 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2381,7 +2384,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
* the module parameter virtual_display. This feature provides a virtual
* display hardware on headless boards or in virtualized environments.
* This function parses and validates the configuration string specified by
- * the user and configues the virtual display configuration (number of
+ * the user and configures the virtual display configuration (number of
* virtual connectors, crtcs, etc.) specified.
*/
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
@@ -2444,7 +2447,7 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Parses the asic configuration parameters specified in the gpu info
- * firmware and makes them availale to the driver for use in configuring
+ * firmware and makes them available to the driver for use in configuring
* the asic.
* Returns 0 on success, -EINVAL on failure.
*/
@@ -2485,6 +2488,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_gpu_info.bin", chip_name);
if (err) {
dev_err(adev->dev,
@@ -2504,7 +2508,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
/*
- * Should be droped when DAL no longer needs it.
+ * Should be dropped when DAL no longer needs it.
*/
if (adev->asic_type == CHIP_NAVI12)
goto parse_soc_bounding_box;
@@ -3064,7 +3068,7 @@ init_failed:
*
* Writes a reset magic value to the gart pointer in VRAM. The driver calls
* this function before a GPU reset. If the value is retained after a
- * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents.
+ * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
*/
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
@@ -3140,7 +3144,7 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
@@ -3177,7 +3181,7 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
- r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
@@ -3379,7 +3383,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_amdkfd_suspend(adev, false);
- /* Workaroud for ASICs need to disable SMC first */
+ /* Workaround for ASICs need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
@@ -3481,7 +3485,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
}
@@ -4309,7 +4313,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/*
* Reset domain needs to be present early, before XGMI hive discovered
- * (if any) and intitialized to use reset sem and in_gpu reset flag
+ * (if any) and initialized to use reset sem and in_gpu reset flag
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
@@ -4599,6 +4603,11 @@ fence_driver_init:
amdgpu_device_check_iommu_direct_map(adev);
+ adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
+ r = register_pm_notifier(&adev->pm_nb);
+ if (r)
+ goto failed;
+
return 0;
release_ras_con:
@@ -4663,6 +4672,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
+ unregister_pm_notifier(&adev->pm_nb);
+
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
*/
@@ -4781,8 +4792,8 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
int ret;
- /* No need to evict vram on APUs for suspend to ram or s2idle */
- if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
+ /* No need to evict vram on APUs unless going to S4 */
+ if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
@@ -4795,6 +4806,41 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* Suspend & resume.
*/
/**
+ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
+ * @nb: notifier block
+ * @mode: suspend mode
+ * @data: data
+ *
+ * This function is called when the system is about to suspend or hibernate.
+ * It is used to evict resources from the device before the system goes to
+ * sleep while there is still access to swap.
+ */
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data)
+{
+ struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
+ int r;
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ adev->in_s4 = true;
+ fallthrough;
+ case PM_SUSPEND_PREPARE:
+ r = amdgpu_device_evict_resources(adev);
+ /*
+ * This is considered non-fatal at this time because
+ * amdgpu_device_prepare() will evict resources again and treat
+ * any failure there as fatal.
+ * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
+ */
+ if (r)
+ drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
* amdgpu_device_prepare - prepare for device suspend
*
* @dev: drm dev pointer
@@ -4833,7 +4879,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
return 0;
unprepare:
- adev->in_s0ix = adev->in_s3 = false;
+ adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
return r;
}
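The new PM notifier gives the driver a hook at PM_HIBERNATION_PREPARE / PM_SUSPEND_PREPARE, early enough that swap is still available as an eviction target. A self-contained sketch of the same pattern (register_pm_notifier() and the PM_* constants are the real kernel API; the my_* names are illustrative):

	#include <linux/suspend.h>

	static int my_pm_notifier(struct notifier_block *nb, unsigned long mode,
				  void *data)
	{
		struct my_dev *mdev = container_of(nb, struct my_dev, pm_nb);

		switch (mode) {
		case PM_HIBERNATION_PREPARE:	/* about to hibernate */
		case PM_SUSPEND_PREPARE:	/* about to suspend */
			my_evict_resources(mdev);	/* hypothetical helper */
			break;
		}
		return NOTIFY_DONE;
	}

	/* Init:     mdev->pm_nb.notifier_call = my_pm_notifier;
	 *           register_pm_notifier(&mdev->pm_nb);
	 * Teardown: unregister_pm_notifier(&mdev->pm_nb); */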
@@ -5184,7 +5230,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_ras_set_fed(adev, false);
+ amdgpu_ras_clear_err_state(adev);
amdgpu_irq_gpu_reset_resume_helper(adev);
/* some sw clean up VF needs to do before recover */
@@ -5241,16 +5287,18 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_has_job_running - check if there is any job in mirror list
+ * amdgpu_device_has_job_running - check if there is any unfinished job
*
* @adev: amdgpu_device pointer
*
- * check if there is any job in mirror list
+ * check if there is any job running on the device when the guest driver
+ * receives an FLR notification from the host driver. If there are still
+ * jobs running, the guest driver will not respond to the FLR reset.
+ * Instead, it lets the job hit the timeout and then issues the reset request.
*/
bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
{
int i;
- struct drm_sched_job *job;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -5258,11 +5306,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- spin_lock(&ring->sched.job_list_lock);
- job = list_first_entry_or_null(&ring->sched.pending_list,
- struct drm_sched_job, list);
- spin_unlock(&ring->sched.job_list_lock);
- if (job)
+ if (amdgpu_fence_count_emitted(ring))
return true;
}
return false;
@@ -5487,7 +5531,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
- amdgpu_ras_set_fed(tmp_adev, false);
+ amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
dev_warn(tmp_adev->dev, "asic atom init failed!");
@@ -5821,6 +5865,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
/*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+ /*
* Special case: RAS triggered and full reset isn't supported
*/
need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
@@ -5903,7 +5959,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
/*
- * Mark these ASICs to be reseted as untracked first
+ * Mark these ASICs to be reset as untracked first
* And add them back after reset completed
*/
amdgpu_unregister_gpu_instance(tmp_adev);
@@ -6106,7 +6162,7 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
*
- * Fetchs and stores in the driver the PCIE capabilities (gen speed
+ * Fetches and stores in the driver the PCIE capabilities (gen speed
* and lanes) of the slot the device is in. Handles APUs and
* virtualized environments where PCIE config space may not be available.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1040204ac8b9..949d74eff294 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -104,7 +104,9 @@
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
+#include "jpeg_v5_0_1.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
@@ -1340,7 +1342,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
*/
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
@@ -1705,7 +1707,7 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
* so this won't overflow.
*/
for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
- adev->vcn.vcn_codec_disable_mask[v] =
+ adev->vcn.inst[v].vcn_codec_disable_mask =
le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
}
break;
@@ -1836,6 +1838,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1890,6 +1893,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -2013,6 +2017,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
@@ -2184,6 +2189,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -2238,6 +2244,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
break;
case IP_VERSION(5, 0, 0):
@@ -2361,6 +2368,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
break;
+ case IP_VERSION(5, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2405,6 +2416,7 @@ static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
aqua_vanjaram_init_soc_config(adev);
break;
default:
@@ -2652,6 +2664,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->family = AMDGPU_FAMILY_AI;
break;
case IP_VERSION(9, 1, 0):
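The vcn_config / vcn_codec_disable_mask changes in this file reflect a broader refactor: per-instance VCN fields move out of parallel arrays into the per-instance struct, so adev->vcn.inst[i] holds everything about instance i. The shape of that refactor, sketched with simplified types (real field names, hypothetical struct names):

	/* Before: parallel arrays, one entry per instance. */
	struct vcn_before {
		u8  vcn_config[MAX_INST];
		u32 vcn_codec_disable_mask[MAX_INST];
	};

	/* After: related fields grouped per instance. */
	struct vcn_inst_state {
		u8  vcn_config;
		u32 vcn_codec_disable_mask;
		/* ... other per-instance state ... */
	};

	struct vcn_after {
		struct vcn_inst_state inst[MAX_INST];
	};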
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b119d27271c1..35c778426a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -33,6 +33,7 @@
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
#include <asm/div64.h>
#include <linux/pci.h>
@@ -1788,3 +1789,82 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
return 0;
}
+/* panic_abo is set in amdgpu_dm_plane_get_scanout_buffer() and only used in amdgpu_dm_set_pixel();
+ * both are called from the panic handler, and protected by the drm_panic spinlock.
+ */
+static struct amdgpu_bo *panic_abo;
+
+/* Use the indirect MMIO to write each pixel to the GPU VRAM.
+ * This is a simplified version of amdgpu_device_mm_access().
+ */
+static void amdgpu_display_set_pixel(struct drm_scanout_buffer *sb,
+ unsigned int x,
+ unsigned int y,
+ u32 color)
+{
+ struct amdgpu_res_cursor cursor;
+ unsigned long offset;
+ struct amdgpu_bo *abo = panic_abo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ uint32_t tmp;
+
+ offset = x * 4 + y * sb->pitch[0];
+ amdgpu_res_first(abo->tbo.resource, offset, 4, &cursor);
+
+ tmp = cursor.start >> 31;
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t) cursor.start) | 0x80000000);
+ if (tmp != 0xffffffff)
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ WREG32_NO_KIQ(mmMM_DATA, color);
+}
+
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct amdgpu_bo *abo;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ if (!fb)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("Framebuffer %dx%d %p4cc\n", fb->width, fb->height, &fb->format->format);
+
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
+ if (!abo)
+ return -EINVAL;
+
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format, because tiling will be disabled in panic_flush() */
+ sb->format = drm_format_info(fb->format->format);
+ if (!sb->format)
+ return -EINVAL;
+
+ sb->pitch[0] = fb->pitches[0];
+
+ if (abo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
+ if (abo->tbo.resource->mem_type != TTM_PL_VRAM) {
+ drm_warn(plane->dev, "amdgpu panic, framebuffer not in VRAM\n");
+ return -EINVAL;
+ }
+ /* Only handle 32bits format, to simplify mmio access */
+ if (fb->format->cpp[0] != 4) {
+ drm_warn(plane->dev, "amdgpu panic, pixel format is not 32bits\n");
+ return -EINVAL;
+ }
+ sb->set_pixel = amdgpu_display_set_pixel;
+ panic_abo = abo;
+ return 0;
+ }
+ if (!abo->kmap.virtual &&
+ ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
+ drm_warn(plane->dev, "amdgpu bo map failed, panic won't be displayed\n");
+ return -ENOMEM;
+ }
+ if (abo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
+ iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
+ else
+ iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
+
+ return 0;
+}
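The NO_CPU_ACCESS path above cannot kmap VRAM, so it reuses the classic MM_INDEX/MM_DATA indirect window: the low 31 address bits (with bit 31 set to select the extended range) go to MM_INDEX, the upper bits to MM_INDEX_HI, and a write to MM_DATA lands at that VRAM offset. A sketch of the access sequence, mirroring the hunk (register names and WREG32_NO_KIQ are the real amdgpu ones):

	/* Write one 32-bit value at a byte offset into VRAM via the
	 * MM_INDEX/MM_DATA indirect aperture. */
	static void vram_write32(struct amdgpu_device *adev, u64 offset, u32 value)
	{
		u32 hi = offset >> 31;	/* address bits above the 31-bit window */

		/* Bit 31 of MM_INDEX selects the HI-qualified extended range. */
		WREG32_NO_KIQ(mmMM_INDEX, lower_32_bits(offset) | 0x80000000);
		if (hi != 0xffffffff)	/* same guard as the hunk above */
			WREG32_NO_KIQ(mmMM_INDEX_HI, hi);
		WREG32_NO_KIQ(mmMM_DATA, value);
	}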
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 9d19940f73c8..dfa0d642ac16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,6 +23,8 @@
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
+#include <drm/drm_panic.h>
+
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
@@ -49,4 +51,7 @@ amdgpu_lookup_format_info(u32 format, uint64_t modifier);
int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 8e81a83d37d8..9f627caedc3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -36,6 +36,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
@@ -60,6 +61,8 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
+ amdgpu_vm_bo_update_shared(bo);
+
return 0;
}
@@ -345,7 +348,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
/* FIXME: This should be after the "if", but needs a fix to make sure
* DMABuf imports are initialized in the right VM list.
*/
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 38686203bea6..492b09d84571 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -23,7 +23,7 @@
*/
#include <drm/amdgpu_drm.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem.h>
@@ -2552,7 +2552,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
- adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_s4 = false;
if (r)
@@ -2916,7 +2915,6 @@ static const struct drm_driver amdgpu_kms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
@@ -2940,7 +2938,6 @@ const struct drm_driver amdgpu_partition_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index 5bc2cb661af7..2d86cc6f7f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -40,7 +40,6 @@
#define DRIVER_NAME "amdgpu"
#define DRIVER_DESC "AMD GPU"
-#define DRIVER_DATE "20150101"
extern const struct drm_driver amdgpu_partition_driver;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index df2cf5c33925..91d638098889 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -60,7 +60,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { };
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
ktime_t usage[AMDGPU_HW_IP_NUM];
const char *pl_name[] = {
[TTM_PL_VRAM] = "vram",
@@ -72,15 +72,8 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_DOORBELL] = "doorbell",
};
unsigned int hw_ip, i;
- int ret;
-
- ret = amdgpu_bo_reserve(vm->root.bo, false);
- if (ret)
- return;
-
- amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats));
- amdgpu_bo_unreserve(vm->root.bo);
+ amdgpu_vm_get_memory(vm, stats);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
/*
@@ -114,9 +107,11 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
stats[TTM_PL_VRAM].evicted/1024UL);
drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
- stats[TTM_PL_VRAM].requested/1024UL);
+ (stats[TTM_PL_VRAM].drm.shared +
+ stats[TTM_PL_VRAM].drm.private) / 1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
- stats[TTM_PL_TT].requested/1024UL);
+ (stats[TTM_PL_TT].drm.shared +
+ stats[TTM_PL_TT].drm.private) / 1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
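The fdinfo change drops the driver-private "requested" counter in favor of the common DRM memory stats: what used to be stats[pl].requested is now the sum of the generic shared and private counters kept per placement. In short (field names as in the hunk, the helper itself is hypothetical):

	/* amd-requested-* is now derived from the generic DRM counters. */
	static u64 requested_kib(const struct amdgpu_mem_stats *s)
	{
		return (s->drm.shared + s->drm.private) / 1024UL;
	}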
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ceb5163480f4..09c9194d5bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -384,7 +384,7 @@ int amdgpu_fru_sysfs_init(struct amdgpu_device *adev)
void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev)
{
- if (!is_fru_eeprom_supported(adev, NULL) || !adev->fru_info)
+ if (!adev->fru_info)
return;
sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index bc58dca18035..98f3196599ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -32,7 +32,7 @@ struct amdgpu_fru_info {
char product_name[AMDGPU_PRODUCT_NAME_LEN];
char serial[20];
char manufacturer_name[32];
- char fru_id[32];
+ char fru_id[50];
};
int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 1a5df8b94661..69429df09477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -42,6 +42,7 @@
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
@@ -87,10 +88,8 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
- if (aobj) {
- amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
- }
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_put(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -179,6 +178,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
if (r)
return r;
+ amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
@@ -252,6 +252,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
amdgpu_vm_bo_del(adev, bo_va);
+ amdgpu_vm_bo_update_shared(bo);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
@@ -839,7 +840,6 @@ error:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
@@ -899,7 +899,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
- amdgpu_vm_bo_invalidate(adev, robj, true);
+ amdgpu_vm_bo_invalidate(robj, true);
amdgpu_bo_unreserve(robj);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 69a6b6dba0a5..784b03abb3a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -515,7 +515,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev))
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
return 0;
spin_lock(&kiq->ring_lock);
@@ -567,7 +567,7 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang)
+ if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
return 0;
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
@@ -806,7 +806,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
/* If going to s2idle, no need to wait */
if (adev->in_s0ix) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
- AMD_IP_BLOCK_TYPE_GFX, true))
+ AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
@@ -818,7 +818,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
if (adev->gfx.gfx_off_state &&
- !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+ !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.funcs->init_spm_golden) {
@@ -1484,6 +1484,24 @@ static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
return 0;
}
+/**
+ * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * Provides the sysfs interface to manually run a cleaner shader, which is
+ * used to clear the GPU state between different tasks. Writing a value to the
+ * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
+ * The value written corresponds to the partition index on multi-partition
+ * devices. On single-partition devices, the value should be '0'.
+ *
+ * The cleaner shader clears the Local Data Store (LDS) and General Purpose
+ * Registers (GPRs) to ensure data isolation between GPU workloads.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -1532,6 +1550,19 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
return count;
}
+/**
+ * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer to store the output data
+ *
+ * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
+ * feature for each GPU partition. Reading from the 'enforce_isolation'
+ * sysfs file returns the isolation settings for all partitions, where '0'
+ * indicates disabled and '1' indicates enabled.
+ *
+ * Return: The number of bytes read from the sysfs file.
+ */
static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1555,6 +1586,20 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
return size;
}
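A matching read-side sketch for the enforce_isolation attribute; the path is again illustrative, and per the kernel-doc above the returned string carries one '0'/'1' per partition:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int show_enforce_isolation(void)
    {
    	char buf[64] = {0};
    	int fd = open("/sys/class/drm/card0/device/enforce_isolation",
    		      O_RDONLY);

    	if (fd < 0)
    		return -1;
    	if (read(fd, buf, sizeof(buf) - 1) < 0) {
    		close(fd);
    		return -1;
    	}
    	close(fd);
    	printf("enforce_isolation: %s", buf);	/* e.g. "0 1" */
    	return 0;
    }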
+/**
+ * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * This function allows control over the 'enforce_isolation' feature, which
+ * serializes access to the graphics engine. Writing '1' or '0' to the
+ * 'enforce_isolation' sysfs file enables or disables process isolation for
+ * each partition. The input should specify the setting for all partitions.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1940,6 +1985,17 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
mutex_unlock(&adev->enforce_isolation_mutex);
}
+/**
+ * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the GPU partition
+ *
+ * When kernel submissions come in, the jobs are given a time slice and once
+ * that time slice is up, if there are KFD user queues active, kernel
+ * submissions are blocked until KFD has had its time slice. Once the KFD time
+ * slice is up, KFD user queues are preempted and kernel submissions are
+ * unblocked and allowed to run again.
+ */
static void
amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
u32 idx)
@@ -1985,10 +2041,20 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
msleep(GFX_SLICE_PERIOD_MS);
}
+/**
+ * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring begin_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2007,15 +2073,28 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
+/**
+ * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring end_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2031,9 +2110,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
}
/*
@@ -2050,7 +2132,7 @@ static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
if (!adev)
return -ENODEV;
- mask = (1 << adev->gfx.num_gfx_rings) - 1;
+ mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
if ((val & mask) == 0)
return -EINVAL;
@@ -2078,7 +2160,7 @@ static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
ring = &adev->gfx.gfx_ring[i];
if (ring->sched.ready)
- mask |= 1 << i;
+ mask |= 1ULL << i;
}
*val = mask;
@@ -2120,7 +2202,7 @@ static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
if (!adev)
return -ENODEV;
- mask = (1 << adev->gfx.num_compute_rings) - 1;
+ mask = (1ULL << adev->gfx.num_compute_rings) - 1;
if ((val & mask) == 0)
return -EINVAL;
@@ -2149,7 +2231,7 @@ static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
ring = &adev->gfx.compute_ring[i];
if (ring->sched.ready)
- mask |= 1 << i;
+ mask |= 1ULL << i;
}
*val = mask;
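The 1ULL conversions above fix an integer-promotion bug: the literal 1 has type int, so building a 64-bit mask with `1 << i` loses bits (and is undefined behaviour) once a device exposes 32 or more rings. A standalone illustration, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	int num_rings = 40;	/* assumed ring count for illustration */
    	uint64_t mask = (1ULL << num_rings) - 1;

    	/* (1 << num_rings) - 1 would shift a 32-bit int by 40 bits:
    	 * undefined behaviour, and the high bits of the mask are lost.
    	 */
    	printf("mask=%#llx\n", (unsigned long long)mask);	/* 0xffffffffff */
    	return 0;
    }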
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8b512dc28df8..e0bc37557d2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,16 +89,14 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/**
* amdgpu_ib_free - free an IB (Indirect Buffer)
*
- * @adev: amdgpu_device pointer
* @ib: IB object to free
* @f: the fence SA bo need wait on for the ib alloation
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f)
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
{
- amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
+ amdgpu_sa_bo_free(&ib->sa_bo, f);
}
/**
@@ -193,8 +191,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
need_pipe_sync = true;
if (tmp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f3b0aaf3ebc6..901f8b12c672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -298,3 +298,9 @@ uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
dw2 = le32_to_cpu(ih->ring[ring_index + 2]);
return dw1 | ((u64)(dw2 & 0xffff) << 32);
}
+
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+{
+ return ih == &adev->irq.ih ? "ih" : ih == &adev->irq.ih_soft ? "sw ih" :
+ ih == &adev->irq.ih1 ? "ih1" : ih == &adev->irq.ih2 ? "ih2" : "unknown";
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 508f02eb0cf8..7d4395a5d8ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -110,4 +110,5 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
signed int offset);
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 263ce1811cc8..732744488b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -77,7 +77,8 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev)
sizeof(ucode_prefix));
/* read isp fw */
- r = amdgpu_ucode_request(adev, &adev->isp.fw, "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->isp.fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s.bin", ucode_prefix);
if (r) {
amdgpu_ucode_release(&adev->isp.fw);
return r;
@@ -128,13 +129,13 @@ static bool isp_is_idle(void *handle)
return true;
}
-static int isp_set_clockgating_state(void *handle,
+static int isp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int isp_set_powergating_state(void *handle,
+static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index a21c510c408e..100f04475943 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -102,8 +102,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
- adev->job_hang = true;
-
/*
* Do the coredump immediately after a job timeout to get a very
* close dump/snapshot/representation of GPU's current error status
@@ -181,7 +179,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
- adev->job_hang = false;
drm_dev_exit(idx);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -197,11 +194,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
- /*
- * Initialize the scheduler to at least some ring so that we always
- * have a pointer to adev.
- */
- (*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -267,7 +259,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = NULL;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(NULL, &job->ibs[i], f);
+ amdgpu_ib_free(&job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -366,6 +358,13 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
+ /*
+ * The VM structure might be released after the VMID is
+ * assigned, we had multiple problems with people trying to use
+ * the VM pointer so better set it to NULL.
+ */
+ if (!fence)
+ job->vm = NULL;
}
return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 3eb4a4653fce..d9cb343a8708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -27,7 +27,8 @@
#include "amdgpu_ras.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
-#define AMDGPU_MAX_JPEG_RINGS 8
+#define AMDGPU_MAX_JPEG_RINGS 10
+#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 59ec20b07a6a..32b27a1658e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1610,10 +1610,12 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
}
- r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mes.bin", ucode_prefix);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6852d50caa89..96f4b8904e9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -41,6 +41,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
#include "amdgpu_vm.h"
+#include "amdgpu_dma_buf.h"
/**
* DOC: amdgpu_object
@@ -324,6 +325,9 @@ error_free:
*
* Allocates and pins a BO for kernel internal use.
*
+ * This function is exported to allow the V4L2 isp device
+ * external to the drm device to create and access the kernel BO.
+ *
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
* Returns:
@@ -347,6 +351,76 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
+EXPORT_SYMBOL(amdgpu_bo_create_kernel);
+
+/**
+ * amdgpu_bo_create_isp_user - create user BO for isp
+ *
+ * @adev: amdgpu device object
+ * @dma_buf: DMABUF handle for isp buffer
+ * @domain: where to place it
+ * @bo: used to initialize BOs in structures
+ * @gpu_addr: GPU addr of the pinned BO
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use.
+ * It does a GART alloc to generate a gpu_addr for the BO, making it accessible
+ * through the GART aperture to the ISP HW.
+ *
+ * This function is exported to allow the V4L2 isp device, external to the drm
+ * device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
+ u64 *gpu_addr)
+
+{
+ struct drm_gem_object *gem_obj;
+ int r;
+
+ gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
+ *bo = gem_to_amdgpu_bo(gem_obj);
+ if (!(*bo)) {
+ dev_err(adev->dev, "failed to get valid isp user bo\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_bo_reserve(*bo, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_pin(*bo, domain);
+ if (r) {
+ dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo);
+ goto error_unpin;
+ }
+
+ if (!WARN_ON(!gpu_addr))
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo);
+
+ amdgpu_bo_unreserve(*bo);
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(*bo);
+error_unreserve:
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
+
+ return r;
+}
+EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
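A hedged sketch of how an external V4L2 isp driver might pair the new helper with amdgpu_bo_free_isp_user(); the GTT domain choice and the surrounding function are assumptions:

    /* Sketch: import an isp dma_buf, pin it, use its GART address. */
    static int isp_map_buffer(struct amdgpu_device *adev, struct dma_buf *dbuf)
    {
    	struct amdgpu_bo *bo;
    	u64 gpu_addr;
    	int r;

    	r = amdgpu_bo_create_isp_user(adev, dbuf, AMDGPU_GEM_DOMAIN_GTT,
    				      &bo, &gpu_addr);
    	if (r)
    		return r;

    	/* ... program the ISP HW with gpu_addr ... */

    	amdgpu_bo_free_isp_user(bo);	/* unpin and drop the reference */
    	return 0;
    }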
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -423,6 +497,9 @@ error:
* @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
* unmaps and unpin a BO for kernel internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to the drm device to free the kernel BO.
*/
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr)
@@ -447,6 +524,30 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
+EXPORT_SYMBOL(amdgpu_bo_free_kernel);
+
+/**
+ * amdgpu_bo_free_isp_user - free BO for isp use
+ *
+ * @bo: amdgpu isp user BO to free
+ *
+ * unpins and unrefs the BO for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to the drm device to free the isp user BO.
+ */
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ if (amdgpu_bo_reserve(bo, true) == 0) {
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+ }
+ amdgpu_bo_unref(&bo);
+}
+EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -1150,7 +1251,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
@@ -1158,7 +1258,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = ttm_to_amdgpu_bo(bo);
- amdgpu_vm_bo_invalidate(adev, abo, evict);
+ amdgpu_vm_bo_move(abo, new_mem, evict);
amdgpu_bo_kunmap(abo);
@@ -1171,75 +1271,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
old_mem ? old_mem->mem_type : -1);
}
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats,
- unsigned int sz)
-{
- const unsigned int domain_to_pl[] = {
- [ilog2(AMDGPU_GEM_DOMAIN_CPU)] = TTM_PL_SYSTEM,
- [ilog2(AMDGPU_GEM_DOMAIN_GTT)] = TTM_PL_TT,
- [ilog2(AMDGPU_GEM_DOMAIN_VRAM)] = TTM_PL_VRAM,
- [ilog2(AMDGPU_GEM_DOMAIN_GDS)] = AMDGPU_PL_GDS,
- [ilog2(AMDGPU_GEM_DOMAIN_GWS)] = AMDGPU_PL_GWS,
- [ilog2(AMDGPU_GEM_DOMAIN_OA)] = AMDGPU_PL_OA,
- [ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL,
- };
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *res = bo->tbo.resource;
- struct drm_gem_object *obj = &bo->tbo.base;
- uint64_t size = amdgpu_bo_size(bo);
- unsigned int type;
-
- if (!res) {
- /*
- * If no backing store use one of the preferred domain for basic
- * stats. We take the MSB since that should give a reasonable
- * view.
- */
- BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT ||
- TTM_PL_VRAM < TTM_PL_SYSTEM);
- type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK);
- if (!type)
- return;
- type--;
- if (drm_WARN_ON_ONCE(&adev->ddev,
- type >= ARRAY_SIZE(domain_to_pl)))
- return;
- type = domain_to_pl[type];
- } else {
- type = res->mem_type;
- }
-
- if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz))
- return;
-
- /* DRM stats common fields: */
-
- if (drm_gem_object_is_shared_for_memory_stats(obj))
- stats[type].drm.shared += size;
- else
- stats[type].drm.private += size;
-
- if (res) {
- stats[type].drm.resident += size;
-
- if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
- stats[type].drm.active += size;
- else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
- stats[type].drm.purgeable += size;
- }
-
- /* amdgpu specific stats: */
-
- if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
- stats[TTM_PL_VRAM].requested += size;
- if (type != TTM_PL_VRAM)
- stats[TTM_PL_VRAM].evicted += size;
- } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
- stats[TTM_PL_TT].requested += size;
- }
-}
-
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
@@ -1455,6 +1486,45 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
+ * @bo: the buffer object we should look at
+ *
+ * BO can have multiple preferred placements, to avoid double counting we want
+ * to file it under a single placement for memory stats.
+ * Luckily, if we take the highest set bit in preferred_domains the result is
+ * quite sensible.
+ *
+ * Returns:
+ * Which of the placements should the BO be accounted under.
+ */
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
+{
+ uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
+
+ if (!domain)
+ return TTM_PL_SYSTEM;
+
+ switch (rounddown_pow_of_two(domain)) {
+ case AMDGPU_GEM_DOMAIN_CPU:
+ return TTM_PL_SYSTEM;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ return TTM_PL_TT;
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ return TTM_PL_VRAM;
+ case AMDGPU_GEM_DOMAIN_GDS:
+ return AMDGPU_PL_GDS;
+ case AMDGPU_GEM_DOMAIN_GWS:
+ return AMDGPU_PL_GWS;
+ case AMDGPU_GEM_DOMAIN_OA:
+ return AMDGPU_PL_OA;
+ case AMDGPU_GEM_DOMAIN_DOORBELL:
+ return AMDGPU_PL_DOORBELL;
+ default:
+ return TTM_PL_SYSTEM;
+ }
+}
+
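A condensed sketch of the accounting pattern this helper enables, reusing the DRM stats fields from the removed amdgpu_bo_get_memory(); the wrapper function itself is hypothetical:

    /* Sketch: file one BO under exactly one placement bucket. */
    static void bo_account(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats)
    {
    	uint32_t type = amdgpu_bo_mem_stats_placement(bo);
    	uint64_t size = amdgpu_bo_size(bo);

    	if (drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base))
    		stats[type].drm.shared += size;
    	else
    		stats[type].drm.private += size;
    }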
+/**
* amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index be6769852ece..375448627f7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -260,6 +260,10 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dbuf, u32 domain,
+ struct amdgpu_bo **bo,
+ u64 *gpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
@@ -271,6 +275,7 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
@@ -300,9 +305,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats,
- unsigned int size);
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
@@ -337,8 +340,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct drm_suballoc **sa_bo,
unsigned int size);
-void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct drm_suballoc **sa_bo,
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo,
struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 448f9e742983..babe94ade247 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -208,6 +208,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
psp->boot_time_tmr = false;
fallthrough;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
@@ -359,6 +360,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
int i;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
return false;
@@ -870,6 +872,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -2264,7 +2267,8 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return -EINVAL;
if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
- ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
return -EINVAL;
ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
@@ -2385,6 +2389,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->spdm_drv)) &&
+ (psp->funcs->bootloader_load_spdm_drv != NULL)) {
+ ret = psp_bootloader_load_spdm_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load spdm_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -3007,10 +3020,7 @@ static int psp_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
mutex_lock(&adev->firmware.mutex);
- /*
- * This sequence is just used on hw_init only once, no need on
- * resume.
- */
+
ret = amdgpu_ucode_init_bo(adev);
if (ret)
goto failed;
@@ -3135,6 +3145,10 @@ static int psp_resume(struct amdgpu_ip_block *ip_block)
mutex_lock(&adev->firmware.mutex);
+ ret = amdgpu_ucode_init_bo(adev);
+ if (ret)
+ goto failed;
+
ret = psp_hw_start(psp);
if (ret)
goto failed;
@@ -3289,7 +3303,8 @@ int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_asd.bin", chip_name);
if (err)
goto out;
@@ -3311,7 +3326,8 @@ int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_toc.bin", chip_name);
if (err)
goto out;
@@ -3407,6 +3423,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ipkeymgr_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_SPDM_DRV:
+ psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->spdm_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
@@ -3474,7 +3496,8 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3750,7 +3773,8 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -3785,7 +3809,8 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s_cap.bin", chip_name);
if (err) {
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
@@ -3849,13 +3874,13 @@ int psp_config_sq_perfmon(struct psp_context *psp,
return ret;
}
-static int psp_set_clockgating_state(void *handle,
+static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int psp_set_powergating_state(void *handle,
+static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3867,10 +3892,12 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_ip_block *ip_block;
uint32_t fw_ver;
int ret;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_info(adev->dev, "PSP block is not ready yet\n.");
return -EBUSY;
}
@@ -3899,8 +3926,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
void *fw_pri_cpu_addr;
+ struct amdgpu_ip_block *ip_block;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_err(adev->dev, "PSP block is not ready yet.");
return -EBUSY;
}
@@ -3908,7 +3937,8 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
if (!drm_dev_enter(ddev, &idx))
return -ENODEV;
- ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
+ ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s", buf);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 567cb1f924ca..8d5acc415d38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -80,6 +80,7 @@ enum psp_bootloader_cmd {
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
+ PSP_BL__LOAD_SPDMDRV = 0x20000000,
};
enum psp_ring_type {
@@ -120,6 +121,7 @@ struct psp_funcs {
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
+ int (*bootloader_load_spdm_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -343,6 +345,7 @@ struct psp_context {
struct psp_bin_desc dbg_drv;
struct psp_bin_desc ras_drv;
struct psp_bin_desc ipkeymgr_drv;
+ struct psp_bin_desc spdm_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -434,6 +437,9 @@ struct amdgpu_psp_funcs {
#define psp_bootloader_load_ipkeymgr_drv(psp) \
((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
(psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
+#define psp_bootloader_load_spdm_drv(psp) \
+ ((psp)->funcs->bootloader_load_spdm_drv ? \
+ (psp)->funcs->bootloader_load_spdm_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 4c9fa24dd972..f0924aa3f4e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -36,6 +36,7 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
+#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
@@ -192,7 +193,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -2015,6 +2016,7 @@ static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = true;
break;
@@ -2156,6 +2158,16 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
+ /*
+ * If the current interrupt is caused by a non-fatal RAS error, skip
+ * the check for fatal errors. For fatal errors, the FED status of all
+ * devices in the XGMI hive gets set when the first device gets the
+ * fatal error interrupt. The error gets propagated to other devices as
+ * well, so make sure to ack the interrupt regardless of FED status.
+ */
+ if (!amdgpu_ras_get_fed_status(adev) &&
+ amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
+ return;
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
@@ -2185,6 +2197,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (ret)
return;
+ amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
/* both query_poison_status and handle_poison_consumption are optional,
* but at least one of them should be implemented if we need poison
* consumption handler
@@ -2717,40 +2730,203 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t socket = 0;
+ int ret = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.socket_id = socket;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+
+ return ret;
+}
+
+static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t die_id, socket = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* although the die id is obtained from the PA in nps1 mode, the id
+ * is suitable for any nps mode
+ */
+ if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
+ die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
+ else
+ return -EINVAL;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = die_id;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+ else
+ return -EINVAL;
+}
+
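Both converters fill err_data.err_addr with up to adev->umc.retire_unit physical-address records for a single MCA record; a trimmed sketch of the consuming loop (error paths and the NPS fallbacks handled below are omitted):

    /* Sketch: expand one MCA record into retire_unit retired pages. */
    err_data.err_addr = kcalloc(adev->umc.retire_unit,
    			    sizeof(struct eeprom_table_record), GFP_KERNEL);
    if (!err_data.err_addr)
    	return -ENOMEM;

    if (!amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
    	for (j = 0; j < adev->umc.retire_unit; j++)
    		amdgpu_ras_reserve_page(adev,
    					err_data.err_addr[j].retired_page);
    }
    kfree(err_data.err_addr);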
/* it deal with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages)
+ struct eeprom_table_record *bps, int pages, bool from_rom)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
+ struct ras_err_data err_data;
+ struct eeprom_table_record *err_rec;
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras_context.ras->eeprom_control;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i;
+ uint32_t i, j, loop_cnt = 1;
+ bool find_pages_per_pa = false;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
+ if (from_rom) {
+ err_data.err_addr =
+ kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ err_rec = err_data.err_addr;
+ loop_cnt = adev->umc.retire_unit;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ }
+
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data)
- goto out;
+ if (!data) {
+ /* Returning 0 as the absence of eh_data is acceptable */
+ goto free;
+ }
for (i = 0; i < pages; i++) {
- if (amdgpu_ras_check_bad_page_unlock(con,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
+ if (from_rom &&
+ control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
+ if (!find_pages_per_pa) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
+ if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
+ /* the old RAS TA may be in use; use the PA to find pages in
+ * one row
+ */
+ if (amdgpu_umc_pages_in_a_row(adev, &err_data,
+ bps[i].retired_page <<
+ AMDGPU_GPU_PAGE_SHIFT)) {
+ ret = -EINVAL;
+ goto free;
+ } else {
+ find_pages_per_pa = true;
+ }
+ } else {
+ /* unsupported cases */
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+ }
+ } else {
+ if (amdgpu_umc_pages_in_a_row(adev, &err_data,
+ bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+ } else {
+ if (from_rom && !find_pages_per_pa) {
+ if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
+ /* bad page in any NPS mode in eeprom */
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ } else {
+ /* legacy bad page in eeprom, generated only in
+ * NPS1 mode
+ */
+ if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
+ /* old RAS TA or ASICs which don't support
+ * converting the address via the MCA address
+ */
+ if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
+ find_pages_per_pa = true;
+ err_rec = &bps[i];
+ loop_cnt = 1;
+ } else {
+ /* non-nps1 mode, old RAS TA
+ * can't support it
+ */
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+ }
+ }
- if (!data->space_left &&
- amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
- ret = -ENOMEM;
- goto out;
+ if (!find_pages_per_pa)
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ err_rec = &bps[i];
+ }
}
- amdgpu_ras_reserve_page(adev, bps[i].retired_page);
+ for (j = 0; j < loop_cnt; j++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ ret = -ENOMEM;
+ goto free;
+ }
- memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
- data->count++;
- data->space_left--;
+ amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
+
+ memcpy(&data->bps[data->count], &(err_rec[j]),
+ sizeof(struct eeprom_table_record));
+ data->count++;
+ data->space_left--;
+ }
}
+
+free:
+ if (from_rom)
+ kfree(err_data.err_addr);
out:
mutex_unlock(&con->recovery_lock);
@@ -2768,7 +2944,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count;
+ int save_count, unit_num, bad_page_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -2780,19 +2956,32 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- save_count = data->count - control->ras_num_recs;
+ bad_page_num = control->ras_num_bad_pages;
+ save_count = data->count - bad_page_num;
mutex_unlock(&con->recovery_lock);
+ unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
- *new_cnt = save_count / adev->umc.retire_unit;
+ *new_cnt = unit_num;
/* only new entries are saved */
if (save_count > 0) {
- if (amdgpu_ras_eeprom_append(control,
- &data->bps[control->ras_num_recs],
- save_count)) {
- dev_err(adev->dev, "Failed to save EEPROM table data!");
- return -EIO;
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[control->ras_num_recs],
+ save_count)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ } else {
+ for (i = 0; i < unit_num; i++) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[bad_page_num + i * adev->umc.retire_unit],
+ 1)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ }
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
@@ -2821,11 +3010,32 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
return -ENOMEM;
ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "Failed to load EEPROM table records!");
- else
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
+ } else {
+ if (control->ras_num_recs > 1 &&
+ adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ if ((bps[0].address == bps[1].address) &&
+ (bps[0].mem_channel == bps[1].mem_channel))
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
+ else
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ }
+
+ ret = amdgpu_ras_eeprom_check(control);
+ if (ret)
+ goto out;
+
+ /* HW not usable */
+ if (amdgpu_ras_is_rma(adev)) {
+ ret = -EHWPOISON;
+ goto out;
+ }
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ }
+
+out:
kfree(bps);
return ret;
}
@@ -3205,31 +3415,36 @@ static int amdgpu_ras_page_retirement_thread(void *param)
int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
int ret;
if (!con || amdgpu_sriov_vf(adev))
return 0;
- ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
-
+ control = &con->eeprom_control;
+ ret = amdgpu_ras_eeprom_init(control);
if (ret)
return ret;
- /* HW not usable */
- if (amdgpu_ras_is_rma(adev))
- return -EHWPOISON;
+ if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
+
+ /* default status is MCA storage */
+ if (control->ras_num_recs <= 1 &&
+ adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
- if (con->eeprom_control.ras_num_recs) {
+ if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
return ret;
amdgpu_dpm_send_hbm_bad_pages_num(
- adev, con->eeprom_control.ras_num_recs);
+ adev, control->ras_num_bad_pages);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(
- adev, con->eeprom_control.bad_channel_bitmap);
+ adev, control->bad_channel_bitmap);
con->update_channel_flag = false;
}
}
@@ -3366,6 +3581,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -3378,7 +3594,9 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(14, 0, 3):
return true;
default:
return false;
@@ -3629,6 +3847,7 @@ static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
break;
@@ -3704,7 +3923,19 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
* check DF RAS */
adev->nbio.ras = &nbio_v4_3_ras;
break;
+ case IP_VERSION(6, 3, 1):
+ if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
+ /* unlike other generations of nbio ras,
+ * nbif v6_3_1 only supports a fatal error interrupt
+ * to inform software that the DF is frozen due to a
+ * system fatal error event. The driver should not
+ * enable nbio ras in such cases. Instead,
+ * check DF RAS
+ */
+ adev->nbio.ras = &nbif_v6_3_1_ras;
+ break;
case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
if (!adev->gmc.is_app_apu)
adev->nbio.ras = &nbio_v7_9_ras;
break;
@@ -4083,7 +4314,7 @@ bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
if (!ras)
return false;
- return atomic_read(&ras->fed);
+ return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
}
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
@@ -4091,8 +4322,48 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (status)
+ set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ else
+ clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ }
+}
+
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras)
+ ras->ras_err_state = 0;
+}
+
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
if (ras)
- atomic_set(&ras->fed, !!status);
+ set_bit(block, &ras->ras_err_state);
+}
+
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (block == AMDGPU_RAS_BLOCK__ANY)
+ return (ras->ras_err_state != 0);
+ else
+ return test_bit(block, &ras->ras_err_state) ||
+ test_bit(AMDGPU_RAS_BLOCK__LAST,
+ &ras->ras_err_state);
+ }
+
+ return false;
}
static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6db772ecfee4..82db986c36a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -99,7 +99,8 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__IH,
AMDGPU_RAS_BLOCK__MPIO,
- AMDGPU_RAS_BLOCK__LAST
+ AMDGPU_RAS_BLOCK__LAST,
+ AMDGPU_RAS_BLOCK__ANY = -1
};
enum amdgpu_ras_mca_block {
@@ -482,6 +483,8 @@ struct ras_ecc_err {
uint64_t ipid;
uint64_t addr;
uint64_t pa_pfn;
+ /* save global channel index across all UMC instances */
+ uint32_t channel_idx;
struct ras_err_pages err_pages;
};
@@ -558,8 +561,8 @@ struct amdgpu_ras {
struct ras_ecc_log_info umc_ecc_log;
struct delayed_work page_retirement_dwork;
- /* Fatal error detected flag */
- atomic_t fed;
+ /* ras errors detected */
+ unsigned long ras_err_state;
/* RAS event manager */
struct ras_event_manager __event_mgr;
@@ -750,7 +753,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages);
+ struct eeprom_table_record *bps, int pages, bool from_rom);
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
unsigned long *new_cnt);
@@ -952,6 +955,10 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev);
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block);
u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index f28f6b4ba765..52c16bfeccaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -470,9 +470,10 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
res = __write_table_ras_info(control);
control->ras_num_recs = 0;
+ control->ras_num_bad_pages = 0;
control->ras_fri = 0;
- amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
control->bad_channel_bitmap = 0;
amdgpu_dpm_send_hbm_bad_channel_flag(adev, control->bad_channel_bitmap);
@@ -559,7 +560,7 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
- con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
+ con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
dev_warn(adev->dev,
"But GPU can be operated due to bad_page_threshold = -1.\n");
return false;
@@ -621,6 +622,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(to_amdgpu_device(control));
+ struct amdgpu_device *adev = to_amdgpu_device(control);
u32 a, b, i;
u8 *buf, *pp;
int res;
@@ -723,6 +725,12 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
control->ras_num_recs = 1 + (control->ras_max_record_count + b
- control->ras_fri)
% control->ras_max_record_count;
+
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
+ control->ras_num_bad_pages = control->ras_num_recs;
+ else
+ control->ras_num_bad_pages =
+ control->ras_num_recs * adev->umc.retire_unit;
Out:
kfree(buf);
return res;
@@ -740,10 +748,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if it exceeds.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_recs >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
@@ -798,9 +806,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
*/
if (amdgpu_bad_page_threshold != 0 &&
control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_recs < ras->bad_page_cnt_threshold)
+ control->ras_num_bad_pages < ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
- control->ras_num_recs) * 100) /
+ control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
/* Recalc the checksum.
@@ -841,7 +849,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ int res, i;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -855,6 +863,10 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return -EINVAL;
}
+ /* set the new channel index flag */
+ for (i = 0; i < num; i++)
+ record[i].retired_page |= UMC_CHANNEL_IDX_V2;
+
mutex_lock(&control->ras_tbl_mutex);
res = amdgpu_ras_eeprom_append_table(control, record, num);
@@ -864,6 +876,11 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
amdgpu_ras_debugfs_set_ret_size(control);
mutex_unlock(&control->ras_tbl_mutex);
+
+ /* clear the channel index flag; the flag is only saved on eeprom */
+ for (i = 0; i < num; i++)
+ record[i].retired_page &= ~UMC_CHANNEL_IDX_V2;
+
return res;
}
@@ -1373,9 +1390,35 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ return 0;
+}
+
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
+ return -ENOENT;
+
+ if (!__get_eeprom_i2c_addr(adev, control))
+ return -EINVAL;
+
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
+ control->ras_num_bad_pages = control->ras_num_recs;
+ else
+ control->ras_num_bad_pages =
+ control->ras_num_recs * adev->umc.retire_unit;
+
if (hdr->header == RAS_TABLE_HDR_VAL) {
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_recs);
+ control->ras_num_bad_pages);
if (hdr->version == RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
@@ -1390,9 +1433,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
/* Warn if we are at 90% of the threshold or above
*/
- if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold)
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
@@ -1403,10 +1446,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
res = __verify_ras_table_checksum(control);
- if (res)
- DRM_ERROR("RAS Table incorrect checksum or error:%d\n",
+ if (res) {
+ dev_err(adev->dev, "RAS Table incorrect checksum or error:%d\n",
res);
- if (ras->bad_page_cnt_threshold > control->ras_num_recs) {
+ return -EINVAL;
+ }
+ if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) {
/* This means that, the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
@@ -1416,13 +1461,13 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
dev_info(adev->dev,
"records:%d threshold:%d, resetting "
"RAS table header signature",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
res = 0;
@@ -1431,7 +1476,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
dev_err(adev->dev,
"RAS records:%d exceed threshold:%d, "
"GPU will not be initialized. Replace this GPU or increase the threshold",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
}
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index b9ebda577797..81d55cb7b397 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -43,6 +43,19 @@ enum amdgpu_ras_eeprom_err_type {
AMDGPU_RAS_EEPROM_ERR_COUNT,
};
+/*
+ * one UMC MCA address could map to multiple physical addresses (PA),
+ * such as 1:16. We use eeprom_table_record.address to store the MCA
+ * address and eeprom_table_record.retired_page to save the PA.
+ *
+ * AMDGPU_RAS_EEPROM_REC_PA: one record store one PA
+ * AMDGPU_RAS_EEPROM_REC_MCA: one record store one MCA address
+ */
+enum amdgpu_ras_eeprom_rec_type {
+ AMDGPU_RAS_EEPROM_REC_PA,
+ AMDGPU_RAS_EEPROM_REC_MCA,
+};
+
struct amdgpu_ras_eeprom_table_header {
uint32_t header;
uint32_t version;
@@ -82,6 +95,11 @@ struct amdgpu_ras_eeprom_control {
*/
u32 ras_num_recs;
+ /* the bad page number is ras_num_recs or
+ * ras_num_recs * umc.retire_unit
+ */
+ u32 ras_num_bad_pages;
+
/* First record index to read, 0-based.
* Range is [0, num_recs-1]. This is
* an absolute index, starting right after
@@ -102,6 +120,7 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
+ enum amdgpu_ras_eeprom_rec_type rec_type;
};
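Given the 1:N MCA-to-PA mapping noted above, ras_num_bad_pages diverges from ras_num_recs for MCA-type tables; a worked sketch, where a retire_unit of 16 is an assumption for illustration:

    /* Sketch: 4 stored records under each record type. */
    u32 ras_num_recs = 4, retire_unit = 16, ras_num_bad_pages;

    if (rec_type == AMDGPU_RAS_EEPROM_REC_PA)
    	ras_num_bad_pages = ras_num_recs;		/* 4 bad pages */
    else	/* AMDGPU_RAS_EEPROM_REC_MCA */
    	ras_num_bad_pages = ras_num_recs * retire_unit;	/* 64 bad pages */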
/*
@@ -145,6 +164,8 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index a0acb65f4b40..dabfbdf6f1ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -183,6 +183,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_init(adev);
break;
@@ -206,6 +207,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_fini(adev);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 36fc9578c53c..dee5a1b4e572 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -462,8 +462,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size,
enum amdgpu_ib_pool_type pool,
struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f);
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct amdgpu_job *job,
struct dma_fence **f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 10df731998b2..39070b2a4c04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -93,8 +93,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return 0;
}
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
- struct dma_fence *fence)
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo, struct dma_fence *fence)
{
if (sa_bo == NULL || *sa_bo == NULL) {
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 113f0d242618..174badca27e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -219,9 +219,11 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
if (instance == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s%d.bin", ucode_prefix, instance);
if (err)
goto out;
@@ -261,6 +263,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 2) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 4) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 5)) &&
adev->firmware.load_type ==
AMDGPU_FW_LOAD_PSP &&
@@ -358,13 +362,13 @@ static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
if (!adev)
return -ENODEV;
- mask = (1 << adev->sdma.num_instances) - 1;
+ mask = BIT_ULL(adev->sdma.num_instances) - 1;
if ((val & mask) == 0)
return -EINVAL;
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
- if (val & (1 << i))
+ if (val & BIT_ULL(i))
ring->sched.ready = true;
else
ring->sched.ready = false;
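The mask computation above switches from (1 << n) to BIT_ULL(n): with a plain int, shifting by 32 or more positions is undefined behavior, so the old form breaks once num_instances can reach 32. A standalone userspace sketch of the same idea (MY_BIT_ULL mirrors the kernel's BIT_ULL and is defined here only for the demo):

#include <stdint.h>
#include <stdio.h>

#define MY_BIT_ULL(n) (1ULL << (n))	/* 64-bit shift, safe up to n = 63 */

int main(void)
{
	unsigned int num_instances = 32;
	uint64_t mask = MY_BIT_ULL(num_instances) - 1;	/* 0xffffffff */

	/* (1 << 32) on a 32-bit int would be undefined behavior here */
	printf("mask = %#llx\n", (unsigned long long)mask);
	return 0;
}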
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c8180cad0abd..ff286940ab43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1762,7 +1762,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
if (!adev->bios &&
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
reserve_size = max(reserve_size, (uint32_t)280 << 20);
else if (!reserve_size)
reserve_size = DISCOVERY_TMR_OFFSET;
@@ -2065,6 +2066,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 2852a6064c9a..461fb8090ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,15 +26,15 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
-#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
-#define __AMDGPU_PL_LAST (TTM_PL_PRIV + 4)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 4c7b53648a50..cf700824b960 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1434,6 +1434,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
*
* @adev: amdgpu device
* @fw: pointer to load firmware to
+ * @required: whether the firmware is required
* @fmt: firmware name format string
* @...: variable arguments
*
@@ -1442,7 +1443,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
* the error code to -ENODEV, so that early_init functions will fail to load.
*/
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...)
+ enum amdgpu_ucode_required required, const char *fmt, ...)
{
char fname[AMDGPU_UCODE_NAME_MAX];
va_list ap;
@@ -1456,16 +1457,24 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
return -EOVERFLOW;
}
- r = request_firmware(fw, fname, adev->dev);
+ if (required == AMDGPU_UCODE_REQUIRED)
+ r = request_firmware(fw, fname, adev->dev);
+ else {
+ r = firmware_request_nowarn(fw, fname, adev->dev);
+ if (r)
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fname);
+ }
if (r)
return -ENODEV;
r = amdgpu_ucode_validate(*fw);
- if (r) {
+ if (r)
+ /*
+ * amdgpu_ucode_request() must always be paired with amdgpu_ucode_release(),
+ * regardless of success or failure. amdgpu_ucode_release() takes care of
+ * releasing the firmware, so a redundant release must be avoided here.
+ */
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
- release_firmware(*fw);
- *fw = NULL;
- }
return r;
}
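A hypothetical caller sketching how the new AMDGPU_UCODE_OPTIONAL argument is meant to be used (the function and firmware name below are illustrative, not from this patch); note that amdgpu_ucode_release() remains the required cleanup on every path:

static int example_request_optional_fw(struct amdgpu_device *adev,
				       const struct firmware **fw)
{
	int r;

	r = amdgpu_ucode_request(adev, fw, AMDGPU_UCODE_OPTIONAL,
				 "amdgpu/%s.bin", "example_opt");
	if (r) {
		/* Optional image absent: release and carry on without it */
		amdgpu_ucode_release(fw);
		return 0;
	}
	/* Firmware present and validated; use *fw as usual */
	return 0;
}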
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4150ec0aa10d..4eedd92f000b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -126,6 +126,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_DBG_DRV,
PSP_FW_TYPE_PSP_RAS_DRV,
PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
+ PSP_FW_TYPE_PSP_SPDM_DRV,
PSP_FW_TYPE_MAX_INDEX,
};
@@ -551,6 +552,11 @@ enum amdgpu_firmware_load_type {
AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO,
};
+enum amdgpu_ucode_required {
+ AMDGPU_UCODE_OPTIONAL,
+ AMDGPU_UCODE_REQUIRED,
+};
+
/* conform to smu_ucode_xfer_cz.h */
#define AMDGPU_SDMA0_UCODE_LOADED 0x00000001
#define AMDGPU_SDMA1_UCODE_LOADED 0x00000002
@@ -604,9 +610,9 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
-__printf(3, 4)
+__printf(4, 5)
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...);
+ enum amdgpu_ucode_required required, const char *fmt, ...);
void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 896f3609b0ee..eafe20d8fe0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -78,7 +78,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -166,10 +166,11 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt);
+ err_data->err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, &err_count);
- amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev,
+ con->eeprom_control.ras_num_bad_pages);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
@@ -444,3 +445,77 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr)
+{
+ struct ta_ras_query_address_output addr_out;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ addr_out.pa.pa = pa_addr;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data, NULL,
+ &addr_out, false);
+ else
+ return -EINVAL;
+}
+
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ int i, ret;
+ struct ras_err_data err_data;
+
+ err_data.err_addr = kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
+ return 0;
+ }
+
+ ret = amdgpu_umc_pages_in_a_row(adev, &err_data, pa_addr);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ if (i >= len)
+ goto out;
+
+ pfns[i] = err_data.err_addr[i].retired_page;
+ }
+ ret = i;
+
+out:
+ kfree(err_data.err_addr);
+ return ret;
+}
+
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ ret = adev->umc.ras->convert_ras_err_addr(adev, NULL, &addr_in,
+ addr_out, dump_addr);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ return 0;
+}
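A hypothetical consumer of the new lookup helper, walking the retired page frame numbers of a whole row (example_dump_row_pfns() is illustrative; the pfns buffer is assumed to hold at least adev->umc.retire_unit entries):

static void example_dump_row_pfns(struct amdgpu_device *adev, uint64_t pa)
{
	uint64_t pfns[16];	/* assumed >= adev->umc.retire_unit */
	int i, n;

	n = amdgpu_umc_lookup_bad_pages_in_a_row(adev, pa, pfns,
						 ARRAY_SIZE(pfns));
	/* n is the number of pfns filled in, or a negative error code */
	for (i = 0; i < n; i++)
		dev_info(adev->dev, "retired pfn 0x%llx\n", pfns[i]);
}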
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index ce4179db2a6d..a4a7e61817aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,6 +54,22 @@
/* Page retirement tag */
#define UMC_ECC_NEW_DETECTED_TAG 0x1
+/*
+ * A flag indicating that v2 of the channel index is stored in EEPROM.
+ *
+ * v1 (legacy): stores the channel index within a UMC instance
+ *     (range in UMC v12: 0 ~ 7)
+ * v2: stores the global channel index
+ *     (range in UMC v12: 0 ~ 127)
+ *
+ * NOTE: it would be better to store this in eeprom_table_record.mem_channel,
+ * but mem_channel has only 8 bits and the channel count may grow in the
+ * future, so the flag is saved in eeprom_table_record.retired_page instead.
+ * retired_page is unused in v2; eeprom_table_record.address is used in its
+ * place. Only 48 bits are saved to EEPROM, so bit 47 is used here.
+ */
+#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -70,6 +86,13 @@ struct amdgpu_umc_ras {
enum amdgpu_mca_error_type type, void *ras_error_status);
int (*update_ecc_status)(struct amdgpu_device *adev,
uint64_t status, uint64_t ipid, uint64_t addr);
+ int (*convert_ras_err_addr)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr);
+ uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page);
};
struct amdgpu_umc_funcs {
@@ -134,4 +157,12 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
void *ras_error_status);
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr);
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len);
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr);
#endif
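A sketch of how the v2 flag could be set and tested on a record (helper names are illustrative; the patch's actual record handling may differ):

static inline uint64_t umc_tag_channel_idx_v2(uint64_t retired_page)
{
	return retired_page | UMC_CHANNEL_IDX_V2;	/* mark record as v2 */
}

static inline bool umc_is_channel_idx_v2(uint64_t retired_page)
{
	return !!(retired_page & UMC_CHANNEL_IDX_V2);
}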
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index bd2d3863c3ed..dde15c6a96e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -587,7 +587,8 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
break;
}
- r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 65bb26215e86..74758b5ffc6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 599d3ca4e0ef..b9060bcd4806 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
@@ -503,7 +503,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
- amdgpu_ib_free(ring->adev, &ib_msg, f);
+ amdgpu_ib_free(&ib_msg, f);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3e94c3ba1ba2..83faf6e6788a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,6 +62,7 @@
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
+#define FIRMWARE_VCN5_0_1 "amdgpu/vcn_5_0_1.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -88,6 +89,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -99,11 +101,15 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s_%d.bin", ucode_prefix, i);
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
else
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (r) {
- amdgpu_ucode_release(&adev->vcn.fw[i]);
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
return r;
}
}
@@ -151,7 +157,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.using_unified_queue =
amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
- hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[0].fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -270,7 +276,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
- amdgpu_ucode_release(&adev->vcn.fw[j]);
+ amdgpu_ucode_release(&adev->vcn.inst[j].fw);
}
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
@@ -282,7 +288,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
- int vcn_config = adev->vcn.vcn_config[vcn_instance];
+ int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;
if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
ret = true;
@@ -362,12 +368,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
const struct common_firmware_header *hdr;
unsigned int offset;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->vcn.inst[i].cpu_addr,
- adev->vcn.fw[i]->data + offset,
+ adev->vcn.inst[i].fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
@@ -580,7 +586,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -591,7 +597,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -773,7 +779,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -784,7 +790,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -1014,7 +1020,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
- amdgpu_ib_free(adev, &ib, fence);
+ amdgpu_ib_free(&ib, fence);
dma_fence_put(fence);
return r;
@@ -1025,7 +1031,8 @@ int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
long r;
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
+ (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
if (r)
goto error;
@@ -1063,7 +1070,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
 /* currently only supports 2 FW instances */
 if (i >= 2) {
 dev_info(adev->dev, "More than 2 VCN FW instances!\n");
@@ -1071,12 +1078,14 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(4, 0, 3))
+ IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) ==
+ IP_VERSION(5, 0, 1))
break;
}
}
@@ -1320,3 +1329,71 @@ void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
}
}
+
+/*
+ * debugfs to enable/disable VCN job submission to a specific core or
+ * instance. It is created only if the queue type is unified.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (val & (1ULL << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ /* Publish the sched.ready flag updates so they take effect immediately across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
+ amdgpu_debugfs_vcn_sched_mask_get,
+ amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
+#endif
+
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.using_unified_queue)
+ return;
+ sprintf(name, "amdgpu_vcn_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_vcn_sched_mask_fops);
+#endif
+}
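Usage note: each bit of the mask gates the unified encode ring of one VCN instance, so on a two-instance part writing 0x3 leaves both instances schedulable while 0x1 disables instance 1. Writes that clear every in-range bit are rejected with -EINVAL, and reads return the mask currently in effect. The file appears under the DRM primary node's debugfs root as amdgpu_vcn_sched_mask.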
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1e32311c1dff..adaf4388ad28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -163,20 +163,30 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
- bool video_range, aon_range; \
+ bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+ video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS + 0x2600))))); \
aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+ aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS + 0x600))))); \
if (video_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
(VCN_VID_IP_ADDRESS)); \
else if (aon_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
(VCN_AON_IP_ADDRESS)); \
+ else if (video1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
else \
internal_reg_offset = (0xFFFFF & addr); \
\
@@ -297,6 +307,9 @@ struct amdgpu_vcn_inst {
atomic_t dpg_enc_submission_cnt;
struct amdgpu_vcn_fw_shared fw_shared;
uint8_t aid_id;
+ const struct firmware *fw; /* VCN firmware */
+ uint8_t vcn_config;
+ uint32_t vcn_codec_disable_mask;
};
struct amdgpu_vcn_ras {
@@ -306,15 +319,12 @@ struct amdgpu_vcn_ras {
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
- const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
bool indirect_sram;
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES];
- uint32_t vcn_codec_disable_mask[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
struct mutex vcn1_jpeg1_workaround;
@@ -523,5 +533,6 @@ int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index c704e9803e11..0af469ec6fcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1263,12 +1263,10 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
return 0;
- tmp = kmalloc(used_size, GFP_KERNEL);
+ tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
- memcpy(tmp, &host_telemetry->body.error_count, used_size);
-
if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
goto out;
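The kmalloc()+memcpy() pair collapses into a single kmemdup() call; a generic before/after sketch (function names are illustrative):

static void *copy_counts_old(const void *src, size_t len)
{
	void *tmp = kmalloc(len, GFP_KERNEL);

	if (!tmp)
		return NULL;
	memcpy(tmp, src, len);
	return tmp;
}

static void *copy_counts_new(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);	/* alloc + copy in one call */
}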
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 8bf28d336807..03308261f894 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -632,13 +632,13 @@ static bool amdgpu_vkms_is_idle(void *handle)
return true;
}
-static int amdgpu_vkms_set_clockgating_state(void *handle,
+static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int amdgpu_vkms_set_powergating_state(void *handle,
+static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c9c48b782ec1..5c07777d3239 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
+#include "amdgpu_vm.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
@@ -311,6 +312,111 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
}
/**
+ * amdgpu_vm_update_shared - helper to update shared memory stat
+ * @base: base structure for tracking BO usage in a VM
+ *
+ * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * stat changed (e.g. the buffer was moved), amdgpu_vm_update_stats needs to be
+ * called as well.
+ */
+static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ uint64_t size = amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+ bool shared;
+
+ spin_lock(&vm->status_lock);
+ shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ if (base->shared != shared) {
+ base->shared = shared;
+ if (shared) {
+ vm->stats[bo_memtype].drm.shared += size;
+ vm->stats[bo_memtype].drm.private -= size;
+ } else {
+ vm->stats[bo_memtype].drm.shared -= size;
+ vm->stats[bo_memtype].drm.private += size;
+ }
+ }
+ spin_unlock(&vm->status_lock);
+}
+
+/**
+ * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
+ * @bo: amdgpu buffer object
+ *
+ * Update the per-VM stats for every VM the BO belongs to when it changes
+ * between private and shared.
+ */
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_bo_base *base;
+
+ for (base = bo->vm_bo; base; base = base->next)
+ amdgpu_vm_update_shared(base);
+}
+
+/**
+ * amdgpu_vm_update_stats_locked - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: whether to add (+1) or subtract (-1) from the stat
+ *
+ * The caller needs to hold the vm status_lock. Useful when multiple updates
+ * need to happen at the same time.
+ */
+static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ int64_t size = sign * amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+
+ /* For drm-total- and drm-shared-, BOs are accounted by their preferred
+ * placement, see also amdgpu_bo_mem_stats_placement.
+ */
+ if (base->shared)
+ vm->stats[bo_memtype].drm.shared += size;
+ else
+ vm->stats[bo_memtype].drm.private += size;
+
+ if (res && res->mem_type < __AMDGPU_PL_NUM) {
+ uint32_t res_memtype = res->mem_type;
+
+ vm->stats[res_memtype].drm.resident += size;
+ /* A BO only counts as purgeable if it is resident,
+ * since otherwise there's nothing to purge.
+ */
+ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
+ vm->stats[res_memtype].drm.purgeable += size;
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ vm->stats[bo_memtype].evicted += size;
+ }
+}
+
+/**
+ * amdgpu_vm_update_stats - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: whether to add (+1) or subtract (-1) from the stat
+ *
+ * Updates the basic memory stat when bo is added/deleted/moved.
+ */
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(base, res, sign);
+ spin_unlock(&vm->status_lock);
+}
+
+/**
* amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
* @base: base structure for tracking BO usage in a VM
@@ -333,6 +439,11 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
+ spin_lock(&vm->status_lock);
+ base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
+ spin_unlock(&vm->status_lock);
+
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -1083,53 +1194,11 @@ error_free:
return r;
}
-static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
- struct amdgpu_mem_stats *stats,
- unsigned int size)
-{
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo *bo = bo_va->base.bo;
-
- if (!bo)
- return;
-
- /*
- * For now ignore BOs which are currently locked and potentially
- * changing their location.
- */
- if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
- !dma_resv_trylock(bo->tbo.base.resv))
- return;
-
- amdgpu_bo_get_memory(bo, stats, size);
- if (!amdgpu_vm_is_bo_always_valid(vm, bo))
- dma_resv_unlock(bo->tbo.base.resv);
-}
-
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats,
- unsigned int size)
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- struct amdgpu_bo_va *bo_va, *tmp;
-
spin_lock(&vm->status_lock);
- list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
+ memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
spin_unlock(&vm->status_lock);
}
@@ -2075,6 +2144,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
if (*base != &bo_va->base)
continue;
+ amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
*base = bo_va->base.next;
break;
}
@@ -2143,14 +2213,12 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
- * @adev: amdgpu_device pointer
* @bo: amdgpu buffer object
* @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted)
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
@@ -2176,6 +2244,32 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_bo_move - handle BO move
+ *
+ * @bo: amdgpu buffer object
+ * @new_mem: the new placement of the BO move
+ * @evicted: is the BO evicted
+ *
+ * Update the memory stats for the new placement and mark @bo as invalid.
+ */
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted)
+{
+ struct amdgpu_vm_bo_base *bo_base;
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
+ amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
+ spin_unlock(&vm->status_lock);
+ }
+
+ amdgpu_vm_bo_invalidate(bo, evicted);
+}
+
+/**
* amdgpu_vm_get_block_size - calculate VM page table size as power of two
*
* @vm_size: VM size
@@ -2594,6 +2688,16 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->is_compute_context = false;
}
+static bool amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
+{
+ for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
+ if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
+ vm->stats[i].evicted == 0))
+ return false;
+ }
+ return true;
+}
+
/**
* amdgpu_vm_fini - tear down a vm instance
*
@@ -2617,7 +2721,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_put_task_info(vm->task_info);
amdgpu_vm_set_pasid(adev, vm, 0);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
@@ -2666,6 +2769,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
+
+ if (!amdgpu_vm_stats_is_zero(vm)) {
+ struct amdgpu_task_info *ti = vm->task_info;
+
+ dev_warn(adev->dev,
+ "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
+ ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ }
+
+ amdgpu_vm_put_task_info(vm->task_info);
}
/**
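With per-placement stats maintained incrementally under status_lock, amdgpu_vm_get_memory() reduces to a snapshot copy. A hypothetical consumer summing one field across placements (example_total_private() is illustrative, not part of the patch):

static uint64_t example_total_private(struct amdgpu_vm *vm)
{
	struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
	uint64_t total = 0;
	int i;

	amdgpu_vm_get_memory(vm, stats);	/* snapshot under status_lock */
	for (i = 0; i < __AMDGPU_PL_NUM; i++)
		total += stats[i].drm.private;
	return total;
}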
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 5d119ac26c4f..a3e128e373bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -35,6 +35,7 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
+#include "amdgpu_ttm.h"
struct drm_exec;
@@ -202,9 +203,13 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by spinlock */
+ /* protected by vm status_lock */
struct list_head vm_status;
+ /* whether the BO is counted as shared in memory stats,
+ * protected by vm status_lock */
+ bool shared;
+
/* protected by the BO being reserved */
bool moved;
};
@@ -324,10 +329,7 @@ struct amdgpu_vm_fault_info {
struct amdgpu_mem_stats {
struct drm_memory_stats drm;
- /* buffers that requested this placement */
- uint64_t requested;
- /* buffers that requested this placement
- * but are currently evicted */
+ /* buffers that requested this placement but are currently evicted */
uint64_t evicted;
};
@@ -345,6 +347,9 @@ struct amdgpu_vm {
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
+ /* Memory statistics for this vm, protected by status_lock */
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
@@ -524,8 +529,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *new_res, int sign);
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
@@ -576,8 +585,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats,
- unsigned int size);
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index f78a0434a48f..b0bf21682115 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -537,6 +537,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
+ amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 110b120d7375..121ee17b522b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -236,7 +236,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
int ret;
amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
- ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
+ ret = amdgpu_ucode_request(adev, &adev->vpe.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", fw_prefix);
if (ret)
goto out;
@@ -646,16 +647,16 @@ static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int vpe_set_clockgating_state(void *handle,
+static int vpe_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int vpe_set_powergating_state(void *handle,
+static int vpe_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
if (!adev->pm.dpm_enabled)
@@ -833,7 +834,7 @@ static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index e2cb1f080e88..08d6787893b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2161,13 +2161,13 @@ static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cik_common_set_clockgating_state(void *handle,
+static int cik_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_common_set_powergating_state(void *handle,
+static int cik_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 1da17755ad53..444563486769 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -402,13 +402,13 @@ static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cik_ih_set_clockgating_state(void *handle,
+static int cik_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_ih_set_powergating_state(void *handle,
+static int cik_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ede1a028d48d..d9bd8f3f17e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -133,9 +133,11 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -696,7 +698,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1189,11 +1191,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int cik_sdma_set_clockgating_state(void *handle,
+static int cik_sdma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -1204,7 +1206,7 @@ static int cik_sdma_set_clockgating_state(void *handle,
return 0;
}
-static int cik_sdma_set_powergating_state(void *handle,
+static int cik_sdma_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index d72973bd570d..82586b76aeda 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -398,14 +398,14 @@ static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cz_ih_set_clockgating_state(void *handle,
+static int cz_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
// TODO
return 0;
}
-static int cz_ih_set_powergating_state(void *handle,
+static int cz_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
// TODO
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5098c50d54c8..c5e3d2251b18 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2687,6 +2687,32 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v10_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v10_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v10_0_panic_flush,
+};
+
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2734,6 +2760,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v10_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3302,13 +3329,13 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v10_0_set_clockgating_state(void *handle,
+static int dce_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v10_0_set_powergating_state(void *handle,
+static int dce_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c5680ff4ab9f..ea42a4472bf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2800,6 +2800,32 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v11_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v11_0_panic_flush,
+};
+
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2847,6 +2873,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3434,13 +3461,13 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v11_0_set_clockgating_state(void *handle,
+static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v11_0_set_powergating_state(void *handle,
+static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index eb7de9122d99..915804a6a1d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2602,6 +2602,32 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v6_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v6_0_panic_flush,
+};
+
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2629,6 +2655,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3124,13 +3151,13 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_clockgating_state(void *handle,
+static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v6_0_set_powergating_state(void *handle,
+static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 04b79ff87f75..f2edc0fece5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2613,6 +2613,31 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v8_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v8_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v8_0_panic_flush,
+};
+
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2640,6 +2665,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v8_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3212,13 +3238,13 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
}
-static int dce_v8_0_set_clockgating_state(void *handle,
+static int dce_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v8_0_set_powergating_state(void *handle,
+static int dce_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 24dce803a829..5ba263fe5512 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -45,6 +45,7 @@
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
+#include "gfx_v10_0_cleaner_shader.h"
#include "nbio_v2_3.h"
/*
@@ -3673,7 +3674,7 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
-static int gfx_v10_0_set_powergating_state(void *handle,
+static int gfx_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -4036,7 +4037,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -4138,18 +4139,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce%s.bin", ucode_prefix, wks);
if (err)
goto out;
@@ -4173,6 +4177,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec%s.bin", ucode_prefix, wks);
if (err)
goto out;
@@ -4180,6 +4185,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
@@ -4733,6 +4739,23 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 64 &&
+ adev->gfx.pfp_fw_version >= 100 &&
+ adev->gfx.mec_fw_version >= 122) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -5952,7 +5975,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
else
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- if (adev->job_hang && !enable)
+ if (amdgpu_in_reset(adev) && !enable)
return 0;
for (i = 0; i < adev->usec_timeout; i++) {
@@ -6599,17 +6622,13 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp | 0x80);
break;
default:
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
break;
}
}
@@ -7457,7 +7476,7 @@ static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
* otherwise the gfxoff disallowing will be failed to set.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
- gfx_v10_0_set_powergating_state(ip_block->adev, AMD_PG_STATE_UNGATE);
+ gfx_v10_0_set_powergating_state(ip_block, AMD_PG_STATE_UNGATE);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -8345,10 +8364,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
-static int gfx_v10_0_set_powergating_state(void *handle,
+static int gfx_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -8383,10 +8402,10 @@ static int gfx_v10_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v10_0_set_clockgating_state(void *handle,
+static int gfx_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
new file mode 100644
index 000000000000..663c2572d440
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_10_3_0 */
+static const u32 gfx_10_3_0_cleaner_shader_hex[] = {
+ 0xb0804004, 0xbf8a0000,
+ 0xbe8203b8, 0xbefc0380,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefc0302, 0x80828802,
+ 0xbf84fff5, 0xbe8203ff,
+ 0x80000000, 0x87020002,
+ 0xbf840012, 0xbefe03c1,
+ 0xbeff03c1, 0xd7650001,
+ 0x0001007f, 0xd7660001,
+ 0x0002027e, 0x16020288,
+ 0xbe8203bf, 0xbefc03c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd70f6a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbf84fff7, 0xbefc03ff,
+ 0x00000068, 0xbe803080,
+ 0xbe813080, 0xbe823080,
+ 0xbe833080, 0x80fc847c,
+ 0xbf84fffa, 0xbeea0480,
+ 0xbeec0480, 0xbeee0480,
+ 0xbef00480, 0xbef20480,
+ 0xbef40480, 0xbef60480,
+ 0xbef80480, 0xbefa0480,
+ 0xbf810000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
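The hex array above is the assembled GFX10.3 machine code for the cleaner shader whose source follows; 0xbf810000 is s_endpgm and the trailing 0xbf9f0000 words are s_code_end padding. A minimal sketch of staging such a blob into a CPU-mapped, GPU-visible buffer (buffer allocation elided; function and parameter names hypothetical):

	#include <linux/string.h>

	static void stage_cleaner_shader(void *shader_cpu_ptr)
	{
		/* sizeof() yields the blob size in bytes; the CP later
		 * fetches and executes it from the GPU-visible buffer. */
		memcpy(shader_cpu_ptr, gfx_10_3_0_cleaner_shader_hex,
		       sizeof(gfx_10_3_0_cleaner_shader_hex));
	}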
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
new file mode 100644
index 000000000000..0e1c246166c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To turn this shader program on for compilation, change this shader to main and rename the shader main below to main_1.
+
+// GFX10.3 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup can exit. Without this, not all SGPRs are guaranteed to be cleared.
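+//
+// Worked budget check, derived from the figures above:
+//   VGPRs: 16 waves/SIMD * 64 VGPRs/wave = 1024 VGPRs -> the full per-SIMD budget
+//   LDS:   64KB per workgroup * 2 workgroups (one per CU) = 128KB -> the WGP's full LDS
+//   SGPRs: each wave clears s0..s107, then VCC, FLAT_SCRATCH and TTMP0..15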
+
+
+shader main
+ asic(GFX10)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+
+//
+// Create 32 waves in a threadgroup (CS waves)
+// Each allocates 64 VGPRs
+// The workgroup allocates all of LDS (64kbytes)
+//
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024 clks vgpr + 640 clks lds = 1664 clks)
+//
+ S_BARRIER
+ s_mov_b32 s2, 0x00000038 // Loop 64/8=8 times (loop unrolled for performance)
+ s_mov_b32 m0, 0
+ //
+ // CLEAR VGPRs
+ //
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_mov_b32 m0, s2
+ s_sub_u32 s2, s2, 8
+ s_cbranch_scc0 label_0005
+ //
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s0 // sgpr0 carries the tg_size (first_wave) term, since the ucode sets only COMPUTE_PGM_RSRC2.tg_size_en
+ s_cbranch_scc0 label_0023 // Clean LDS only if this is the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8 bytes)
+ s_mov_b32 s2, 0x0000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
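+
+ // Index math for the loop above: s_movreld_b32 sN writes s[N + M0], so each
+ // pass clears s[m0]..s[m0+3]; m0 steps 104, 100, ..., 4, 0 (27 passes) and
+ // the borrow from the final subtract ends the loop, leaving s0..s107 zeroed.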
+
+ //clear vcc
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 2ae058a224f4..56c06b72a70a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -615,7 +615,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
if (!ring->is_mes_queue)
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
if (!ring->is_mes_queue)
@@ -639,6 +639,7 @@ static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *
int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -688,6 +689,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
@@ -705,6 +707,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
@@ -720,9 +723,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
adev->pdev->revision == 0xCE)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -735,6 +740,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
@@ -1885,6 +1891,7 @@ static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1902,9 +1909,11 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
active_rb_bitmap &= global_active_rb_bitmap;
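Rather than assuming two RBs per shader array (the old hard-coded 0x3), the per-SA mask is now derived from the configured width. A minimal sketch of the equivalent computation (assuming amdgpu_gfx_create_bitmask builds a low-order mask of the given width):

	static u32 rb_mask_for_sa(u32 rb_per_sa)
	{
		/* rb_per_sa = 2 -> 0x3 (matches the old constant),
		 * rb_per_sa = 4 -> 0xf, and so on */
		return (u32)((1ULL << rb_per_sa) - 1);
	}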
@@ -3918,9 +3927,7 @@ static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
@@ -5458,10 +5465,10 @@ static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
-static int gfx_v11_0_set_powergating_state(void *handle,
+static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -5494,10 +5501,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v11_0_set_clockgating_state(void *handle,
+static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -6646,30 +6653,14 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, r = 0;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
- WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
-
- /* make sure dequeue is complete*/
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
- }
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ dev_err(adev->dev, "reset via MMIO failed %d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index da327ab48a57..4b6e05750654 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -513,7 +513,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
if (!ring->is_mes_queue)
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
if (!ring->is_mes_queue)
@@ -537,6 +537,7 @@ static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *
int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -566,6 +567,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
@@ -573,6 +575,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
@@ -581,6 +584,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -593,6 +597,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
@@ -1437,11 +1442,19 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if ((adev->gfx.me_fw_version >= 2660) &&
+ (adev->gfx.mec_fw_version >= 2920)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
+ }
if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
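Per-queue reset is only advertised once both CP firmwares are new enough; the thresholds come straight from the sw_init hunk above. A minimal sketch of the gate (standalone helper, not a driver API):

	static bool gfx12_per_queue_reset_supported(u32 me_fw_version,
						    u32 mec_fw_version)
	{
		/* ME >= 2660 and MEC >= 2920, per the check above */
		return me_fw_version >= 2660 && mec_fw_version >= 2920;
	}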
@@ -1610,6 +1623,7 @@ static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1627,12 +1641,14 @@ static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
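An RB should be reported as usable only when both the per-SA scan and the global mask agree, which is why the combine is an AND:

	/* sketch of the intersection this hunk establishes */
	u32 usable_rbs = per_sa_active_rbs & global_active_rbs;

ORing the global mask instead would let RBs that the per-SA scan never marked leak into backend_enable_mask.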
@@ -2832,9 +2848,7 @@ static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev)
@@ -3864,10 +3878,10 @@ static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
}
#endif
-static int gfx_v12_0_set_powergating_state(void *handle,
+static int gfx_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -4115,10 +4129,10 @@ static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v12_0_set_clockgating_state(void *handle,
+static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -5233,24 +5247,16 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int r, i;
+ int r;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- mutex_lock(&adev->srbm_mutex);
- soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
- WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ if (r) {
+ dev_err(adev->dev, "reset via MMIO failed %d\n", r);
+ return r;
}
- soc24_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
index bcc9c72ccbde..f7184b2dc4e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
@@ -26,4 +26,6 @@
extern const struct amdgpu_ip_block_version gfx_v12_0_ip_block;
+int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 41f50bf380c4..f26e2cdec07a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -337,6 +337,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
@@ -345,6 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
@@ -353,6 +355,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
@@ -361,6 +364,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1906,7 +1910,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
error:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
}
@@ -3373,11 +3377,11 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v6_0_set_clockgating_state(void *handle,
+static int gfx_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -3395,11 +3399,11 @@ static int gfx_v6_0_set_clockgating_state(void *handle,
return 0;
}
-static int gfx_v6_0_set_powergating_state(void *handle,
+static int gfx_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
gate = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 824d5913103b..84745b2453ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -934,33 +934,39 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
if (err)
goto out;
}
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
out:
if (err) {
@@ -2324,7 +2330,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
error:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
}
@@ -4846,11 +4852,11 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v7_0_set_clockgating_state(void *handle,
+static int gfx_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -4869,11 +4875,11 @@ static int gfx_v7_0_set_clockgating_state(void *handle,
return 0;
}
-static int gfx_v7_0_set_powergating_state(void *handle,
+static int gfx_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
gate = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b7006c41e270..6a025438f9d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -914,7 +914,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -982,13 +982,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_pfp_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
}
if (err)
@@ -999,13 +1002,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_me_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
}
if (err)
@@ -1017,13 +1023,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_ce_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
}
if (err)
@@ -1044,6 +1053,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->virt.chained_ib_support = false;
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1093,13 +1103,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_mec_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
}
if (err)
@@ -1112,13 +1125,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_mec2_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
}
if (!err) {
@@ -1640,7 +1656,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
RREG32(sec_ded_counter_registers[i]);
fail:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
@@ -4304,9 +4320,7 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32(mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32(mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32(mmRLC_CP_SCHEDULERS, tmp);
+ WREG32(mmRLC_CP_SCHEDULERS, tmp | 0x80);
}
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
@@ -5321,7 +5335,7 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable, 0);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -5367,10 +5381,10 @@ static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
}
}
-static int gfx_v8_0_set_powergating_state(void *handle,
+static int gfx_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -5625,8 +5639,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
@@ -5720,8 +5732,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5731,8 +5741,6 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
@@ -5813,12 +5821,12 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
}
gfx_v8_0_wait_for_rlc_serdes(adev);
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
* === MGCG + MGLS + TS(CG/LS) ===
@@ -5832,6 +5840,8 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
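These hunks hoist the RLC safe-mode bracket out of the individual clock-gating helpers and into their single caller, so safe mode is entered and exited once per update rather than once per helper. The resulting call shape, mirroring the patched function:

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	if (enable) {
		/* MGCG/MGLS before CGCG/CGLS when gating */
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* reverse order when ungating */
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
	}
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);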
@@ -5982,10 +5992,10 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v8_0_set_clockgating_state(void *handle,
+static int gfx_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 0b6f09f2cc9b..fa572b40989e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1243,7 +1243,7 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -1429,18 +1429,21 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
int err;
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
@@ -1476,6 +1479,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc_am4.bin", chip_name);
else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
(smu_version >= 0x41e2b))
@@ -1483,9 +1487,11 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
*SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
*/
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_kicker_rlc.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1518,9 +1524,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_sjt_mec.bin", chip_name);
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
@@ -1531,9 +1539,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sjt_mec2.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
@@ -3488,9 +3498,7 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
@@ -4780,7 +4788,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
fail:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
@@ -4956,8 +4964,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5022,8 +5028,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -5034,8 +5038,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (!adev->gfx.num_gfx_rings)
return;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* Enable 3D CGCG/CGLS */
if (enable) {
/* write cmd to clear cgcg/cgls ov */
@@ -5077,8 +5079,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5086,8 +5086,6 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -5129,13 +5127,12 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS
* === MGCG + MGLS ===
@@ -5155,6 +5152,7 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === MGCG + MGLS === */
gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
}
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5232,10 +5230,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
-static int gfx_v9_0_set_powergating_state(void *handle,
+static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
@@ -5277,10 +5275,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v9_0_set_clockgating_state(void *handle,
+static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 3f4fd2f08163..d81449f9d822 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -412,7 +412,7 @@ static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev,
r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr);
if (r) {
dev_err(adev->dev, "ib submit failed (%d).\n", r);
- amdgpu_ib_free(adev, ib, NULL);
+ amdgpu_ib_free(ib, NULL);
}
return r;
}
@@ -611,16 +611,16 @@ static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev)
}
disp2_failed:
- amdgpu_ib_free(adev, &disp_ibs[2], NULL);
+ amdgpu_ib_free(&disp_ibs[2], NULL);
dma_fence_put(fences[2]);
disp1_failed:
- amdgpu_ib_free(adev, &disp_ibs[1], NULL);
+ amdgpu_ib_free(&disp_ibs[1], NULL);
dma_fence_put(fences[1]);
disp0_failed:
- amdgpu_ib_free(adev, &disp_ibs[0], NULL);
+ amdgpu_ib_free(&disp_ibs[0], NULL);
dma_fence_put(fences[0]);
pro_end:
- amdgpu_ib_free(adev, &wb_ib, NULL);
+ amdgpu_ib_free(&wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init SGPRS Failed\n");
@@ -687,10 +687,10 @@ static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev)
}
disp_failed:
- amdgpu_ib_free(adev, &disp_ib, NULL);
+ amdgpu_ib_free(&disp_ib, NULL);
dma_fence_put(fence);
pro_end:
- amdgpu_ib_free(adev, &wb_ib, NULL);
+ amdgpu_ib_free(&wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init VGPRS Failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 54459254bd37..2ba185875baa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -43,8 +43,10 @@
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
@@ -54,10 +56,6 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
-#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
-#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
-#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
-
#define XCC_REG_RANGE_0_LOW 0x2000 /* XCC gfxdec0 lower Bound */
#define XCC_REG_RANGE_0_HIGH 0x3400 /* XCC gfxdec0 upper Bound */
#define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */
@@ -351,13 +349,17 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
GOLDEN_GB_ADDR_CONFIG);
- /* Golden settings applied by driver for ASIC with rev_id 0 */
- if (adev->rev_id == 0) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
- REDUCE_FIFO_DEPTH_BY_2, 2);
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
} else {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
- SPARE, 0x1);
+ /* Golden settings applied by driver for ASIC with rev_id 0 */
+ if (adev->rev_id == 0) {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
+ REDUCE_FIFO_DEPTH_BY_2, 2);
+ } else {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
+ SPARE, 0x1);
+ }
}
}
}
@@ -501,7 +503,7 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -545,6 +547,7 @@ static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -576,12 +579,19 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
{
int err;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_sjt_mec.bin", chip_name);
- else
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
+
+ if (err)
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mec.bin", chip_name);
+ } else
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_mec.bin", chip_name);
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
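Under SR-IOV the single-jump-table (sjt) MEC image is now preferred, with a graceful fallback to the plain image when it is absent. A sketch of the retry shape (request_fw is a stand-in for the amdgpu_ucode_request calls above, not a driver API):

	err = request_fw(adev, &adev->gfx.mec_fw, "sjt_mec");
	if (err)
		err = request_fw(adev, &adev->gfx.mec_fw, "mec");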
@@ -935,6 +945,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1785,9 +1796,7 @@ static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
@@ -2770,16 +2779,16 @@ static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};
-static int gfx_v9_4_3_set_powergating_state(void *handle,
+static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
}
-static int gfx_v9_4_3_set_clockgating_state(void *handle,
+static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
if (amdgpu_sriov_vf(adev))
@@ -4659,7 +4668,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- amdgpu_gfx_off_ctrl(adev, false);
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
xcc_offset = xcc_id * reg_count;
for (i = 0; i < reg_count; i++)
@@ -4667,7 +4675,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
GET_INST(GC, xcc_id)));
}
- amdgpu_gfx_off_ctrl(adev, true);
/* dump compute queue registers for all instances */
if (!adev->gfx.ip_dump_compute_queues)
@@ -4676,7 +4683,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
- amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->srbm_mutex);
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
xcc_offset = xcc_id * reg_count * num_inst;
@@ -4703,7 +4709,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
}
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
}
static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
@@ -4866,6 +4871,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* 9.4.3 removed all the GDS internal memory;
* the kernel only supports GWS opcodes such as
* barrier and semaphore. */
@@ -4879,6 +4885,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* deprecated for 9.4.3, no usage at all */
adev->gds.gds_compute_max_wave_id = 0;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index ed8e130c7d19..5470cef7e9bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -368,7 +368,9 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
amdgpu_ip_version(adev, GC_HWIP, 0) ==
IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 4));
+ IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0));
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 697599c46240..9bedca9a79c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1088,11 +1088,11 @@ static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v10_0_set_clockgating_state(void *handle,
+static int gmc_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled
@@ -1131,7 +1131,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
athub_v2_0_get_clockgating(adev, flags);
}
-static int gmc_v10_0_set_powergating_state(void *handle,
+static int gmc_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index f893ab4c14df..72751ab4c766 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -996,11 +996,11 @@ static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v11_0_set_clockgating_state(void *handle,
+static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
@@ -1018,7 +1018,7 @@ static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
athub_v3_0_get_clockgating(adev, flags);
}
-static int gmc_v11_0_set_powergating_state(void *handle,
+static int gmc_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index d22b027fd0bb..b749f1c3f6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -40,7 +40,7 @@
#include "gfxhub_v12_0.h"
#include "mmhub_v4_1_0.h"
#include "athub_v4_1_0.h"
-
+#include "umc_v8_14.h"
static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
@@ -581,6 +581,18 @@ static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
{
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+ case IP_VERSION(8, 14, 0):
+ adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
+ adev->umc.node_inst_num = 0;
+ adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
+ adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
+ adev->umc.ras = &umc_v8_14_ras;
+ break;
+ default:
+ break;
+ }
}
@@ -829,6 +841,10 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -980,11 +996,11 @@ static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v12_0_set_clockgating_state(void *handle,
+static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
@@ -1002,7 +1018,7 @@ static void gmc_v12_0_get_clockgating_state(void *handle, u64 *flags)
athub_v4_1_0_get_clockgating(adev, flags);
}
-static int gmc_v12_0_set_powergating_state(void *handle,
+static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ca000b3d1afc..2245dda92021 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -131,7 +131,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
chip_name = "si58";
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s_mc.bin\"\n",
@@ -1094,13 +1095,13 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v6_0_set_clockgating_state(void *handle,
+static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int gmc_v6_0_set_powergating_state(void *handle,
+static int gmc_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index b6016f11956e..9aac4b1101e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -157,7 +157,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
@@ -1317,11 +1318,11 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v7_0_set_clockgating_state(void *handle,
+static int gmc_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -1337,7 +1338,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle,
return 0;
}
-static int gmc_v7_0_set_powergating_state(void *handle,
+static int gmc_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 12d5967ecd45..d06585207c33 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -259,7 +259,8 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
pr_err("mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
@@ -1658,10 +1659,10 @@ static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
}
}
-static int gmc_v8_0_set_clockgating_state(void *handle,
+static int gmc_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1679,7 +1680,7 @@ static int gmc_v8_0_set_clockgating_state(void *handle,
return 0;
}
-static int gmc_v8_0_set_powergating_state(void *handle,
+static int gmc_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 50c5da3020cb..291549765c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -623,6 +623,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
}
+ if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
+ return 1;
+
if (!printk_ratelimit())
return 0;
@@ -645,7 +648,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
soc15_ih_clientid_name[entry->client_id]);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -795,7 +799,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1138,12 +1143,13 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
+ uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
bool is_local;
dma_resv_assert_held(bo->tbo.base.resv);
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ switch (gc_ip_version) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
if (is_vram) {
@@ -1157,10 +1163,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 3)) &&
+ if (gc_ip_version == IP_VERSION(9, 4, 2) &&
adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
@@ -1184,6 +1187,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* Only local VRAM BOs or system memory on non-NUMA APUs
* can be assumed to be local in their entirety. Choose
* MTYPE_NC as safe fallback for all system memory BOs on
@@ -1208,7 +1212,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (adev->rev_id)
+ if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
mtype = is_local ? MTYPE_CC : MTYPE_UC;
else
mtype = MTYPE_UC;
@@ -1218,10 +1222,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* dGPU */
if (is_local)
mtype = mtype_local;
- else if (is_vram)
- mtype = MTYPE_NC;
- else
+ else if (gc_ip_version < IP_VERSION(9, 5, 0) && !is_vram)
mtype = MTYPE_UC;
+ else
+ mtype = MTYPE_NC;
}
break;
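With this hunk the dGPU fallback reduces to three cases: local mappings keep mtype_local, pre-9.5 non-VRAM mappings keep MTYPE_UC, and everything else (VRAM, plus all 9.5.0 remote mappings) becomes MTYPE_NC. A minimal sketch of that decision, mirroring the rewritten branch:

	static unsigned int dgpu_fallback_mtype(bool is_local, bool is_vram,
						bool pre_gc_9_5,
						unsigned int mtype_local)
	{
		if (is_local)
			return mtype_local;
		if (pre_gc_9_5 && !is_vram)
			return MTYPE_UC;	/* old behaviour for system pages */
		return MTYPE_NC;		/* VRAM, and all 9.5.0 remote pages */
	}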
@@ -1275,7 +1279,8 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
* memory can use more efficient MTYPEs.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 5, 0))
return;
/* Only direct-mapped memory allows us to determine the NUMA node from
@@ -1540,6 +1545,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
adev->mmhub.ras = &mmhub_v1_7_ras;
break;
case IP_VERSION(1, 8, 0):
+ case IP_VERSION(1, 8, 1):
adev->mmhub.ras = &mmhub_v1_8_ras;
break;
default:
@@ -1551,7 +1557,8 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1619,7 +1626,8 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1792,6 +1800,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -2070,7 +2079,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2154,6 +2164,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
NUM_XCC(adev->gfx.xcc_mask));
@@ -2220,7 +2231,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
r = gmc_v9_0_init_mem_ranges(adev);
if (r)
return r;
@@ -2250,7 +2262,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) ?
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) ?
3 :
8;
@@ -2263,7 +2276,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2274,7 +2288,8 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2544,10 +2559,10 @@ static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v9_0_set_clockgating_state(void *handle,
+static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->set_clockgating(adev, state);
@@ -2565,7 +2580,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
athub_v1_0_get_clockgating(adev, flags);
}
-static int gmc_v9_0_set_powergating_state(void *handle,
+static int gmc_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
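
The recurring handle-to-ip_block conversion in this series is mechanical: every set_clockgating_state/set_powergating_state callback drops the opaque void *handle in favor of the typed struct amdgpu_ip_block pointer and reaches the device through ip_block->adev instead of a cast, and internal callers such as the hw_fini paths are updated to pass the ip_block they already hold. A minimal sketch of the before/after shape (foo_ is a placeholder, not a real block):

	/* old: opaque handle, cast required */
	static int foo_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		/* gate/ungate clocks for this block */
		return 0;
	}

	/* new: typed IP-block pointer, device via ->adev */
	static int foo_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_clockgating_state state)
	{
		struct amdgpu_device *adev = ip_block->adev;

		/* gate/ungate clocks for this block */
		return 0;
	}
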
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 7f45e93c0397..8ac3d3282268 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,13 +392,13 @@ static int iceland_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int iceland_ih_set_clockgating_state(void *handle,
+static int iceland_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int iceland_ih_set_powergating_state(void *handle,
+static int iceland_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 38f953fd65d9..f8a485164437 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -693,10 +693,10 @@ static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int ih_v6_0_set_clockgating_state(void *handle,
+static int ih_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_0_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -756,10 +756,10 @@ static void ih_v6_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v6_0_set_powergating_state(void *handle,
+static int ih_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 61381e0c3795..dd0042efceec 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -674,10 +674,10 @@ static void ih_v6_1_update_clockgating_state(struct amdgpu_device *adev,
return;
}
-static int ih_v6_1_set_clockgating_state(void *handle,
+static int ih_v6_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_1_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -737,10 +737,10 @@ static void ih_v6_1_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v6_1_set_powergating_state(void *handle,
+static int ih_v6_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index d2428cf5d385..8f9b15c171f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -664,10 +664,10 @@ static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev,
return;
}
-static int ih_v7_0_set_clockgating_state(void *handle,
+static int ih_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v7_0_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -727,10 +727,10 @@ static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v7_0_set_powergating_state(void *handle,
+static int ih_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index d4f72e47ae9e..aeca5c08ea2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -50,7 +50,8 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index 1341f0292031..df898dbb746e 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -47,7 +47,8 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 6e29b69894a5..7c9251c03815 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -35,7 +35,7 @@
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v2_0_set_powergating_state(void *handle,
+static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
@@ -154,7 +154,7 @@ static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
- jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -675,14 +675,14 @@ static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int jpeg_v2_0_set_clockgating_state(void *handle,
+static int jpeg_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (!jpeg_v2_0_is_idle(handle))
+ if (!jpeg_v2_0_is_idle(adev))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
@@ -692,10 +692,10 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v2_0_set_powergating_state(void *handle,
+static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 9ac421486f05..11f6af2646e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -38,7 +38,7 @@
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);
@@ -219,7 +219,7 @@ static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
- jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
@@ -518,10 +518,10 @@ static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int jpeg_v2_5_set_clockgating_state(void *handle,
+static int jpeg_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
@@ -530,7 +530,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (!jpeg_v2_5_is_idle(handle))
+ if (!jpeg_v2_5_is_idle(adev))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
@@ -541,10 +541,10 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index e0df6800502c..4eca65ea9053 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -36,7 +36,7 @@
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v3_0_set_powergating_state(void *handle,
+static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
@@ -168,7 +168,7 @@ static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
- jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -466,14 +466,14 @@ static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v3_0_set_clockgating_state(void *handle,
+static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v3_0_is_idle(handle))
+ if (!jpeg_v3_0_is_idle(adev))
return -EBUSY;
jpeg_v3_0_enable_clock_gating(adev);
} else {
@@ -483,10 +483,10 @@ static int jpeg_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v3_0_set_powergating_state(void *handle,
+static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if(state == adev->jpeg.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index eca1963c33b6..0aef1f64afd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -39,7 +39,7 @@
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_set_powergating_state(void *handle,
+static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
@@ -206,7 +206,7 @@ static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -635,14 +635,14 @@ static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v4_0_set_clockgating_state(void *handle,
+static int jpeg_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v4_0_is_idle(handle))
+ if (!jpeg_v4_0_is_idle(adev))
return -EBUSY;
jpeg_v4_0_enable_clock_gating(adev);
} else {
@@ -652,10 +652,10 @@ static int jpeg_v4_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_set_powergating_state(void *handle,
+static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 67b51bcbacd1..88f9771c1686 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -43,7 +43,7 @@ enum jpeg_engin_status {
static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_3_set_powergating_state(void *handle,
+static int jpeg_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring);
@@ -76,7 +76,7 @@ static int jpeg_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
+ adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
jpeg_v4_0_3_set_dec_ring_funcs(adev);
jpeg_v4_0_3_set_irq_funcs(adev);
@@ -321,7 +321,7 @@ static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
ring->wptr = 0;
@@ -379,7 +379,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
return ret;
@@ -949,16 +949,16 @@ static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int jpeg_v4_0_3_set_clockgating_state(void *handle,
+static int jpeg_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (enable) {
- if (!jpeg_v4_0_3_is_idle(handle))
+ if (!jpeg_v4_0_3_is_idle(adev))
return -EBUSY;
jpeg_v4_0_3_enable_clock_gating(adev, i);
} else {
@@ -968,10 +968,10 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_3_set_powergating_state(void *handle,
+static int jpeg_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
@@ -1231,9 +1231,95 @@ static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
};
+static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* reference to the SMU driver interface header file */
+static int jpeg_v4_0_3_err_codes[] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31
+};
+
+static bool jpeg_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ jpeg_v4_0_3_err_codes,
+ ARRAY_SIZE(jpeg_v4_0_3_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops jpeg_v4_0_3_aca_bank_ops = {
+ .aca_bank_parser = jpeg_v4_0_3_aca_bank_parser,
+ .aca_bank_is_valid = jpeg_v4_0_3_aca_bank_is_valid,
+};
+
+static const struct aca_info jpeg_v4_0_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &jpeg_v4_0_3_aca_bank_ops,
+};
+
+static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v4_0_3_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_3_ras_hw_ops,
+ .ras_late_init = jpeg_v4_0_3_ras_late_init,
},
};
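
jpeg_v4_0_3 also gains ACA-based RAS plumbing: the bank parser logs an uncorrectable bank with a fixed count of 1 and a correctable bank with the hardware count decoded from MISC0, while the is_valid hook accepts only banks whose IPID instance matches the SMU AID0 instance and whose error code is on the JPEG whitelist. The new ras_late_init follows the usual bind-or-unwind idiom; restated as a sketch:

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
				&jpeg_v4_0_3_aca_info, NULL);
	if (r)
		amdgpu_ras_block_late_fini(adev, ras_block);	/* undo late_init */

	return r;
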
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 1d9e3b101c3a..6b3656984957 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -48,7 +48,7 @@
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_5_set_powergating_state(void *handle,
+static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
@@ -236,7 +236,7 @@ static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS))
- jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
return 0;
@@ -660,10 +660,10 @@ static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int jpeg_v4_0_5_set_clockgating_state(void *handle,
+static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -672,7 +672,7 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (!jpeg_v4_0_5_is_idle(handle))
+ if (!jpeg_v4_0_5_is_idle(adev))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev, i);
@@ -684,10 +684,10 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_5_set_powergating_state(void *handle,
+static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 58fb1e5fa89c..d5cf0f2799d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -31,12 +31,12 @@
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
-#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "jpeg_v5_0_0.h"
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v5_0_0_set_powergating_state(void *handle,
+static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
@@ -74,7 +74,7 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+ VCN_5_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
if (r)
return r;
@@ -172,7 +172,7 @@ static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -560,14 +560,14 @@ static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v5_0_0_set_clockgating_state(void *handle,
+static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (enable) {
- if (!jpeg_v5_0_0_is_idle(handle))
+ if (!jpeg_v5_0_0_is_idle(adev))
return -EBUSY;
jpeg_v5_0_0_enable_clock_gating(adev);
} else {
@@ -577,10 +577,10 @@ static int jpeg_v5_0_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v5_0_0_set_powergating_state(void *handle,
+static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
@@ -612,7 +612,7 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
DRM_DEBUG("IH: JPEG TRAP\n");
switch (entry->src_id) {
- case VCN_4_0__SRCID__JPEG_DECODE:
+ case VCN_5_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
new file mode 100644
index 000000000000..40d4c32a8c2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -0,0 +1,708 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2014-2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v4_0_3.h"
+#include "jpeg_v5_0_1.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+
+static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
+
+static int amdgpu_ih_srcid_jpeg[] = {
+ VCN_5_0__SRCID__JPEG_DECODE,
+ VCN_5_0__SRCID__JPEG1_DECODE,
+ VCN_5_0__SRCID__JPEG2_DECODE,
+ VCN_5_0__SRCID__JPEG3_DECODE,
+ VCN_5_0__SRCID__JPEG4_DECODE,
+ VCN_5_0__SRCID__JPEG5_DECODE,
+ VCN_5_0__SRCID__JPEG6_DECODE,
+ VCN_5_0__SRCID__JPEG7_DECODE,
+ VCN_5_0__SRCID__JPEG8_DECODE,
+ VCN_5_0__SRCID__JPEG9_DECODE,
+};
+
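+/*
+ * Per-core register offset relative to the pipe-0 aperture. Callers pass 0
+ * for pipe 0 rather than calling this. Pipes within the 4_0_3 ring count map
+ * to 0x40 * pipe - 0xc80 (e.g. pipe 1 -> -0xc40), higher pipes to a second
+ * 0x40-stride range at 0x40 * pipe - 0x440; the two linear ranges are
+ * inferred from the arithmetic below, not from a register map.
+ */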
+static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
+{
+ if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
+ return ((0x40 * pipe) - 0xc80);
+ else
+ return ((0x40 * pipe) - 0x440);
+}
+
+/**
+ * jpeg_v5_0_1_early_init - set function pointers
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (!adev->jpeg.num_jpeg_inst || adev->jpeg.num_jpeg_inst > AMDGPU_MAX_JPEG_INSTANCES)
+ return -ENOENT;
+
+ adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
+ jpeg_v5_0_1_set_dec_ring_funcs(adev);
+ jpeg_v5_0_1_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_sw_init - sw init for JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->use_doorbell = false;
+ ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
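+ /*
+ * Doorbell layout, as implied by the index math below: bare metal
+ * packs 11 doorbells per JPEG instance after the VCN ring0/1 pair,
+ * while under SR-IOV each instance owns a 32-slot window with rings
+ * 0-3 and rings 4+ placed in two separate sub-ranges.
+ */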
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 1 + j + 11 * jpeg_inst;
+ } else {
+ if (j < 4)
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 4 + j + 32 * jpeg_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 8 + j + 32 * jpeg_inst;
+ }
+ sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch[j] =
+ regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch[j] =
+ SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC_SCRATCH0,
+ (j ? jpeg_v5_0_1_core_reg_offset(j) : 0));
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_sw_fini - sw fini for JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_1_hw_init - start and test JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ */
+static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* jpeg_v5_0_1_start_sriov(adev); */
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ jpeg_v5_0_1_dec_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ }
+ return 0;
+ }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+ ring = adev->jpeg.inst[i].ring_dec;
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * jpeg_inst,
+ adev->jpeg.inst[i].aid_id);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->use_doorbell)
+ WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
+ (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->doorbell_index <<
+ VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_hw_fini - stop the hardware block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0;
+
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+
+ return ret;
+}
+
+/**
+ * jpeg_v5_0_1_suspend - suspend JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = jpeg_v5_0_1_hw_fini(ip_block);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_1_resume - resume JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v5_0_1_hw_init(ip_block);
+
+ return r;
+}
+
+static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx)
+{
+ int jpeg_inst;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* keep the JPEG in static PG mode */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
+
+ return 0;
+}
+
+static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
+{
+ int jpeg_inst;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, j, jpeg_inst, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ /* disable antihang */
+ r = jpeg_v5_0_1_disable_antihang(adev, i);
+ if (r)
+ return r;
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+ u32 reg, data, mask;
+
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+
+ /* enable System Interrupt for JRBC */
+ reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
+ if (j < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << j;
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j);
+ WREG32_P(reg, data, mask);
+ } else {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12);
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12));
+ WREG32_P(reg, data, mask);
+ }
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
+ reg_offset);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
+{
+ int i, jpeg_inst, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable antihang */
+ r = jpeg_v5_0_1_enable_antihang(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v5_0_1_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_RPTR,
+ ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v5_0_1_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_WPTR,
+ ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
+ regUVD_JRBC_RB_WPTR,
+ (ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0),
+ lower_32_bits(ring->wptr));
+ }
+}
+
+static bool jpeg_v5_0_1_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool ret = true;
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+
+ ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC_STATUS, reg_offset) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ }
+ }
+
+ return ret;
+}
+
+static int jpeg_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+
+ ret = SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC_STATUS, reg_offset,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ if (ret)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool enable = (state == AMD_CG_STATE_GATE);
+ int i;
+
+ if (!enable)
+ return 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (!jpeg_v5_0_1_is_idle(adev))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_stop(adev);
+ else
+ ret = jpeg_v5_0_1_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 i, inst;
+
+ i = node_id_to_phys_map[entry->node_id];
+ DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
+ if (adev->jpeg.inst[inst].aid_id == i)
+ break;
+
+ if (inst >= adev->jpeg.num_jpeg_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown JPEG instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_5_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
+ break;
+ case VCN_5_0__SRCID__JPEG1_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
+ break;
+ case VCN_5_0__SRCID__JPEG2_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
+ break;
+ case VCN_5_0__SRCID__JPEG3_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
+ break;
+ case VCN_5_0__SRCID__JPEG4_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
+ break;
+ case VCN_5_0__SRCID__JPEG5_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
+ break;
+ case VCN_5_0__SRCID__JPEG6_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
+ break;
+ case VCN_5_0__SRCID__JPEG7_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
+ break;
+ case VCN_5_0__SRCID__JPEG8_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[8]);
+ break;
+ case VCN_5_0__SRCID__JPEG9_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[9]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
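+/*
+ * The srcid -> ring mapping in jpeg_v5_0_1_process_interrupt() is 1:1 with
+ * amdgpu_ih_srcid_jpeg[]; an equivalent table-driven form (a sketch, not
+ * part of this patch) would be:
+ *
+ *	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
+ *		if (entry->src_id == amdgpu_ih_srcid_jpeg[j]) {
+ *			amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[j]);
+ *			break;
+ *		}
+ */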
+
+static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
+ .name = "jpeg_v5_0_1",
+ .early_init = jpeg_v5_0_1_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v5_0_1_sw_init,
+ .sw_fini = jpeg_v5_0_1_sw_fini,
+ .hw_init = jpeg_v5_0_1_hw_init,
+ .hw_fini = jpeg_v5_0_1_hw_fini,
+ .suspend = jpeg_v5_0_1_suspend,
+ .resume = jpeg_v5_0_1_resume,
+ .is_idle = jpeg_v5_0_1_is_idle,
+ .wait_for_idle = jpeg_v5_0_1_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
+ .set_powergating_state = jpeg_v5_0_1_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
+ .get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
+ .set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v5_0_1_dec_ring_emit_vm_flush */
+ 22 + 22 + /* jpeg_v5_0_1_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v5_0_1_dec_ring_emit_ib */
+ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
+ .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v4_0_3_dec_ring_nop,
+ .insert_start = jpeg_v4_0_3_dec_ring_insert_start,
+ .insert_end = jpeg_v4_0_3_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, j, jpeg_inst;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v5_0_1_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec[j].me = i;
+ adev->jpeg.inst[i].ring_dec[j].pipe = j;
+ }
+ jpeg_inst = GET_INST(JPEG, i);
+ adev->jpeg.inst[i].aid_id =
+ jpeg_inst / adev->jpeg.num_inst_per_aid;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
+ .set = jpeg_v5_0_1_set_interrupt_state,
+ .process = jpeg_v5_0_1_process_interrupt,
+};
+
+static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
+
+ adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 5,
+ .minor = 0,
+ .rev = 1,
+ .funcs = &jpeg_v5_0_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
new file mode 100644
index 000000000000..8ce146c00bb6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V5_0_1_H__
+#define __JPEG_V5_0_1_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
+
+#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 9c905b9e9376..65f389eb65e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1505,9 +1505,7 @@ static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
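
Both mes_v11_0 and mes_v12_0 (below) fold the KIQ scheduler setup into a single register write: the old sequence wrote RLC_CP_SCHEDULERS once with only the queue selection and then again with the extra 0x80 flag, whereas the new code ORs the flag into the one write. The resulting sequence:

	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);	/* select and flag in one write */
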
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 9ecc5d61e49b..5b537806b4da 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
+#include "gfx_v12_0.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gc/gc_12_0_0_offset.h"
@@ -350,6 +351,132 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
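+/*
+ * Acquire (req = true) or release (req = false) the CP_GFX_INDEX_MUTEX
+ * hardware mutex by polling: an acquire is granted once the readback matches
+ * the written REQUEST/CLIENTID pattern, and a release is complete once the
+ * readback no longer looks like a pending request (the mutex is then free or
+ * held by firmware).
+ */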
+int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req)
+{
+ u32 i, tmp, val;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* Request with MeId=2, PipeId=0 */
+ tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
+ WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
+
+ val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
+ if (req) {
+ if (val == tmp)
+ break;
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
+ REQUEST, 1);
+
+ /* unlocked or locked by firmware */
+ if (val != tmp)
+ break;
+ }
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = mes->adev;
+ uint32_t value, reg;
+ int i, r = 0;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+ me_id, pipe_id, queue_id, vmid);
+
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ gfx_v12_0_request_gfx_index_mutex(adev, true);
+ /* all se allow writes */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+ gfx_v12_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
+ dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ switch (me_id) {
+ case 1:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
+ break;
+ case 0:
+ default:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
+ break;
+ }
+
+ value = 1 << queue_id;
+ WREG32(reg, value);
+ /* wait for queue reset done */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(reg) & value))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
+ r = -ETIMEDOUT;
+ }
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input)
{
@@ -721,6 +848,11 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
+ if (input->use_mmio)
+ return mes_v12_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
@@ -1455,9 +1587,7 @@ static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
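
mes_v12_0_reset_legacy_queue() now short-circuits to the MMIO path above whenever the caller sets use_mmio, bypassing the MES scheduler packet entirely. A hedged caller sketch, assuming the input type is struct mes_reset_legacy_queue_input and dispatch goes through the mes funcs table (neither is visible in this hunk; the fields match those used above):

	struct mes_reset_legacy_queue_input input = {0};

	input.queue_type = AMDGPU_RING_TYPE_GFX;
	input.me_id = ring->me;
	input.pipe_id = ring->pipe;
	input.queue_id = ring->queue;
	input.vmid = vmid;
	input.use_mmio = true;	/* reset via registers, not a scheduler packet */

	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &input);
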
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index e9a6f33ca710..243eabda0607 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -356,7 +356,7 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GMC,
- enable);
+ enable, 0);
}
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index b01bb759d0f4..e646e5cef0a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -33,7 +33,6 @@
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
-#define mmSMNAID_AID0_MCA_SMU 0x03b30400
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 0820ed62e2e8..62cdfe10e6f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -434,9 +434,8 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catch up.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -667,17 +666,17 @@ static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int navi10_ih_set_clockgating_state(void *handle,
+static int navi10_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
navi10_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
return 0;
}
-static int navi10_ih_set_powergating_state(void *handle,
+static int navi10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
index 39919e0892c1..c92875ceb31f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -28,6 +28,7 @@
#include "nbif/nbif_6_3_1_sh_mask.h"
#include "pcie/pcie_6_1_0_offset.h"
#include "pcie/pcie_6_1_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
@@ -518,3 +519,83 @@ const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
.get_rom_offset = nbif_v6_3_1_get_rom_offset,
.set_reg_remap = nbif_v6_3_1_set_reg_remap,
};
+
+static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_controller_irq enablement should be done in psp bl when it
+ * tries to enable the ras feature. The driver only needs to set the correct
+ * interrupt vector for the bare-metal and sriov use cases, respectively.
+ */
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+
+ return 0;
+}
+
+static int nbif_v6_3_1_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for err_event_athub_irq should be written
+ * to the bif ring. Since the bif ring is not enabled, just leave the
+ * process callback as a dummy one.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbif_v6_3_1_ras_err_event_athub_irq_funcs = {
+ .set = nbif_v6_3_1_set_ras_err_event_athub_irq_state,
+ .process = nbif_v6_3_1_process_err_event_athub_irq,
+};
+
+static void nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static int nbif_v6_3_1_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbif_v6_3_1_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt
+ * nbif v6_3_1 uses the same irq source as nbio v7_4
+ */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+struct amdgpu_nbio_ras nbif_v6_3_1_ras = {
+ .handle_ras_err_event_athub_intr_no_bifring =
+ nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_err_event_athub_interrupt =
+ nbif_v6_3_1_init_ras_err_event_athub_interrupt,
+};
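For context, a minimal sketch of how these new nbif v6_3_1 RAS hooks are typically consumed. The wiring below is an assumption modeled on how other nbio versions register their RAS ops; example_wire_nbif_ras() is a hypothetical helper, not part of this patch.

static void example_wire_nbif_ras(struct amdgpu_device *adev)
{
	/* select the RAS ops added above (assumed wiring point) */
	adev->nbio.ras = &nbif_v6_3_1_ras;

	/* register the athub error interrupt source with the IH */
	if (adev->nbio.ras->init_ras_err_event_athub_interrupt)
		adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
}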
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
index b7f2e0d88905..9ac4831d39e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
@@ -29,5 +29,6 @@
extern const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs;
+extern struct amdgpu_nbio_ras nbif_v6_3_1_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 3bad565ded73..47db483c3516 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -1039,10 +1039,10 @@ static bool nv_common_is_idle(void *handle)
return true;
}
-static int nv_common_set_clockgating_state(void *handle,
+static int nv_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1070,7 +1070,7 @@ static int nv_common_set_clockgating_state(void *handle,
return 0;
}
-static int nv_common_set_powergating_state(void *handle,
+static int nv_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* TODO */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index c4b775aaee9f..cc621064610f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -51,6 +51,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_12_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_12_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
@@ -122,6 +124,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
@@ -177,6 +180,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
retry_cnt =
((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))) ?
PSP_VMBX_POLLING_LIMIT :
10;
@@ -203,6 +207,7 @@ static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
int ret;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
ret = psp_v13_0_wait_for_vmbx_ready(psp);
if (ret)
@@ -288,6 +293,11 @@ static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
return psp_v13_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}
+static int psp_v13_0_bootloader_load_spdm_drv(struct psp_context *psp)
+{
+ return psp_v13_0_bootloader_load_component(psp, &psp->spdm_drv, PSP_BL__LOAD_SPDMDRV);
+}
+
static inline void psp_v13_0_init_sos_version(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -798,6 +808,7 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
return false;
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) &&
(!(adev->flags & AMD_IS_APU))) {
reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);
@@ -857,6 +868,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
+ .bootloader_load_spdm_drv = psp_v13_0_bootloader_load_spdm_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_create = psp_v13_0_ring_create,
.ring_stop = psp_v13_0_ring_stop,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 7948d74f8722..135c5099bfb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -145,9 +145,11 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -631,7 +633,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1080,14 +1082,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v2_4_set_clockgating_state(void *handle,
+static int sdma_v2_4_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* XXX handled via the smc on VI */
return 0;
}
-static int sdma_v2_4_set_powergating_state(void *handle,
+static int sdma_v2_4_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 9a3d729545a7..c611328671ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -305,9 +305,11 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -904,7 +906,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1483,10 +1485,10 @@ static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
}
}
-static int sdma_v3_0_set_clockgating_state(void *handle,
+static int sdma_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1506,7 +1508,7 @@ static int sdma_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v3_0_set_powergating_state(void *handle,
+static int sdma_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c1f98f6cf20d..b48d9c0b2e1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1565,7 +1565,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1956,7 +1956,7 @@ static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (adev->flags & AMD_IS_APU)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false, 0);
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
@@ -1983,7 +1983,7 @@ static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v4_0_enable(adev, false);
if (adev->flags & AMD_IS_APU)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true, 0);
return 0;
}
@@ -2297,10 +2297,10 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
}
}
-static int sdma_v4_0_set_clockgating_state(void *handle,
+static int sdma_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -2312,10 +2312,10 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v4_0_set_powergating_state(void *handle,
+static int sdma_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index a38553f38fdc..48537eba225d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -189,6 +189,7 @@ static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
ret = amdgpu_sdma_init_microcode(adev, 0, true);
break;
@@ -667,11 +668,12 @@ static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
*
* @adev: amdgpu_device pointer
* @i: instance to resume
+ * @restore: used to restore wptr when restarting
*
* Set up the gfx DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
-static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
+static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -698,16 +700,24 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
- ring->wptr = 0;
+ if (!restore)
+ ring->wptr = 0;
/* before programming wptr to a smaller value, minor_ptr_update must be set first */
WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
/* Initialize the ring buffer's read and write pointers */
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+ if (restore) {
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ } else {
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+ }
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
@@ -755,11 +765,12 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
*
* @adev: amdgpu_device pointer
* @i: instance to resume
+ * @restore: used to restore wptr when restarting
*
* Set up the page DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
-static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
+static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -775,10 +786,17 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+ if (restore) {
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ } else {
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+ }
/* set the wb address whether it's enabled or not */
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
@@ -792,7 +810,8 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
- ring->wptr = 0;
+ if (!restore)
+ ring->wptr = 0;
/* before programming wptr to a smaller value, minor_ptr_update must be set first */
WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
@@ -911,12 +930,13 @@ static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @inst_mask: mask of dma engine instances to be enabled
+ * @restore: used to restore wptr when restarting
*
* Set up the DMA engines and enable them.
* Returns 0 for success, error for failure.
*/
static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
- uint32_t inst_mask)
+ uint32_t inst_mask, bool restore)
{
struct amdgpu_ring *ring;
uint32_t tmp_mask;
@@ -927,7 +947,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
sdma_v4_4_2_inst_enable(adev, false, inst_mask);
} else {
/* bypass sdma microcode loading on Gopher */
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
+ if (!restore && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
adev->sdma.instance[0].fw) {
r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
if (r)
@@ -946,17 +966,19 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
uint32_t temp;
WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
- sdma_v4_4_2_gfx_resume(adev, i);
+ sdma_v4_4_2_gfx_resume(adev, i, restore);
if (adev->sdma.has_page_queue)
- sdma_v4_4_2_page_resume(adev, i);
+ sdma_v4_4_2_page_resume(adev, i, restore);
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
- /* enable context empty interrupt during initialization */
- temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
- WREG32_SDMA(i, regSDMA_CNTL, temp);
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
+ /* enable context empty interrupt during initialization */
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
+ }
if (!amdgpu_sriov_vf(adev)) {
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
/* unhalt engine */
@@ -1110,7 +1132,7 @@ static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1466,6 +1488,7 @@ static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_sdma_sysfs_reset_mask_fini(adev);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5))
amdgpu_sdma_destroy_inst_ctx(adev, true);
else
@@ -1486,7 +1509,7 @@ static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev))
sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
- r = sdma_v4_4_2_inst_start(adev, inst_mask);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
return r;
}
@@ -1514,7 +1537,7 @@ static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v4_4_2_set_clockgating_state(void *handle,
+static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
@@ -1522,7 +1545,7 @@ static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_in_reset(adev))
- sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ sdma_v4_4_2_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
return sdma_v4_4_2_hw_fini(ip_block);
}
@@ -1573,6 +1596,42 @@ static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+ u32 inst_mask;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ /* stop queue */
+ inst_mask = 1 << ring->me;
+ sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_inst_page_stop(adev, inst_mask);
+
+ r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, ring->me));
+ if (r)
+ return r;
+
+ udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SDMA(ring->me, regSDMA_F32_CNTL), SDMA_F32_CNTL, HALT))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ dev_err(adev->dev, "timed out waiting for SDMA%d unhalt after reset\n",
+ ring->me);
+ return -ETIMEDOUT;
+ }
+
+ return sdma_v4_4_2_inst_start(adev, inst_mask, true);
+}
+
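The unhalt wait above is a bounded busy-poll. As a sketch, the same pattern factored into a hypothetical helper (poll_sdma_unhalted() is illustrative, not part of this patch):

static int poll_sdma_unhalted(struct amdgpu_device *adev, u32 inst)
{
	int i;

	/* spin until SDMA_F32_CNTL.HALT clears, at most usec_timeout us */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SDMA(inst, regSDMA_F32_CNTL),
				   SDMA_F32_CNTL, HALT))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}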
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1821,10 +1880,10 @@ static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
}
}
-static int sdma_v4_4_2_set_clockgating_state(void *handle,
+static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
if (amdgpu_sriov_vf(adev))
@@ -1839,7 +1898,7 @@ static int sdma_v4_4_2_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v4_4_2_set_powergating_state(void *handle,
+static int sdma_v4_4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1895,7 +1954,6 @@ static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
if (!adev->sdma.ip_dump)
return;
- amdgpu_gfx_off_ctrl(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
instance_offset = i * reg_count;
for (j = 0; j < reg_count; j++)
@@ -1903,7 +1961,6 @@ static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
sdma_reg_list_4_4_2[j].reg_offset));
}
- amdgpu_gfx_off_ctrl(adev, true);
}
const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
@@ -1955,6 +2012,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = sdma_v4_4_2_reset_queue,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2167,7 +2225,7 @@ static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
if (!amdgpu_sriov_vf(adev))
sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
- r = sdma_v4_4_2_inst_start(adev, inst_mask);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
return r;
}
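One note on the restore paths added in this file: ring->wptr counts dwords while the RB_RPTR/RB_WPTR registers take byte offsets, hence the << 2 before splitting into 32-bit halves. A minimal sketch of that conversion (example_restore_wptr() is hypothetical):

static inline void example_restore_wptr(struct amdgpu_device *adev,
					unsigned int i, u64 wptr)
{
	u64 byte_off = wptr << 2;	/* dwords -> bytes */

	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(byte_off));
	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(byte_off));
}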
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index fa9b40934957..b764550834a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1194,7 +1194,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1853,10 +1853,10 @@ static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
}
-static int sdma_v5_0_set_clockgating_state(void *handle,
+static int sdma_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1877,7 +1877,7 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v5_0_set_powergating_state(void *handle,
+static int sdma_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index ba5160399ab2..b1818e87889a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1050,7 +1050,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1812,10 +1812,10 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
}
-static int sdma_v5_2_set_clockgating_state(void *handle,
+static int sdma_v5_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1841,7 +1841,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v5_2_set_powergating_state(void *handle,
+static int sdma_v5_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index d46128b0ec92..1a023b45f0be 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1063,7 +1063,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1601,13 +1601,13 @@ static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v6_0_set_clockgating_state(void *handle,
+static int sdma_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int sdma_v6_0_set_powergating_state(void *handle,
+static int sdma_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index d2ce6b6a7ff6..9c17df2cf37b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -490,162 +490,185 @@ static void sdma_v7_0_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v7_0_gfx_resume - setup and start the async dma engines
+ * sdma_v7_0_gfx_resume_instance - start/restart a certain sdma engine
*
* @adev: amdgpu_device pointer
+ * @i: instance
+ * @restore: used to restore wptr when restarting
*
- * Set up the gfx DMA ring buffers and enable them.
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved
+ * wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v7_0_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v7_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 doorbell;
u32 doorbell_offset;
- u32 tmp;
+ u32 temp;
u64 wptr_gpu_addr;
- int i, r;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
+ int r;
- //if (!amdgpu_sriov_vf(adev))
- // WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ ring = &adev->sdma.instance[i].ring;
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
+ }
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev))
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
+ else
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- if (amdgpu_sriov_vf(adev))
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
- else
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, MCU_WPTR_POLL_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, MCU_WPTR_POLL_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ if (!restore)
ring->wptr = 0;
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
+ /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ }
- doorbell = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
-
- if (i == 0)
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
-
- if (amdgpu_sriov_vf(adev))
- sdma_v7_0_ring_set_wptr(ring);
-
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
-
- /* Set up sdma hang watchdog */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
- /* 100ms per unit */
- tmp = REG_SET_FIELD(tmp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
- max(adev->usec_timeout/100000, 1));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), tmp);
-
- /* Set up RESP_MODE to non-copy addresses */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
- tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), tmp);
-
- /* program default cache read and write policy */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- tmp &= 0xFF0FFF;
- tmp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), tmp);
-
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
- tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, HALT, 0);
- tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, RESET, 0);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), tmp);
- }
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+ if (i == 0)
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v7_0_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+ /* Set up sdma hang watchdog */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
+ /* 100ms per unit */
+ temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
+ max(adev->usec_timeout/100000, 1));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_MCU_CNTL, HALT, 0);
+ temp = REG_SET_FIELD(temp, SDMA0_MCU_CNTL, RESET, 0);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), temp);
+ }
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ ring->sched.ready = true;
- ring->sched.ready = true;
+ if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need the two lines below */
+ sdma_v7_0_ctx_switch_enable(adev, true);
+ sdma_v7_0_enable(adev, true);
+ }
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v7_0_ctx_switch_enable(adev, true);
- sdma_v7_0_enable(adev, true);
- }
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ ring->sched.ready = false;
- r = amdgpu_ring_test_helper(ring);
- if (r) {
- ring->sched.ready = false;
- return r;
- }
+ return r;
+}
+
+/**
+ * sdma_v7_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v7_0_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v7_0_gfx_resume_instance(adev, i, false);
+ if (r)
+ return r;
}
return 0;
}
/**
@@ -806,6 +829,31 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
return false;
}
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
+ if (r)
+ return r;
+
+ return sdma_v7_0_gfx_resume_instance(adev, i, true);
+}
+
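A sketch of how the per-queue reset hook registered below might be invoked by a caller; example_reset_sdma_ring() is hypothetical and only illustrates the indirection through amdgpu_ring_funcs:

static int example_reset_sdma_ring(struct amdgpu_ring *ring, unsigned int vmid)
{
	if (!ring->funcs->reset)
		return -EOPNOTSUPP;

	/* dispatches to sdma_v7_0_reset_queue() for this ring type */
	return ring->funcs->reset(ring, vmid);
}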
/**
* sdma_v7_0_start - setup and start the async dma engines
*
@@ -1060,7 +1108,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1316,6 +1364,13 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ adev->sdma.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_sdma_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
/* Allocate memory for SDMA IP Dump buffer */
ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
if (ptr)
@@ -1334,6 +1389,7 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ amdgpu_sdma_sysfs_reset_mask_fini(adev);
amdgpu_sdma_destroy_inst_ctx(adev, true);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
@@ -1524,13 +1580,13 @@ static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v7_0_set_clockgating_state(void *handle,
+static int sdma_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int sdma_v7_0_set_powergating_state(void *handle,
+static int sdma_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1636,6 +1692,7 @@ static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v7_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v7_0_ring_init_cond_exec,
.preempt_ib = sdma_v7_0_ring_preempt_ib,
+ .reset = sdma_v7_0_reset_queue,
};
static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 00f63d3fbea7..77ef7da2e4fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2649,13 +2649,13 @@ static bool si_common_is_idle(void *handle)
return true;
}
-static int si_common_set_clockgating_state(void *handle,
+static int si_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_common_set_powergating_state(void *handle,
+static int si_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 47647a6083e8..dbd78d5345a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -286,7 +286,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -629,13 +629,13 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
-static int si_dma_set_clockgating_state(void *handle,
+static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
u32 orig, data, offset;
int i;
bool enable;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
enable = (state == AMD_CG_STATE_GATE);
@@ -672,12 +672,12 @@ static int si_dma_set_clockgating_state(void *handle,
return 0;
}
-static int si_dma_set_powergating_state(void *handle,
+static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
WREG32(DMA_PGFSM_WRITE, 0x00002000);
WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 2ec1ebe4db11..a32b6243c1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -263,13 +263,13 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int si_ih_set_clockgating_state(void *handle,
+static int si_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_ih_set_powergating_state(void *handle,
+static int si_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index ede072758dab..a59b4c36cad7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -171,6 +171,24 @@ static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
.codec_array = NULL,
};
+static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_encode_vcn0 = {
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+static const struct amdgpu_video_codec_info vcn_5_0_1_video_codecs_decode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_decode_vcn0 = {
+ .codec_count = ARRAY_SIZE(vcn_5_0_1_video_codecs_decode_array_vcn0),
+ .codec_array = vcn_5_0_1_video_codecs_decode_array_vcn0,
+};
+
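A sketch of how the VCN 5.0.1 tables added above are reached through the query path below; example_query_vcn501_decode() is a hypothetical caller:

static int example_query_vcn501_decode(struct amdgpu_device *adev,
				       const struct amdgpu_video_codecs **codecs)
{
	/* encode=false selects vcn_5_0_1_video_codecs_decode_vcn0 */
	return soc15_query_video_codecs(adev, false, codecs);
}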
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -209,6 +227,12 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
else
*codecs = &vcn_4_0_3_video_codecs_decode;
return 0;
+ case IP_VERSION(5, 0, 1):
+ if (encode)
+ *codecs = &vcn_5_0_1_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_5_0_1_video_codecs_decode_vcn0;
+ return 0;
default:
return -EINVAL;
}
@@ -327,6 +351,7 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
return 10000;
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) ||
@@ -556,6 +581,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
/* Use gpu_recovery param to target a reset method.
* Enable triggering of GPU reset only if specified
* by module parameter.
@@ -1177,6 +1203,7 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->asic_funcs = &aqua_vanjaram_asic_funcs;
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
@@ -1385,10 +1412,10 @@ static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
-static int soc15_common_set_clockgating_state(void *handle,
+static int soc15_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1453,6 +1480,7 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) &&
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 12)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
@@ -1473,7 +1501,7 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
adev->df.funcs->get_clockgating_state(adev, flags);
}
-static int soc15_common_set_powergating_state(void *handle,
+static int soc15_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* todo */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index d6999835918f..62ad67d0b598 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -928,10 +928,10 @@ static bool soc21_common_is_idle(void *handle)
return true;
}
-static int soc21_common_set_clockgating_state(void *handle,
+static int soc21_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(4, 3, 0):
@@ -954,10 +954,10 @@ static int soc21_common_set_clockgating_state(void *handle,
return 0;
}
-static int soc21_common_set_powergating_state(void *handle,
+static int soc21_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index be96de92b2f5..6b8e078ee7c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -444,8 +444,18 @@ static int soc24_common_late_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+ /* No need to fail gpu late init
+ * if enabling the athub_err_event interrupt fails;
+ * nbif v6_3_1 only supports fatal error handling,
+ * so just enable the interrupt directly.
+ */
+ amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
/* Enable selfring doorbell aperture late because doorbell BAR
* aperture will change if resize BAR successfully in gmc sw_init.
@@ -501,8 +511,13 @@ static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block)
adev->nbio.funcs->enable_doorbell_aperture(adev, false);
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_put_irq(adev);
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
return 0;
}
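The amdgpu_irq_get() in late init and the amdgpu_irq_put() here are refcounted enable/disable calls, so the athub error interrupt is only armed while the device is up. A minimal sketch of that pairing (example_toggle_athub_irq() is hypothetical):

static void example_toggle_athub_irq(struct amdgpu_device *adev, bool on)
{
	/* refcounted arm/disarm of the fatal-error interrupt */
	if (on)
		amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	else
		amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
}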
@@ -522,10 +537,10 @@ static bool soc24_common_is_idle(void *handle)
return true;
}
-static int soc24_common_set_clockgating_state(void *handle,
+static int soc24_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 3, 1):
@@ -542,10 +557,10 @@ static int soc24_common_set_clockgating_state(void *handle,
return 0;
}
-static int soc24_common_set_powergating_state(void *handle,
+static int soc24_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 21b71a427b1f..64891f099366 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -30,6 +30,9 @@
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+/* invalid node instance value */
+#define TA_RAS_INV_NODE 0xffff
+
/* RAS related enumerations */
/**********************************************************/
enum ras_command {
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index 00d8bdb8254f..9ec2e03d41c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -31,10 +31,12 @@
* Secure Display Command ID
*/
enum ta_securedisplay_command {
- /* Query whether TA is responding used only for validation purpose */
+ /* Query whether TA is responding. It is used only for validation purposes */
TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
/* Send region of Interest and CRC value to I2C */
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+ /* V2 command to send multiple regions of interest and CRC values to I2C */
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2 = 3,
/* Maximum Command ID */
TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
};
@@ -83,6 +85,8 @@ enum ta_securedisplay_ta_query_cmd_ret {
enum ta_securedisplay_buffer_size {
/* 15 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) */
TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+ /* 16 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) + 1 byte(roi_idx) */
+ TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE = 16,
};
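As the size comments above explain, the V2 buffer grows by one byte for the ROI index. A purely illustrative view of the assumed 16-byte layout (the TA interface itself only exposes the raw i2c_buf array):

struct example_securedisplay_v2_i2c_layout {
	uint8_t roi[8];		/* region of interest */
	uint8_t crc[6];		/* CRC value */
	uint8_t phy_id;		/* physical display ID */
	uint8_t roi_idx;	/* ROI index, new in V2 */
} __attribute__((packed));	/* 16 bytes = TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE */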
/** Input/output structures for Secure Display commands */
@@ -95,7 +99,15 @@ enum ta_securedisplay_buffer_size {
* Physical ID to determine which DIO scratch register should be used to get ROI
*/
struct ta_securedisplay_send_roi_crc_input {
- uint32_t phy_id; /* Physical ID */
+ /* Physical ID */
+ uint32_t phy_id;
+};
+
+struct ta_securedisplay_send_roi_crc_v2_input {
+ /* Physical ID */
+ uint32_t phy_id;
+ /* Region of interest index */
+ uint8_t roi_idx;
};
/** @union ta_securedisplay_cmd_input
@@ -104,6 +116,8 @@ struct ta_securedisplay_send_roi_crc_input {
union ta_securedisplay_cmd_input {
/* send ROI and CRC input buffer format */
struct ta_securedisplay_send_roi_crc_input send_roi_crc;
+ /* send ROI and CRC input buffer format, v2 adds a ROI index */
+ struct ta_securedisplay_send_roi_crc_v2_input send_roi_crc_v2;
uint32_t reserved[4];
};
@@ -128,6 +142,10 @@ struct ta_securedisplay_send_roi_crc_output {
uint8_t reserved;
};
+struct ta_securedisplay_send_roi_crc_v2_output {
+ uint8_t i2c_buf[TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE]; /* I2C buffer */
+};
+
/** @union ta_securedisplay_cmd_output
* Output buffer
*/
@@ -136,6 +154,8 @@ union ta_securedisplay_cmd_output {
struct ta_securedisplay_query_ta_output query_ta;
/* Send ROI CRC output buffer format used only for validation purpose */
struct ta_securedisplay_send_roi_crc_output send_roi_crc;
+ /* Send ROI CRC V2 output buffer format used only for validation purposes */
+ struct ta_securedisplay_send_roi_crc_v2_output send_roi_crc_v2;
uint32_t reserved[4];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 5a04a6770138..0968e551f7b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -448,13 +448,13 @@ static int tonga_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int tonga_ih_set_clockgating_state(void *handle,
+static int tonga_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int tonga_ih_set_powergating_state(void *handle,
+static int tonga_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 1a8ea834efa6..a7b9c358a2d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -173,156 +173,96 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- struct ta_ras_query_address_input *addr_in)
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr)
{
- uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column, err_addr;
- struct ta_ras_query_address_output addr_out;
+ uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t channel_index = 0, umc_inst = 0;
+ uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint64_t soc_pa, column, err_addr;
+ struct ta_ras_query_address_output addr_out_tmp;
+ struct ta_ras_query_address_output *paddr_out;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ int ret = 0;
+
+ if (!addr_out)
+ paddr_out = &addr_out_tmp;
+ else
+ paddr_out = addr_out;
- err_addr = addr_in->ma.err_addr;
- addr_in->addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
+ err_addr = bank = 0;
+ if (addr_in) {
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ ret = psp_ras_query_address(&adev->psp, addr_in, paddr_out);
+ if (ret) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
- return;
- }
+ goto out;
+ }
- soc_pa = addr_out.pa.pa;
- bank = addr_out.pa.bank;
- channel_index = addr_out.pa.channel_idx;
-
- col = (err_addr >> 1) & 0x1fULL;
- row = (err_addr >> 10) & 0x3fffULL;
- row_xor = row ^ (0x1ULL << 13);
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
- /* include column bit 0 and 1 */
- col &= 0x3;
- col |= (column << 2);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, addr_in->ma.umc_inst);
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, addr_in->ma.umc_inst);
+ bank = paddr_out->pa.bank;
+ /* umc inst can be ignored when addr_in is NULL */
+ umc_inst = addr_in->ma.umc_inst;
}
-}
-static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev,
- struct ta_ras_query_address_output *addr_out,
- uint64_t err_addr)
-{
- uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column;
-
- soc_pa = addr_out->pa.pa;
- bank = addr_out->pa.bank;
- channel_index = addr_out->pa.channel_idx;
-
- col = (err_addr >> 1) & 0x1fULL;
- row = (err_addr >> 10) & 0x3fffULL;
- row_xor = row ^ (0x1ULL << 13);
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
- /* include column bit 0 and 1 */
- col &= 0x3;
- col |= (column << 2);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row, col, bank, channel_index);
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
- }
-}
+ loop_bits[0] = UMC_V12_0_PA_C2_BIT;
+ loop_bits[1] = UMC_V12_0_PA_C3_BIT;
+ loop_bits[2] = UMC_V12_0_PA_C4_BIT;
+ loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
- uint64_t pa_addr, uint64_t *pfns, int len)
-{
- uint64_t soc_pa, retired_page, column;
- uint32_t pos = 0;
-
- soc_pa = pa_addr;
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ /* other nps modes are treated as nps1 */
+ if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
+ loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
+ loop_bits[2] = UMC_V12_0_PA_B0_BIT;
+ loop_bits[3] = UMC_V12_0_PA_R11_BIT;
}
- return pos;
-}
-
-static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev,
- uint64_t err_addr, uint32_t ch, uint32_t umc,
- uint32_t node, uint32_t socket,
- uint64_t *addr, bool dump_addr)
-{
- struct ta_ras_query_address_input addr_in;
- struct ta_ras_query_address_output addr_out;
-
- memset(&addr_in, 0, sizeof(addr_in));
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = ch;
- addr_in.ma.umc_inst = umc;
- addr_in.ma.node_inst = node;
- addr_in.ma.socket_id = socket;
- addr_in.addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
- return -EINVAL;
+ soc_pa = paddr_out->pa.pa;
+ channel_index = paddr_out->pa.channel_idx;
+ /* clear loop bits in soc physical address */
+ for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
+ soc_pa &= ~BIT_ULL(loop_bits[i]);
+
+ paddr_out->pa.pa = soc_pa;
+ /* get column bits 0 and 1 from the mca address */
+ col_lower = (err_addr >> 1) & 0x3ULL;
+ /* MA_R13_BIT will be handled later */
+ row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+
+ if (!err_data && !dump_addr)
+ goto out;
+
+ /* loop for all possibilities of retired bits */
+ for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ soc_pa = paddr_out->pa.pa;
+ for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+
+ col = ((column & 0x7) << 2) | col_lower;
+ /* add row bit 13 */
+ row = ((column >> 3) << 13) | row_lower;
+
+ if (dump_addr)
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ soc_pa, row, col, bank, channel_index);
+
+ if (err_data)
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ soc_pa, channel_index, umc_inst);
}
- if (dump_addr)
- umc_v12_0_dump_addr_info(adev, &addr_out, err_addr);
-
- *addr = addr_out.pa.pa;
-
- return 0;
+out:
+ return ret;
}
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
@@ -374,7 +314,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
addr_in.ma.umc_inst = umc_inst;
addr_in.ma.node_inst = node_inst;
- umc_v12_0_convert_error_address(adev, err_data, &addr_in);
+ umc_v12_0_convert_error_address(adev, err_data, &addr_in, NULL, true);
}
/* clear umc status */
@@ -526,6 +466,9 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
+ struct ta_ras_query_address_output addr_out;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -552,10 +495,10 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
MCA_IPID_2_UMC_CH(ipid),
err_addr);
- ret = umc_v12_0_convert_mca_to_addr(adev,
+ ret = amdgpu_umc_mca_to_addr(adev,
err_addr, MCA_IPID_2_UMC_CH(ipid),
MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
- MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true);
+ MCA_IPID_2_SOCKET_ID(ipid), &addr_out, true);
if (ret)
return ret;
@@ -563,14 +506,21 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
if (!ecc_err)
return -ENOMEM;
+ pa_addr = addr_out.pa.pa;
ecc_err->status = status;
ecc_err->ipid = ipid;
ecc_err->addr = addr;
- ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->channel_idx = addr_out.pa.channel_idx;
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ shift_bit = UMC_V12_0_PA_B0_BIT;
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
- ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (ret) {
@@ -586,7 +536,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
con->umc_ecc_log.de_queried_count++;
memset(page_pfn, 0, sizeof(page_pfn));
- count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
pa_addr,
page_pfn, ARRAY_SIZE(page_pfn));
if (count <= 0) {
@@ -629,7 +579,7 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
return -EINVAL;
memset(page_pfn, 0, sizeof(page_pfn));
- count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
page_pfn, ARRAY_SIZE(page_pfn));
@@ -637,7 +587,7 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
ret = amdgpu_umc_fill_error_record(err_data,
ecc_err->addr,
page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
- MCA_IPID_2_UMC_CH(ecc_err->ipid),
+ ecc_err->channel_idx,
MCA_IPID_2_UMC_INST(ecc_err->ipid));
if (ret)
break;
@@ -676,6 +626,31 @@ static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
mutex_unlock(&con->umc_ecc_log.lock);
}
+static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page)
+{
+ uint32_t die = 0;
+
+ /* we only calculate die id for nps1 mode right now */
+ die += ((((retired_page >> 12) & 0x1ULL) ^
+ ((retired_page >> 20) & 0x1ULL) ^
+ ((retired_page >> 27) & 0x1ULL) ^
+ ((retired_page >> 34) & 0x1ULL) ^
+ ((retired_page >> 41) & 0x1ULL)) << 0);
+
+ /* the original PA_C4 and PA_R13 may be cleared in retired_page, so
+ * get them from mca_addr.
+ */
+ die += ((((retired_page >> 13) & 0x1ULL) ^
+ ((mca_addr >> 5) & 0x1ULL) ^
+ ((retired_page >> 28) & 0x1ULL) ^
+ ((mca_addr >> 23) & 0x1ULL) ^
+ ((retired_page >> 42) & 0x1ULL)) << 1);
+ die &= 3;
+
+ return die;
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -686,5 +661,7 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
.update_ecc_status = umc_v12_0_update_ecc_status,
+ .convert_ras_err_addr = umc_v12_0_convert_error_address,
+ .get_die_id_from_pa = umc_v12_0_get_die_id,
};
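The rewrite above folds three near-duplicate loops (convert, dump, lookup) into one generic walk: the UMC_V12_0_RETIRE_LOOP_BITS ambiguous bit positions are first cleared in the base physical address, then every one of the 2^4 = 16 combinations (UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL) is OR-ed back in. A standalone sketch of that expansion, assuming four loop bits as in the code above:

    /* Sketch: n-th candidate retired page for a base PA whose loop bits
     * have already been cleared; column ranges over 0..15.
     */
    static uint64_t nth_candidate_pa(uint64_t base_pa,
                                     const uint32_t loop_bits[4],
                                     uint32_t column)
    {
            uint64_t pa = base_pa;
            uint32_t i;

            for (i = 0; i < 4; i++)
                    pa |= ((uint64_t)((column >> i) & 0x1)) << loop_bits[i];

            return pa;
    }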
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index be5598d76c1d..9298018d938f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,12 +55,24 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
+/* C2, C3, C4 and R13, four bits in the MCA address, are looped in retirement */
+#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
+#define UMC_V12_0_PA_C3_BIT 16
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
+#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R11_BIT 33
#define UMC_V12_0_PA_R13_BIT 35
+/* channel bit in SOC physical address */
+#define UMC_V12_0_PA_CH4_BIT 12
+#define UMC_V12_0_PA_CH5_BIT 13
+/* bank bit in SOC physical address */
+#define UMC_V12_0_PA_B0_BIT 19
+/* row bits in MCA address */
+#define UMC_V12_0_MA_R0_BIT 10
#define MCA_UMC_HWID_V12_0 0x96
#define MCA_UMC_MCATYPE_V12_0 0x0
@@ -81,11 +93,6 @@
(((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
-#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
- ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
- (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
- (0x1ULL << UMC_V12_0_PA_R13_BIT)))
-
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
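For reference, the two loop-bit sets chosen in umc_v12_0_convert_error_address() map onto the new defines as follows (laid out as arrays purely for illustration; the driver fills loop_bits[] inline):

    /* NPS1 (default): column bits C2/C3/C4 plus row bit R13 */
    static const uint32_t nps1_loop_bits[UMC_V12_0_RETIRE_LOOP_BITS] = {
            UMC_V12_0_PA_C2_BIT,  /* 15 */
            UMC_V12_0_PA_C3_BIT,  /* 16 */
            UMC_V12_0_PA_C4_BIT,  /* 21 */
            UMC_V12_0_PA_R13_BIT, /* 35 */
    };

    /* NPS4: channel bits CH4/CH5, bank bit B0, row bit R11 */
    static const uint32_t nps4_loop_bits[UMC_V12_0_RETIRE_LOOP_BITS] = {
            UMC_V12_0_PA_CH4_BIT, /* 12 */
            UMC_V12_0_PA_CH5_BIT, /* 13 */
            UMC_V12_0_PA_B0_BIT,  /* 19 */
            UMC_V12_0_PA_R11_BIT, /* 33 */
    };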
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c
new file mode 100644
index 000000000000..eaca10a3c4a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v8_14.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+#include "umc/umc_8_14_0_offset.h"
+#include "umc/umc_8_14_0_sh_mask.h"
+
+static inline uint32_t get_umc_v8_14_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs * ch_inst + UMC_V8_14_INST_DIST * umc_inst;
+}
+
+static int umc_v8_14_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ /* clear error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V8_14_CE_CNT_INIT);
+
+ return 0;
+}
+
+static void umc_v8_14_clear_error_count(struct amdgpu_device *adev)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_clear_error_count_per_channel, NULL);
+}
+
+static void umc_v8_14_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+
+ /* UMC 8_14 registers */
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_GeccErrCnt, GeccErrCnt) -
+ UMC_V8_14_CE_CNT_INIT);
+}
+
+static void umc_v8_14_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ /* UMC 8_14 registers */
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_GeccErrCnt, GeccUnCorrErrCnt) -
+ UMC_V8_14_CE_CNT_INIT);
+}
+
+static int umc_v8_14_query_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ umc_v8_14_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v8_14_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+
+ return 0;
+}
+
+static void umc_v8_14_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_query_error_count_per_channel, ras_error_status);
+
+ umc_v8_14_clear_error_count(adev);
+}
+
+static int umc_v8_14_err_cnt_init_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+
+ /* set ce error interrupt type to APIC based interrupt */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_GeccErrCntSel,
+ GeccErrInt, 0x1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ /* set error count to initial value */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_14_CE_CNT_INIT);
+
+ return 0;
+}
+
+static void umc_v8_14_err_cnt_init(struct amdgpu_device *adev)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_err_cnt_init_per_channel, NULL);
+}
+
+const struct amdgpu_ras_block_hw_ops umc_v8_14_ras_hw_ops = {
+ .query_ras_error_count = umc_v8_14_query_ras_error_count,
+};
+
+struct amdgpu_umc_ras umc_v8_14_ras = {
+ .ras_block = {
+ .hw_ops = &umc_v8_14_ras_hw_ops,
+ },
+ .err_cnt_init = umc_v8_14_err_cnt_init,
+};
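The new file follows the usual UMC addressing convention: a per-channel offset (channel_offs * ch_inst + UMC_V8_14_INST_DIST * umc_inst) is added to the dword-granular SOC15 register offset, and the sum is multiplied by four because the PCIE indirect accessors take byte addresses. A sketch of that arithmetic, assuming the convention carries over from the older UMC files:

    /* Sketch: byte address handed to RREG32_PCIE()/WREG32_PCIE().
     * dword_reg comes from SOC15_REG_OFFSET(); "* 4" converts the dword
     * offset into the byte address the PCIE window expects.
     */
    static inline uint32_t umc_pcie_byte_addr(uint32_t dword_reg,
                                              uint32_t umc_reg_offset)
    {
            return (dword_reg + umc_reg_offset) * 4;
    }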
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h
new file mode 100644
index 000000000000..20a258f0017a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V8_14_H__
+#define __UMC_V8_14_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* number of umc channel instances with memory map register access */
+#define UMC_V8_14_CHANNEL_INSTANCE_NUM 2
+/* number of umc instances with memory map register access */
+#define UMC_V8_14_UMC_INSTANCE_NUM(adev) ((adev)->umc.node_inst_num)
+
+/* Total channel instances for all available umc nodes */
+#define UMC_V8_14_TOTAL_CHANNEL_NUM(adev) \
+ (UMC_V8_14_CHANNEL_INSTANCE_NUM * (adev)->gmc.num_umc)
+
+/* UMC register per channel offset */
+#define UMC_V8_14_PER_CHANNEL_OFFSET 0x400
+
+#define UMC_V8_14_INST_DIST 0x40000
+
+/* EccErrCnt max value */
+#define UMC_V8_14_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V8_14_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V8_14_CE_CNT_INIT (UMC_V8_14_CE_CNT_MAX - UMC_V8_14_CE_INT_THRESHOLD)
+
+extern struct amdgpu_umc_ras umc_v8_14_ras;
+#endif
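As in earlier UMC generations, the counter is preloaded with UMC_V8_14_CE_CNT_INIT so an interrupt can fire once UMC_V8_14_CE_INT_THRESHOLD further errors accumulate; software then recovers the real count by subtracting the preload (with the threshold equal to the max here, the preload works out to zero). A sketch of the recovery arithmetic used by the query functions, assuming that convention:

    /* Sketch: new-error delta from a raw GeccErrCnt field read */
    static unsigned long gecc_err_delta(uint32_t raw_cnt)
    {
            return raw_cnt - UMC_V8_14_CE_CNT_INIT;
    }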
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index bdbca25d80c4..5830e799c0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -790,13 +790,13 @@ static int uvd_v3_1_soft_reset(struct amdgpu_ip_block *ip_block)
return uvd_v3_1_start(adev);
}
-static int uvd_v3_1_set_clockgating_state(void *handle,
+static int uvd_v3_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int uvd_v3_1_set_powergating_state(void *handle,
+static int uvd_v3_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index a836dc9cfcad..f93079e09215 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -44,7 +44,7 @@ static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
bool sw_mode);
@@ -708,13 +708,13 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int uvd_v4_2_set_powergating_state(void *handle,
+static int uvd_v4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -724,7 +724,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index ab55fae3569e..050a0f309390 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -42,7 +42,7 @@ static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
-static int uvd_v5_0_set_clockgating_state(void *handle,
+static int uvd_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
@@ -155,7 +155,7 @@ static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
int r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v5_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
@@ -790,16 +790,11 @@ static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
}
}
-static int uvd_v5_0_set_clockgating_state(void *handle,
+static int uvd_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
- struct amdgpu_ip_block *ip_block;
-
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
- if (!ip_block)
- return -EINVAL;
if (enable) {
/* wait for STATUS to clear */
@@ -817,7 +812,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
return 0;
}
-static int uvd_v5_0_set_powergating_state(void *handle,
+static int uvd_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -827,7 +822,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 39f8c3d3a135..d9d036ee51fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -48,7 +48,7 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
@@ -467,7 +467,7 @@ static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
int i, r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
@@ -1450,17 +1450,12 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
}
}
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ip_block *ip_block;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
- if (!ip_block)
- return -EINVAL;
-
if (enable) {
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(ip_block))
@@ -1476,7 +1471,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
return 0;
}
-static int uvd_v6_0_set_powergating_state(void *handle,
+static int uvd_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -1486,7 +1481,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 3c8ab8698af8..9d237b5937fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1511,7 +1511,7 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int uvd_v7_0_set_clockgating_state(void *handle,
+static int uvd_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* needed for driver unload*/
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c1ed91b39415..c633b7ff2943 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -578,13 +578,13 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vce_v2_0_set_clockgating_state(void *handle,
+static int vce_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
bool sw_cg = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE) {
gate = true;
@@ -596,7 +596,7 @@ static int vce_v2_0_set_clockgating_state(void *handle,
return 0;
}
-static int vce_v2_0_set_powergating_state(void *handle,
+static int vce_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -606,7 +606,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
return vce_v2_0_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6bb318a06f19..f8bddcd19b68 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -65,7 +65,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
-static int vce_v3_0_set_clockgating_state(void *handle,
+static int vce_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
/**
* vce_v3_0_ring_get_rptr - get read pointer
@@ -497,7 +497,7 @@ static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
return r;
vce_v3_0_stop(adev);
- return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ return vce_v3_0_set_clockgating_state(ip_block, AMD_CG_STATE_GATE);
}
static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block)
@@ -760,10 +760,10 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vce_v3_0_set_clockgating_state(void *handle,
+static int vce_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
@@ -801,7 +801,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int vce_v3_0_set_powergating_state(void *handle,
+static int vce_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -811,7 +811,7 @@ static int vce_v3_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 79ee555768a5..335bda64ff5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -684,14 +684,14 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
-static int vce_v4_0_set_clockgating_state(void *handle,
+static int vce_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* needed for driver unload*/
return 0;
}
-static int vce_v4_0_set_powergating_state(void *handle,
+static int vce_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -701,7 +701,7 @@ static int vce_v4_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
return vce_v4_0_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 10e99c926fb8..5ea96c983517 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -85,7 +85,8 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
+static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -281,7 +282,7 @@ static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
- vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v1_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
return 0;
@@ -303,7 +304,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
}
r = vcn_v1_0_hw_fini(ip_block);
@@ -344,7 +345,7 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
*/
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -411,7 +412,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1394,15 +1395,15 @@ static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v1_0_set_clockgating_state(void *handle,
+static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v1_0_is_idle(handle))
+ if (!vcn_v1_0_is_idle(adev))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {
@@ -1799,7 +1800,7 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
}
}
-static int vcn_v1_0_set_powergating_state(void *handle,
+static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
@@ -1810,7 +1811,7 @@ static int vcn_v1_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == adev->vcn.cur_state)
return 0;
@@ -1856,7 +1857,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
@@ -1886,7 +1887,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ amdgpu_dpm_enable_vcn(adev, true, 0);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e0322cbca3ec..e42cfc731ad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,7 +92,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_0_set_powergating_state(void *handle,
+static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -318,7 +318,7 @@ static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
- vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -372,7 +372,7 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
*/
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
if (amdgpu_sriov_vf(adev))
@@ -428,7 +428,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -978,7 +978,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
int i, j, r;
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ amdgpu_dpm_enable_vcn(adev, true, 0);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
@@ -1235,7 +1235,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
power_off:
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
return 0;
}
@@ -1335,10 +1335,10 @@ static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v2_0_set_clockgating_state(void *handle,
+static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -1346,7 +1346,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v2_0_is_idle(handle))
+ if (!vcn_v2_0_is_idle(adev))
return -EBUSY;
vcn_v2_0_enable_clock_gating(adev);
} else {
@@ -1796,7 +1796,7 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
}
-static int vcn_v2_0_set_powergating_state(void *handle,
+static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
@@ -1807,7 +1807,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
@@ -1920,7 +1920,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
init_table += header->vcn_table_offset;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 6aa08281d094..b518202955ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -95,7 +95,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_5_set_powergating_state(void *handle,
+static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -399,7 +399,7 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
- vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
@@ -465,7 +465,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -514,7 +514,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1012,8 +1012,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1285,7 +1287,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
@@ -1485,8 +1487,10 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1778,6 +1782,7 @@ static bool vcn_v2_5_is_idle(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
@@ -1801,17 +1806,17 @@ static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v2_5_set_clockgating_state(void *handle,
+static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
return 0;
if (enable) {
- if (!vcn_v2_5_is_idle(handle))
+ if (!vcn_v2_5_is_idle(adev))
return -EBUSY;
vcn_v2_5_enable_clock_gating(adev);
} else {
@@ -1821,10 +1826,10 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
return 0;
}
-static int vcn_v2_5_set_powergating_state(void *handle,
+static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev))
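Alongside the signature conversion, the series moves VCN power management off the UVD-era helper: amdgpu_dpm_enable_uvd(adev, on) becomes the per-instance amdgpu_dpm_enable_vcn(adev, on, inst), with single-instance paths (vcn_v1_0, vcn_v2_0) passing instance 0 and multi-instance start/stop paths looping as above:

    /* Recurring pattern in the start paths; the stop paths pass false */
    int i;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
            if (adev->pm.dpm_enabled)
                    amdgpu_dpm_enable_vcn(adev, true, i);
    }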
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 6732ad7f16f5..63ddd4cca910 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -105,7 +105,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v3_0_set_powergating_state(void *handle,
+static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -430,9 +430,9 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
- vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
+ vcn_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -490,7 +490,7 @@ static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
*/
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -540,7 +540,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1141,8 +1141,10 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1373,7 +1375,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -1632,8 +1634,10 @@ static int vcn_v3_0_stop(struct amdgpu_device *adev)
vcn_v3_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -2132,10 +2136,10 @@ static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v3_0_set_clockgating_state(void *handle,
+static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -2155,10 +2159,10 @@ static int vcn_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int vcn_v3_0_set_powergating_state(void *handle,
+static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index fcc8511e91ee..00551d6f0370 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -96,7 +96,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_set_powergating_state(void *handle,
+static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -366,9 +366,9 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
continue;
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vcn_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
@@ -431,7 +431,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -491,7 +491,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
{
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1097,8 +1097,10 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1341,7 +1343,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -1623,8 +1625,10 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
vcn_v4_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -2007,14 +2011,15 @@ static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -2037,14 +2042,15 @@ static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_sta
/**
* vcn_v4_0_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 3f69b9b2bcd0..ecdc027f8220 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -87,7 +87,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -349,7 +349,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
- vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
vcn_inst = GET_INST(VCN, inst_idx);
@@ -482,7 +482,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -957,6 +957,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
+ vcn_v4_0_3_fw_shared_init(adev, vcn_inst);
+
memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
header.version = MMSCH_VERSION;
header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
@@ -969,7 +971,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
@@ -1121,8 +1123,10 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev)
int i, j, k, r, vcn_inst;
uint32_t tmp;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1395,8 +1399,10 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
vcn_v4_0_3_enable_clock_gating(adev, i);
}
Done:
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1616,15 +1622,15 @@ static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
/* vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_3_set_clockgating_state(void *handle,
+static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -1644,15 +1650,15 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle,
/**
* vcn_v4_0_3_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
@@ -1911,9 +1917,94 @@ static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
};
+static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
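+ /* a UE bank is logged once; the CE count is taken from MISC0.ERRCNT */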
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* refer to the SMU driver interface header file for these error codes */
+static int vcn_v4_0_3_err_codes[] = {
+ 14, 15, /* VCN */
+};
+
+static bool vcn_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ vcn_v4_0_3_err_codes,
+ ARRAY_SIZE(vcn_v4_0_3_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops vcn_v4_0_3_aca_bank_ops = {
+ .aca_bank_parser = vcn_v4_0_3_aca_bank_parser,
+ .aca_bank_is_valid = vcn_v4_0_3_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v4_0_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &vcn_v4_0_3_aca_bank_ops,
+};
+
+static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+ &vcn_v4_0_3_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
.ras_block = {
.hw_ops = &vcn_v4_0_3_ras_hw_ops,
+ .ras_late_init = vcn_v4_0_3_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 71961fb3f7ff..23d3c16c9d9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -95,7 +95,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_5_set_powergating_state(void *handle,
+static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -309,7 +309,7 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -370,7 +370,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -431,7 +431,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1000,8 +1000,10 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1277,8 +1279,10 @@ static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
vcn_v4_0_5_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1492,14 +1496,15 @@ static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_5_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -1522,14 +1527,15 @@ static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_s
/**
* vcn_v4_0_5_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->vcn.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index bd3d2bbdc16b..b6d78381ebfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -32,7 +32,7 @@
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
-#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"
#include <drm/drm_drv.h>
@@ -78,7 +78,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_0_set_powergating_state(void *handle,
+static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -105,6 +105,21 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
return amdgpu_vcn_early_init(adev);
}
+void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t *ptr;
+
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+}
+
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
@@ -117,8 +132,6 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -140,13 +153,13 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
+ VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
if (r)
return r;
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
if (r)
return r;
@@ -177,14 +190,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ vcn_v5_0_0_alloc_ip_dump(adev);
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -283,7 +289,7 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -344,7 +350,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -405,7 +411,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -771,8 +777,10 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1018,8 +1026,10 @@ static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
vcn_v5_0_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1229,14 +1239,15 @@ static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -1259,14 +1270,15 @@ static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_s
/**
* vcn_v5_0_0_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->vcn.cur_state)
@@ -1312,10 +1324,10 @@ static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgp
DRM_DEBUG("IH: VCN TRAP\n");
switch (entry->src_id) {
- case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
- case VCN_4_0__SRCID_UVD_POISON:
+ case VCN_5_0__SRCID_UVD_POISON:
amdgpu_vcn_process_poison_irq(adev, source, entry);
break;
default:
@@ -1351,7 +1363,8 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
+ struct drm_printer *p)
{
struct amdgpu_device *adev = ip_block->adev;
int i, j;
@@ -1383,7 +1396,7 @@ static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
}
}
-static void vcn_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
+void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
int i, j;
@@ -1424,8 +1437,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
- .dump_ip_state = vcn_v5_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_print_ip_state,
+ .dump_ip_state = vcn_v5_0_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index 51bbccd4360f..b8927652bc50 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,6 +32,11 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
+void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
+void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
+ struct drm_printer *p);
+void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
+
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
new file mode 100644
index 000000000000..8b463c977d08
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -0,0 +1,1118 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "soc15_hw_ip.h"
+#include "vcn_v2_0.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
+
+#include <drm/drm_drv.h>
+
+static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
+static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
+
+/**
+ * vcn_v5_0_1_early_init - set function pointers and load microcode
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Set ring and irq function pointers
+ * Load microcode from filesystem
+ */
+static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /* re-use enc ring as unified ring */
+ adev->vcn.num_enc_rings = 1;
+
+ vcn_v5_0_1_set_unified_ring_funcs(adev);
+ vcn_v5_0_1_set_irq_funcs(adev);
+
+ return amdgpu_vcn_early_init(adev);
+}
+
+/**
+ * vcn_v5_0_1_sw_init - sw init for VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Load firmware and sw initialization
+ */
+static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev);
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ /* VCN UNIFIED TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ vcn_inst = GET_INST(VCN, i);
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->use_doorbell = true;
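+ /* doorbell offsets are spaced 9 slots apart per physical VCN instance */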
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+
+ ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
+ sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
+
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
+ if (r)
+ return r;
+
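+ /* advertise unified-queue support in the firmware shared buffer */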
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = true;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ }
+
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ vcn_v5_0_0_alloc_ip_dump(adev);
+
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
+}
+
+/**
+ * vcn_v5_0_1_sw_fini - sw fini for VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * VCN suspend and free up sw allocation
+ */
+static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, r, idx;
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ fw_shared->sq.is_enabled = 0;
+ }
+
+ drm_dev_exit(idx);
+ }
+
+ r = amdgpu_vcn_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sw_fini(adev);
+
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
+ kfree(adev->vcn.ip_dump);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_hw_init - start and test VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 9 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_hw_fini - stop the hardware block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Stop the VCN block, mark ring as not ready any more
+ */
+static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_suspend - suspend VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * HW fini and suspend VCN block
+ */
+static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = vcn_v5_0_1_hw_fini(ip_block);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_suspend(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_resume - resume VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Resume firmware and hw init VCN block
+ */
+static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ r = vcn_v5_0_1_hw_init(ip_block);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Let the VCN memory controller know its offsets
+ */
+static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
+{
+ uint32_t offset, size, vcn_inst;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ vcn_inst = GET_INST(VCN, inst);
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+}
+
+/**
+ * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Let the VCN memory controller know its offsets with dpg mode
+ */
+static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
+/**
+ * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Disable clock gating for VCN block
+ */
+static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+}
+
+/**
+ * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+}
+
+/**
+ * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Start VCN block with dpg mode
+ */
+static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+ uint32_t tmp;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
+
+ if (indirect) {
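+ /* route subsequent register writes into the DPG SRAM image */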
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
+ (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+ /* Use dummy register 0xDEADBEEF to pass the AID selection to PSP FW */
+ WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
+ adev->vcn.inst[inst_idx].aid_id, 0, true);
+ }
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
+
+ vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_start - VCN start
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Start VCN block
+ */
+static int vcn_v5_0_1_start(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_ring *ring;
+ uint32_t tmp;
+ int i, j, k, r, vcn_inst;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
+ vcn_inst = GET_INST(VCN, i);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_1_mc_resume(adev, i);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
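+ /* poll UVD_STATUS for the VCPU boot-complete bit, resetting the VCPU between retries */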
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(100);
+ if (amdgpu_emu_mode == 1)
+ msleep(20);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
+ r = 0;
+ break;
+ }
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
+ }
+ }
+
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop VCN block with dpg mode
+ */
+static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t tmp;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+}
+
+/**
+ * vcn_v5_0_1_stop - VCN stop
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop VCN block
+ */
+static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ uint32_t tmp;
+ int i, r = 0, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+
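+ /* ask the firmware to hold off DPG work while the queue is stopped */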
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_1_stop_dpg_mode(adev, i);
+ continue;
+ }
+
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ }
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified read pointer
+ */
+static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified write pointer
+ */
+static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ }
+}
+
+static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
+ .get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
+ .set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_unified_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set unified ring functions
+ */
+static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs;
+ adev->vcn.inst[i].ring_enc[0].me = i;
+ vcn_inst = GET_INST(VCN, i);
+ adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
+ }
+}
+
+/**
+ * vcn_v5_0_1_is_idle - check VCN block is idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Check whether VCN block is idle
+ */
+static bool vcn_v5_0_1_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_wait_for_idle - wait for VCN block idle
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Wait for VCN block idle
+ */
+static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ * @state: clock gating state
+ *
+ * Set VCN block clockgating state
+ */
+static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool enable = state == AMD_CG_STATE_GATE;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (enable) {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
+ return -EBUSY;
+ vcn_v5_0_1_enable_clock_gating(adev, i);
+ } else {
+ vcn_v5_0_1_disable_clock_gating(adev, i);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret;
+
+ if (state == adev->vcn.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v5_0_1_stop(adev);
+ else
+ ret = vcn_v5_0_1_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t i, inst;
+
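+ /* map the IH node id to a physical AID, then find the VCN instance on that AID */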
+ i = node_id_to_phys_map[entry->node_id];
+
+ DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
+ if (adev->vcn.inst[inst].aid_id == i)
+ break;
+ if (inst >= adev->vcn.num_vcn_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown VCN instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
+ .process = vcn_v5_0_1_process_interrupt,
+};
+
+/**
+ * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set VCN block interrupt irq functions
+ */
+static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
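+ /* all instances share the first instance's IRQ source, one type per instance */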
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ adev->vcn.inst->irq.num_types++;
+ adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
+}
+
+static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
+ .name = "vcn_v5_0_1",
+ .early_init = vcn_v5_0_1_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v5_0_1_sw_init,
+ .sw_fini = vcn_v5_0_1_sw_fini,
+ .hw_init = vcn_v5_0_1_hw_init,
+ .hw_fini = vcn_v5_0_1_hw_fini,
+ .suspend = vcn_v5_0_1_suspend,
+ .resume = vcn_v5_0_1_resume,
+ .is_idle = vcn_v5_0_1_is_idle,
+ .wait_for_idle = vcn_v5_0_1_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
+ .set_powergating_state = vcn_v5_0_1_set_powergating_state,
+ .dump_ip_state = vcn_v5_0_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_0_print_ip_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 5,
+ .minor = 0,
+ .rev = 1,
+ .funcs = &vcn_v5_0_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
new file mode 100644
index 000000000000..82ac709f44bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_v5_0_1_H__
+#define __VCN_v5_0_1_H__
+
+extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
+
+#endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 0fedadd0a6a4..98fc6941159e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -364,9 +364,8 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catchup.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn_ratelimited(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -605,10 +604,10 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int vega10_ih_set_clockgating_state(void *handle,
+static int vega10_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega10_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -616,7 +615,7 @@ static int vega10_ih_set_clockgating_state(void *handle,
}
-static int vega10_ih_set_powergating_state(void *handle,
+static int vega10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 1c9aff742e43..e9e3b2ed4b7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -366,6 +366,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
/* Enable IH Retry CAM */
if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0) ||
amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 5))
WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,
ENABLE, 1);
@@ -443,9 +444,8 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catchup.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn_ratelimited(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -697,10 +697,10 @@ static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int vega20_ih_set_clockgating_state(void *handle,
+static int vega20_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega20_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -708,7 +708,7 @@ static int vega20_ih_set_clockgating_state(void *handle,
}
-static int vega20_ih_set_powergating_state(void *handle,
+static int vega20_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index a83505815d39..06615f160331 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1945,10 +1945,10 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
return 0;
}
-static int vi_common_set_clockgating_state(void *handle,
+static int vi_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1988,7 +1988,7 @@ static int vi_common_set_clockgating_state(void *handle,
return 0;
}
-static int vi_common_set_powergating_state(void *handle,
+static int vi_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 02f7ba8c93cd..388b44ed5928 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -4122,3 +4122,494 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+
+static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
+ 0xbf820001, 0xbf8202d8,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
+ 0x866eff78, 0x00002000,
+ 0xbf840008, 0xbf0d986d,
+ 0xbf85001f, 0x866eff7b,
+ 0x00000400, 0xbf850061,
+ 0xbf8e0010, 0xb8fbf803,
+ 0xbf82fffa, 0x866eff7b,
+ 0x03800900, 0xbf850015,
+ 0x866eff7b, 0x000071ff,
+ 0xbf840008, 0x866fff7b,
+ 0x00007080, 0xbf840001,
+ 0xbeee1a87, 0xb8eff801,
+ 0x8e6e8c6e, 0x866e6f6e,
+ 0xbf85000a, 0xbf0d986d,
+ 0xbf850003, 0x866eff6d,
+ 0x00ff0000, 0xbf850005,
+ 0xbf0d986d, 0xbf850004,
+ 0x866eff7b, 0x00000400,
+ 0xbf850046, 0xbeed1a9d,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x877bff7b, 0xffff0000,
+ 0xc0031cfd, 0x00000010,
+ 0xc0071bbd, 0x00000000,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x8e739773,
+ 0x8979ff79, 0x01800000,
+ 0x87797379, 0xbf0d986d,
+ 0xbf840009, 0xbf0d9879,
+ 0xbf850007, 0x896dff6d,
+ 0x01ff0000, 0xba7f0583,
+ 0x00000000, 0xbf0d9d6d,
+ 0xbeed189d, 0xbf840012,
+ 0xbef91898, 0xbeed189d,
+ 0x86ee6e6e, 0xbf840001,
+ 0xbe801d6e, 0x866eff6d,
+ 0x01ff0000, 0xbf850005,
+ 0x8778ff78, 0x00002000,
+ 0x80ec886c, 0x82ed806d,
+ 0xbf820005, 0x866eff6d,
+ 0x01000000, 0xbf850002,
+ 0x806c846c, 0x826d806d,
+ 0x866dff6d, 0x0000ffff,
+ 0x8f7a8b79, 0x867aff7a,
+ 0x001f8000, 0xb97af807,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e8378, 0xb96ee0c2,
+ 0xbf800002, 0xb9780002,
+ 0xbe801f6c, 0x866dff6d,
+ 0x0000ffff, 0xbefa0080,
+ 0xb97a0283, 0xb8faf807,
+ 0x867aff7a, 0x001f8000,
+ 0x8e7a8b7a, 0x8979ff79,
+ 0xfc000000, 0x87797a79,
+ 0xba7ff807, 0x00000000,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
+ 0x877a8478, 0xb97af802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8fa2985, 0x807a817a,
+ 0x8e7a8a7a, 0x8e7a817a,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b867b, 0x807a7b7a,
+ 0x807a7e7a, 0x827b807f,
+ 0x867bff7b, 0x0000ffff,
+ 0xc04b1c3d, 0x00000050,
+ 0xbf8cc07f, 0xc04b1d3d,
+ 0x00000060, 0xbf8cc07f,
+ 0xc0431e7d, 0x00000074,
+ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0xbef1007c,
+ 0xbef00080, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611b3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611b7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611bba,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611bfa, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8fbf803, 0xbefe007c,
+ 0xbefc0070, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8f1f801, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb5306, 0x867bc17b,
+ 0xbf840052, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf84004e, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85001d,
+ 0x24040682, 0xd86c0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000100, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffe5,
+ 0xbf820016, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
+ 0xd0c9006a, 0x0000f702,
+ 0xbefe016a, 0xbf87fff6,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200f4,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf840025, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef5306,
+ 0x866fc16f, 0xbf840020,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0xe0510200, 0x781d0000,
+ 0xe0510300, 0x781d0000,
+ 0xe0510400, 0x781d0000,
+ 0x807cff7c, 0x00000500,
+ 0x8078ff78, 0x00000500,
+ 0xbf0a6f7c, 0xbf85fff0,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbef600ff, 0x01000000,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
+ 0x00000078, 0x80788478,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
+ 0x00000078, 0x80788478,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
+ 0x00000078, 0x80788478,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
+ 0x00000078, 0x80788478,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
+ 0x00000078, 0x80788478,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
+};
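
The array above is the assembled CWSR (compute wave save/restore) trap handler for gfx9.5.0; the .asm sources below describe how these hex blobs are generated with cpp and sp3. A hedged sketch of how such a blob is typically handed off to the rest of the driver; the struct and function names here are illustrative assumptions, not the exact amdkfd code:

	#include <stddef.h>
	#include <stdint.h>

	/* Stand-in for the KFD device structure that carries the trap handler. */
	struct kfd_dev_stub {
		const uint32_t *cwsr_isa;	/* trap handler machine code */
		size_t cwsr_isa_size;		/* in bytes */
	};

	static const uint32_t cwsr_trap_example_hex[] = { 0xbf820001 /* ... */ };

	/* Per-ASIC selection would branch on the GC IP version. */
	static void select_cwsr_blob(struct kfd_dev_stub *kfd)
	{
		kfd->cwsr_isa = cwsr_trap_example_hex;
		kfd->cwsr_isa_size = sizeof(cwsr_trap_example_hex);
	}
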
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 44772eec9ef4..96fbb16ceb21 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -34,41 +34,24 @@
* cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
* sp3 gfx11.sp3 -hex gfx11.hex
*
- * gfx12:
- * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx10.asm -P -o gfx12.sp3
- * sp3 gfx12.sp3 -hex gfx12.hex
*/
#define CHIP_NAVI10 26
#define CHIP_SIENNA_CICHLID 30
#define CHIP_PLUM_BONITO 36
-#define CHIP_GFX12 37
#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
-#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO && ASIC_FAMILY < CHIP_GFX12)
+#define SW_SA_TRAP (ASIC_FAMILY == CHIP_PLUM_BONITO)
#define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
-#if ASIC_FAMILY < CHIP_GFX12
#define S_COHERENCE glc:1
#define V_COHERENCE slc:1 glc:1
#define S_WAITCNT_0 s_waitcnt 0
-#else
-#define S_COHERENCE scope:SCOPE_SYS
-#define V_COHERENCE scope:SCOPE_SYS
-#define S_WAITCNT_0 s_wait_idle
-
-#define HW_REG_SHADER_FLAT_SCRATCH_LO HW_REG_WAVE_SCRATCH_BASE_LO
-#define HW_REG_SHADER_FLAT_SCRATCH_HI HW_REG_WAVE_SCRATCH_BASE_HI
-#define HW_REG_GPR_ALLOC HW_REG_WAVE_GPR_ALLOC
-#define HW_REG_LDS_ALLOC HW_REG_WAVE_LDS_ALLOC
-#define HW_REG_MODE HW_REG_WAVE_MODE
-#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
@@ -81,21 +64,6 @@ var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_E
var S_STATUS_HALT_MASK = SQ_WAVE_STATUS_HALT_MASK
var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
var S_SAVE_PC_HI_HT_MASK = 0x01000000
-#else
-var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
-var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
-var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
-var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
-var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
-var SQ_WAVE_STATUS_WAVE64_SIZE = 1
-var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
-var S_STATUS_HWREG = HW_REG_WAVE_STATE_PRIV
-var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
-var S_STATUS_HALT_MASK = SQ_WAVE_STATE_PRIV_HALT_MASK
-var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
-#endif
var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
@@ -110,7 +78,6 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
@@ -161,39 +128,6 @@ var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
var S_TRAPSTS_HWREG = HW_REG_TRAPSTS
var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_TRAPSTS_SAVECTX_MASK
var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_TRAPSTS_SAVECTX_SHIFT
-#else
-var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
-var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
-var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
-var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
-var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
-
-var S_TRAPSTS_HWREG = HW_REG_WAVE_EXCP_FLAG_PRIV
-var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
-var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
-var S_TRAPSTS_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
-var BARRIER_STATE_SIGNAL_OFFSET = 16
-var BARRIER_STATE_VALID_OFFSET = 0
-#endif
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
@@ -305,11 +239,7 @@ L_TRAP_NO_BARRIER:
L_HALTED:
// Host trap may occur while wave is halted.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
-#else
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
L_CHECK_SAVE:
@@ -336,7 +266,6 @@ L_NOT_HALTED:
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
// Maskable exceptions only cause the wave to enter the trap handler if
// their respective bit in mode.excp_en is set.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
s_cbranch_scc0 L_CHECK_TRAP_ID
@@ -349,17 +278,6 @@ L_NOT_ADDR_WATCH:
s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
s_and_b32 ttmp2, ttmp2, ttmp3
s_cbranch_scc1 L_FETCH_2ND_TRAP
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
- s_cbranch_scc0 L_NOT_ADDR_WATCH
- s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
-
-L_NOT_ADDR_WATCH:
- s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
- s_and_b32 ttmp2, ttmp3, ttmp2
- s_cbranch_scc1 L_FETCH_2ND_TRAP
-#endif
L_CHECK_TRAP_ID:
// Check trap_id != 0
@@ -369,13 +287,8 @@ L_CHECK_TRAP_ID:
#if SINGLE_STEP_MISSED_WORKAROUND
// Prioritize single step exception over context save.
// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
-#else
- // WAVE_TRAP_CTRL is already in ttmp3.
- s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
#endif
@@ -425,12 +338,7 @@ L_NO_NEXT_TRAP:
s_cbranch_scc1 L_TRAP_CASE
// Host trap will not cause trap re-entry.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_EXIT_TRAP
s_or_b32 s_save_status, s_save_status, S_STATUS_HALT_MASK
@@ -457,16 +365,7 @@ L_EXIT_TRAP:
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
-#if ASIC_FAMILY < CHIP_GFX12
s_setreg_b32 hwreg(S_STATUS_HWREG), s_save_status
-#else
- // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
- // Only restore fields which the trap handler changes.
- s_lshr_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_SCC_SHIFT
- s_setreg_b32 hwreg(S_STATUS_HWREG, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
- SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_status
-#endif
-
s_rfe_b64 [ttmp0, ttmp1]
L_SAVE:
@@ -478,14 +377,6 @@ L_SAVE:
s_endpgm
L_HAVE_VGPRS:
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
- s_cbranch_scc0 L_HAVE_VGPRS
- s_endpgm
-L_HAVE_VGPRS:
-#endif
-
s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
s_mov_b32 s_save_tmp, 0
s_setreg_b32 hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
@@ -671,19 +562,6 @@ L_SAVE_HWREG:
s_mov_b32 m0, 0x0 //Next lane of v2 to write to
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- // Ensure no further changes to barrier or LDS state.
- // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
- s_barrier_signal -2
- s_barrier_wait -2
-
- // Re-read final state of BARRIER_COMPLETE field for save.
- s_getreg_b32 s_save_tmp, hwreg(S_STATUS_HWREG)
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_or_b32 s_save_status, s_save_status, s_save_tmp
-#endif
-
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
@@ -707,21 +585,6 @@ L_SAVE_HWREG:
s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_get_barrier_state s_save_tmp, -1
- s_wait_kmcnt (0)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-#endif
-
#if NO_SQC_STORE
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -814,9 +677,7 @@ L_SAVE_LDS_NORMAL:
s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
s_cbranch_scc0	L_SAVE_LDS_DONE			//no lds used? jump to L_SAVE_LDS_DONE
-#if ASIC_FAMILY < CHIP_GFX12
s_barrier //LDS is used? wait for other waves in the same TG
-#endif
s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
s_cbranch_scc0 L_SAVE_LDS_DONE
@@ -1081,11 +942,6 @@ L_RESTORE:
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
-#if ASIC_FAMILY >= CHIP_GFX12
- // Save s_restore_spi_init_hi for later use.
- s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
-#endif
-
//determine it is wave32 or wave64
get_wave_size2(s_restore_size)
@@ -1320,9 +1176,7 @@ L_RESTORE_SGPR:
// s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception.
// Clear DEBUG_EN before and restore MODE after the barrier.
s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0
-#if ASIC_FAMILY < CHIP_GFX12
s_barrier	//barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
-#endif
/* restore HW registers */
L_RESTORE_HWREG:
@@ -1334,11 +1188,6 @@ L_RESTORE_HWREG:
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
-#if ASIC_FAMILY >= CHIP_GFX12
- // Restore s_restore_spi_init_hi before the saved value gets clobbered.
- s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
-#endif
-
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
@@ -1358,44 +1207,6 @@ L_RESTORE_HWREG:
s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
-#if ASIC_FAMILY >= CHIP_GFX12
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
-
- // Only the first wave needs to restore the workgroup barrier.
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // Skip over WAVE_STATUS, since there is no state to restore from it
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
-
- s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // extract the saved signal count from s_restore_tmp
- s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
-
- // We need to call s_barrier_signal repeatedly to restore the signal
- // count of the work group barrier. The member count is already
- // initialized with the number of waves in the work group.
-L_BARRIER_RESTORE_LOOP:
- s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
- s_barrier_signal -1
- s_add_i32 s_restore_tmp, s_restore_tmp, -1
- s_branch L_BARRIER_RESTORE_LOOP
-
-L_SKIP_BARRIER_RESTORE:
-#endif
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1453,13 +1264,6 @@ L_RETURN_WITHOUT_PRIV:
s_setreg_b32 hwreg(S_STATUS_HWREG), s_restore_status // SCC is included, which is changed by previous salu
-#if ASIC_FAMILY >= CHIP_GFX12
- // Make barrier and LDS state visible to all waves in the group.
- // STATE_PRIV.BARRIER_COMPLETE may change after this point.
- s_barrier_signal -2
- s_barrier_wait -2
-#endif
-
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
@@ -1598,11 +1402,7 @@ function get_hwreg_size_bytes
end
function get_wave_size2(s_reg)
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
-#else
- s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
-#endif
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
new file mode 100644
index 000000000000..1740e98c6719
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -0,0 +1,1126 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* To compile this assembly code:
+ *
+ * gfx12:
+ * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx12.asm -P -o gfx12.sp3
+ * sp3 gfx12.sp3 -hex gfx12.hex
+ */
+
+#define CHIP_GFX12 37
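+// Same CHIP_GFX12 value that cwsr_trap_handler_gfx10.asm defined before
+// the GFX12 support was split out into this file.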
+
+#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+
+var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
+var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
+var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
+var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
+var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
+var SQ_WAVE_STATUS_WAVE64_SIZE = 1
+var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
+var SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
+var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
+
+var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
+var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
+var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
+var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
+
+var SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
+var BARRIER_STATE_SIGNAL_OFFSET = 16
+var BARRIER_STATE_VALID_OFFSET = 0
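+// Layout of the s_get_barrier_state result saved below: the barrier's
+// valid bit is at bit 0 (BARRIER_STATE_VALID_OFFSET) and the pending
+// signal count starts at bit 16 (BARRIER_STATE_SIGNAL_OFFSET).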
+
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
+
+// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
+// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_FIRST_WAVE_MASK = 0x80000000
+var S_SAVE_PC_HI_FIRST_WAVE_SHIFT = 31
+
+var s_sgpr_save_num = 108
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+var s_save_pc_lo = ttmp0
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_state_priv = ttmp12
+var s_save_excp_flag_priv = ttmp15
+var s_save_xnack_mask = s_save_excp_flag_priv
+var s_wave_size = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+var s_save_mem_offset = ttmp4
+var s_save_alloc_size = s_save_excp_flag_priv
+var s_save_tmp = ttmp14
+var s_save_m0 = ttmp5
+var s_save_ttmps_lo = s_save_tmp
+var s_save_ttmps_hi = s_save_excp_flag_priv
+
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+var S_WAVE_SIZE = 25
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+var s_restore_mem_offset = ttmp12
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp2
+var s_restore_mem_offset_save = s_restore_tmp
+var s_restore_m0 = s_restore_alloc_size
+var s_restore_mode = ttmp7
+var s_restore_flat_scratch = s_restore_tmp
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = ttmp4
+var s_restore_exec_hi = ttmp5
+var s_restore_state_priv = ttmp14
+var s_restore_excp_flag_priv = ttmp15
+var s_restore_xnack_mask = ttmp13
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+var s_restore_size = ttmp6
+var s_restore_ttmps_lo = s_restore_tmp
+var s_restore_ttmps_hi = s_restore_alloc_size
+var s_restore_spi_init_hi_save = s_restore_exec_hi
+
+shader main
+ asic(DEFAULT)
+ type(CS)
+ wave_size(32)
+
+	s_branch	L_SKIP_RESTORE						//not a restore; might be a regular trap or save
+
+L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE
+
+L_SKIP_RESTORE:
+	s_getreg_b32	s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV)	//save STATE_PRIV since we will change SCC
+
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
+
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+
+ s_and_b32 ttmp2, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
+
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_SAVE:
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
+ s_sleep 0x10
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+	// Check for maskable exceptions in EXCP_FLAG_USER.
+	// Maskable exceptions only cause the wave to enter the trap handler if
+	// their respective enable bit in TRAP_CTRL is set.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ s_and_b32 ttmp3, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ s_and_b32 ttmp2, ttmp3, ttmp2
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+#if SINGLE_STEP_MISSED_WORKAROUND
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ // WAVE_TRAP_CTRL is already in ttmp3.
+ s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+#endif
+
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+L_FETCH_2ND_TRAP:
+ // Read second-level TBA/TMA from first-level TMA and jump if available.
+ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+	// ttmp12 holds SQ_WAVE_STATE_PRIV
+ s_sendmsg_rtn_b64 [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
+ s_wait_idle
+ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+ s_bitcmp1_b32 ttmp15, 0xF
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+L_NO_SIGN_EXTEND_TMA:
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag
+ s_wait_idle
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 scope:SCOPE_SYS // second-level TBA
+ s_wait_idle
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 scope:SCOPE_SYS // second-level TMA
+ s_wait_idle
+
+ s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
+	s_cbranch_scc0	L_NO_NEXT_TRAP						// second-level trap handler has not been set
+ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
+
+L_NO_NEXT_TRAP:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_TRAP_CASE
+
+ // Host trap will not cause trap re-entry.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+ s_or_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATE_PRIV.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+ // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+ // Only restore fields which the trap handler changes.
+ s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
+
+ s_rfe_b64 [ttmp0, ttmp1]
+
+L_SAVE:
+ // If VGPRs have been deallocated then terminate the wavefront.
+ // It has no remaining program to run and cannot save without VGPRs.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
+ s_cbranch_scc0 L_HAVE_VGPRS
+ s_endpgm
+L_HAVE_VGPRS:
+
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+	/* inform SPI of readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+ s_sendmsg_rtn_b64 [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
+ s_wait_idle
+
+ // Save first_wave flag so we can clear high bits of save address.
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_lshl_b32 s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+
+ // Trap temporaries must be saved via VGPR but all VGPRs are in use.
+ // There is no ttmp space to hold the resource constant for VGPR save.
+ // Save v0 by itself since it requires only two SGPRs.
+ s_mov_b32 s_save_ttmps_lo, exec_lo
+ s_and_b32 s_save_ttmps_hi, exec_hi, 0xFFFF
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] scope:SCOPE_SYS
+ v_mov_b32 v0, 0x0
+ s_mov_b32 exec_lo, s_save_ttmps_lo
+ s_mov_b32 exec_hi, s_save_ttmps_hi
+
+ // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_wave_size2(s_save_ttmps_hi)
+ get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
+ get_svgpr_size_bytes(s_save_ttmps_hi)
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
+ s_and_b32 s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
+ s_addc_u32 s_save_ttmps_hi, s_save_ttmps_hi, 0x0
+
+ v_writelane_b32 v0, ttmp4, 0x4
+ v_writelane_b32 v0, ttmp5, 0x5
+ v_writelane_b32 v0, ttmp6, 0x6
+ v_writelane_b32 v0, ttmp7, 0x7
+ v_writelane_b32 v0, ttmp8, 0x8
+ v_writelane_b32 v0, ttmp9, 0x9
+ v_writelane_b32 v0, ttmp10, 0xA
+ v_writelane_b32 v0, ttmp11, 0xB
+ v_writelane_b32 v0, ttmp13, 0xD
+ v_writelane_b32 v0, exec_lo, 0xE
+ v_writelane_b32 v0, exec_hi, 0xF
+
+ s_mov_b32 exec_lo, 0x3FFF
+ s_mov_b32 exec_hi, 0x0
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] offset:0x40 scope:SCOPE_SYS
+ v_readlane_b32 ttmp14, v0, 0xE
+ v_readlane_b32 ttmp15, v0, 0xF
+ s_mov_b32 exec_lo, ttmp14
+ s_mov_b32 exec_hi, ttmp15
+
+	/* setup Resource Constants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+	s_mov_b32	s_save_buf_rsrc2, 0								//NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+
+ s_mov_b32 s_save_m0, m0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0
+ get_wave_size2(s_wave_size)
+
+ /* save first 4 VGPRs, needed for SGPR save */
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_4VGPR_WAVE32
+L_ENABLE_SAVE_4VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ s_branch L_SAVE_4VGPR_WAVE64
+L_SAVE_4VGPR_WAVE32:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+ s_branch L_SAVE_HWREG
+
+L_SAVE_4VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ /* save HW registers */
+
+L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
+ v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
+ v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
+ s_mov_b32 m0, 0x0 //Next lane of v2 to write to
+
+ // Ensure no further changes to barrier or LDS state.
+ // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ // Re-read final state of BARRIER_COMPLETE field for save.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATE_PRIV)
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
+
+ write_hwreg_to_v2(s_save_m0)
+ write_hwreg_to_v2(s_save_pc_lo)
+ s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ write_hwreg_to_v2(s_save_tmp)
+ write_hwreg_to_v2(s_save_exec_lo)
+ write_hwreg_to_v2(s_save_exec_hi)
+ write_hwreg_to_v2(s_save_state_priv)
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ write_hwreg_to_v2(s_save_tmp)
+
+ write_hwreg_to_v2(s_save_xnack_mask)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ write_hwreg_to_v2(s_save_tmp)
+
+ s_get_barrier_state s_save_tmp, -1
+ s_wait_kmcnt (0)
+ write_hwreg_to_v2(s_save_tmp)
+
+ // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ s_mov_b32 exec_lo, 0xFFFF
+ s_mov_b32 exec_hi, 0x0
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+
+ /* save SGPRs */
+	// Save SGPRs before the LDS save, so that s0 to s4 can be used during the LDS save...
+
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 ttmp13, 0x0 //next VGPR lane to copy SGPR into
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ s_nop 0x0 //Manually inserted wait states
+L_SAVE_SGPR_LOOP:
+ // SGPR is allocated in 16 SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ write_16sgpr_to_v2(s0)
+
+ s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled?
+ s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
+ s_mov_b32 ttmp13, 0x0
+ v_mov_b32 v2, 0x0
+L_SAVE_SGPR_SKIP_TCP_STORE:
+
+ s_add_u32 m0, m0, 16 //next sgpr index
+ s_cmp_lt_u32 m0, 96 //scc = (m0 < first 96 SGPR) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete?
+
+	//save the remaining 12 SGPRs
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ write_12sgpr_to_v2(s0)
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ /* save LDS */
+
+L_SAVE_LDS:
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_LDS_NORMAL
+L_ENABLE_SAVE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_LDS_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+	s_cbranch_scc0	L_SAVE_LDS_DONE			//no lds used? jump to L_SAVE_LDS_DONE
+
+ s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+	// first wave does the LDS save;
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+	//load per-lane byte addresses (lane id * 4, up to 63*4) into vgpr v0
+ v_mbcnt_lo_u32_b32 v0, -1, 0
+ v_mbcnt_hi_u32_b32 v0, -1, v0
+ v_mul_u32_u24 v0, 4, v0
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_SAVE_LDS_W64
+
+L_SAVE_LDS_W32:
+ s_mov_b32 s3, 128
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W32:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+	s_add_u32	m0, m0, s3						//every iteration of the loop stores 128 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_W64:
+ s_mov_b32 s3, 256
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W64:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+	s_add_u32	m0, m0, s3						//every iteration of the loop stores 256 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
+
+L_SAVE_LDS_DONE:
+	/* save VGPRs - the remaining VGPRs */
+L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
+	s_mov_b32	s_save_mem_offset, (0+128*4)			// for the remaining VGPRs
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_VGPR_NORMAL
+L_ENABLE_SAVE_VGPR_EXEC_HI:
+	s_mov_b32	s_save_mem_offset, (0+256*4)			// for the remaining VGPRs
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_VGPR_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ //determine it is wave32 or wave64
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_SAVE_VGPR_WAVE64
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W32_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete?
+
+ s_branch L_SAVE_VGPR_END
+
+L_SAVE_VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_SHARED_VGPR
+
+L_SAVE_VGPR_W64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+
+L_SAVE_SHARED_VGPR:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+	s_cbranch_scc0	L_SAVE_VGPR_END				//no shared_vgpr used? jump to L_SAVE_VGPR_END
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+	//m0 now holds the normal VGPR count; add the shared VGPR count to it to get the total count.
+	//shared VGPR save starts at index m0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+
+L_SAVE_SHARED_VGPR_WAVE64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
+
+L_SAVE_VGPR_END:
+ s_branch L_END_PGM
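+
+	// Save-area layout written above, from low to high offsets:
+	// VGPRs, shared VGPRs, SGPRs, HWREGs (with TTMPs after them), then
+	// LDS, matching the get_*_size_bytes() offset sums at each stage.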
+
+L_RESTORE:
+	/* Setup Resource Constants */
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+
+ // Save s_restore_spi_init_hi for later use.
+ s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
+
+ //determine it is wave32 or wave64
+ get_wave_size2(s_restore_size)
+
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
+
+ /* restore LDS */
+L_RESTORE_LDS:
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_LDS_NORMAL
+L_ENABLE_RESTORE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_LDS_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+size(SGPR)+size(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
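+ // Worked example (illustrative, wave32, vgpr_size field = 1, no shared
+ // VGPRs): size(VGPR) = (1+1)*4*32*4 = 1024 bytes, size(SVGPR) = 0,
+ // size(SGPR) = 512, size(HWREG) = 128, so the LDS data begins at byte
+ // offset 1024 + 0 + 512 + 128 = 1664 of the save area.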
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
+
+L_RESTORE_LDS_LOOP_W32:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+ s_add_u32 m0, m0, 128 //every iteration covers 128 bytes (32 DW)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
+ s_branch L_RESTORE_VGPR
+
+L_RESTORE_LDS_LOOP_W64:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+ s_add_u32 m0, m0, 256 //every iteration covers 256 bytes (64 DW)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
+
+ /* restore VGPRs */
+L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_VGPR_NORMAL
+L_ENABLE_RESTORE_VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_VGPR_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ //determine whether this is wave32 or wave64
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore starts with v4, v0..v3 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE32_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
+
+ /* VGPR restore on v0 */
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+
+ s_branch L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE64:
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore starts with v4, v0..v3 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SHARED_VGPR
+
+L_RESTORE_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+L_RESTORE_SHARED_VGPR:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+ s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used?
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+ //m0 now holds the normal vgpr count; add the shared_vgpr count to m0 to get the total count.
+ //restoring of shared_vgprs starts from the index in m0
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR restore is complete?
+
+ s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!!
+
+ /* VGPR restore on v0 */
+L_RESTORE_V0:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+
+ /* restore SGPRs */
+ //will be 4+8+16*6 = 108 SGPRs restored
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+L_RESTORE_SGPR:
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 20*4 //s108~s127 are not saved
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 m0, s_sgpr_save_num
+
+ read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 4 // Restore from S[0] to S[104]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+
+ read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 8 // Restore from S[0] to S[96]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+ s_cmp_eq_u32 m0, 0 //scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP
+
+ // s_barrier with STATE_PRIV.TRAP_AFTER_INST=1, STATUS.PRIV=1 incorrectly asserts debug exception.
+ // Clear DEBUG_EN before and restore MODE after the barrier.
+ s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE), 0
+
+ /* restore HW registers */
+L_RESTORE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // Restore s_restore_spi_init_hi before the saved value gets clobbered.
+ s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_state_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_excp_flag_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_LO), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_HI), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
+
+ // Only the first wave needs to restore the workgroup barrier.
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // Skip over WAVE_STATUS, since there is no state to restore from it
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // extract the saved signal count from s_restore_tmp
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
+
+ // We need to call s_barrier_signal repeatedly to restore the signal
+ // count of the work group barrier. The member count is already
+ // initialized with the number of waves in the work group.
+L_BARRIER_RESTORE_LOOP:
+ s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+ s_barrier_signal -1
+ s_add_i32 s_restore_tmp, s_restore_tmp, -1
+ s_branch L_BARRIER_RESTORE_LOOP
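+ // Illustrative trace: with a saved signal count of 3, the s_and_b32 above
+ // sets scc=1 for the values 3, 2 and 1 (signal and decrement each time)
+ // and scc=0 at 0, so exactly three s_barrier_signal -1 are issued.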
+
+L_SKIP_BARRIER_RESTORE:
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ // EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed.
+ // Only restore the other fields to avoid clobbering them.
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 0, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE), s_restore_excp_flag_priv
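+ // Note on the sequence above: s_setreg_b32 takes its field value from the
+ // low bits of the source, so the saved dword is shifted right after each
+ // write to bring the next writable field range (RESTORE_PART_2, then
+ // RESTORE_PART_3) down to bit 0, leaving SAVE_CONTEXT and HOST_TRAP in
+ // the live register untouched.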
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_MODE), s_restore_mode
+
+ // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
+ get_svgpr_size_bytes(s_restore_ttmps_hi)
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
+ s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
+ s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
+ s_load_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 scope:SCOPE_SYS
+ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 scope:SCOPE_SYS
+ s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 scope:SCOPE_SYS
+ s_wait_idle
+
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which was changed by the preceding SALU instructions
+
+ // Make barrier and LDS state visible to all waves in the group.
+ // STATE_PRIV.BARRIER_COMPLETE may change after this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+
+L_END_PGM:
+ s_endpgm_saved
+end
+
+function write_hwreg_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ v_writelane_b32 v2, s, m0
+ s_add_u32 m0, m0, 0x1
+end
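+// Usage note: each call deposits one hwreg value into lane m0 of v2 and
+// advances m0, so successive register values pack into consecutive lanes
+// for a single TCP store later in the save path.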
+
+
+function write_16sgpr_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], ttmp13
+ s_add_u32 ttmp13, ttmp13, 0x1
+ end
+end
+
+function write_12sgpr_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], ttmp13
+ s_add_u32 ttmp13, ttmp13, 0x1
+ end
+end
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+end
+
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*8
+ s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*4
+ s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
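+// Note: the three read_*sgpr_from_mem helpers decrement s_mem_offset before
+// issuing the load, so the SGPR restore walks the save area from its end
+// toward its start, matching the descending m0 in L_RESTORE_SGPR_LOOP.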
+
+function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_bitcmp1_b32 s_size, S_WAVE_SIZE
+ s_cbranch_scc1 L_ENABLE_SHIFT_W64
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+7) //size in bytes = (vgpr_size + 1) * 4 VGPRs * 32 lanes * 4 bytes (non-zero value)
+ s_branch L_SHIFT_DONE
+L_ENABLE_SHIFT_W64:
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //size in bytes = (vgpr_size + 1) * 4 VGPRs * 64 lanes * 4 bytes (non-zero value)
+L_SHIFT_DONE:
+end
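+// Worked example (illustrative): for a wave64 wave with a vgpr_size field
+// of 3, the shift by (2+8) gives (3+1) << 10 = 4096 bytes, i.e. 16 VGPRs *
+// 64 lanes * 4 bytes per lane.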
+
+function get_svgpr_size_bytes(s_svgpr_size_byte)
+ s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_lshl_b32 s_svgpr_size_byte, s_svgpr_size_byte, (3+7)
+end
+
+function get_sgpr_size_bytes
+ return 512
+end
+
+function get_hwreg_size_bytes
+ return 128
+end
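+// These constants follow from the save layout: 512 bytes reserves room for
+// 128 SGPRs * 4 bytes (s108~s127 are skipped on restore, hence the 20*4
+// adjustment in L_RESTORE_SGPR), and 128 bytes reserves 32 dwords of HWREG
+// state.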
+
+function get_wave_size2(s_reg)
+ s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
+ s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
+end
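+// Usage note: get_wave_size2 leaves bit S_WAVE_SIZE set in s_reg for wave64
+// waves and clear for wave32; callers test it with s_lshr_b32/s_and_b32
+// (see L_RESTORE_LDS) or s_bitcmp1_b32 (see get_vgpr_size_bytes).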
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index bb26338204f4..0eabb7a8cab9 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -37,17 +37,28 @@
* gc_9_4_3:
* cpp -DASIC_FAMILY=GC_9_4_3 cwsr_trap_handler_gfx9.asm -P -o gc_9_4_3.sp3
* sp3 gc_9_4_3.sp3 -hex gc_9_4_3.hex
+ *
+ * gc_9_5_0:
+ * cpp -DASIC_FAMILY=GC_9_5_0 cwsr_trap_handler_gfx9.asm -P -o gc_9_5_0.sp3
+ * sp3 gc_9_5_0.sp3 -hex gc_9_5_0.hex
*/
#define CHIP_VEGAM 18
#define CHIP_ARCTURUS 23
#define CHIP_ALDEBARAN 25
#define CHIP_GC_9_4_3 26
+#define CHIP_GC_9_5_0 27
var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
var SINGLE_STEP_MISSED_WORKAROUND = (ASIC_FAMILY <= CHIP_ALDEBARAN) //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
+#if ASIC_FAMILY < CHIP_GC_9_4_3
+#define VMEM_MODIFIERS slc:1 glc:1
+#else
+#define VMEM_MODIFIERS sc0:1 nt:1
+#endif
+
/**************************************************************************/
/* variables */
/**************************************************************************/
@@ -62,7 +73,13 @@ var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 11
+var LDS_RESTORE_GRANULARITY_BYTES = 1280
+#else
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var LDS_RESTORE_GRANULARITY_BYTES = 512
+#endif
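+// Illustrative arithmetic: each buffer_load_dword with lds:1 brings in
+// 64 lanes * 4 bytes = 256 bytes for a gfx9 wave64, so two loads per
+// iteration give the pre-gfx950 granularity of 512 bytes and five loads
+// give 1280 bytes.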
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
@@ -557,12 +574,21 @@ if SAVE_AFTER_XNACK_ERROR
v_lshlrev_b32 v2, 2, v3
L_SAVE_LDS_LOOP_SQC:
+#if ASIC_FAMILY < CHIP_GC_9_5_0
ds_read2_b32 v[0:1], v2 offset0:0 offset1:0x40
s_waitcnt lgkmcnt(0)
-
write_vgprs_to_mem_with_sqc(v0, 2, s_save_buf_rsrc0, s_save_mem_offset)
v_add_u32 v2, 0x200, v2
+#else
+ // gfx950 needs to save in multiples of 256 bytes.
+ ds_read_b32 v0, v2
+ s_waitcnt lgkmcnt(0)
+ write_vgprs_to_mem_with_sqc(v0, 1, s_save_buf_rsrc0, s_save_mem_offset)
+
+ v_add_u32 v2, 0x100, v2
+#endif
+
v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
s_cbranch_vccnz L_SAVE_LDS_LOOP_SQC
@@ -581,11 +607,14 @@ end
L_SAVE_LDS_LOOP_VECTOR:
ds_read_b64 v[0:1], v2 //x =LDS[a], byte address
s_waitcnt lgkmcnt(0)
- buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1 glc:1 slc:1
+ buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset VMEM_MODIFIERS offen:1
// s_waitcnt vmcnt(0)
// v_add_u32 v2, vcc[0:1], v2, v3
v_add_u32 v2, v2, v3
v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+ s_mov_b64 exec, vcc
+#endif
s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR
// restore rsrc3
@@ -748,8 +777,13 @@ L_RESTORE:
L_RESTORE_LDS_LOOP:
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
- s_add_u32 m0, m0, 256*2 // 128 DW
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:512 // third 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:768 // fourth 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:1024 // fifth 64DW
+#endif
+ s_add_u32 m0, m0, LDS_RESTORE_GRANULARITY_BYTES // 128/320 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, LDS_RESTORE_GRANULARITY_BYTES //mem offset increased by 128/320 DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete?
@@ -979,17 +1013,17 @@ L_TCP_STORE_CHECK_DONE:
end
function write_4vgprs_to_mem(s_rsrc, s_mem_offset)
- buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- buffer_store_dword v1, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256
- buffer_store_dword v2, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*2
- buffer_store_dword v3, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*3
+ buffer_store_dword v0, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS
+ buffer_store_dword v1, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256
+ buffer_store_dword v2, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*2
+ buffer_store_dword v3, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*3
end
function read_4vgprs_from_mem(s_rsrc, s_mem_offset)
- buffer_load_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- buffer_load_dword v1, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256
- buffer_load_dword v2, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*2
- buffer_load_dword v3, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*3
+ buffer_load_dword v0, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS
+ buffer_load_dword v1, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256
+ buffer_load_dword v2, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*2
+ buffer_load_dword v3, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*3
s_waitcnt vmcnt(0)
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index e5324c5bc6c7..693469c18c60 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1639,6 +1639,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,
*pcache_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
index 924d0fd85dfb..27aa1a5b120f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -79,6 +79,7 @@ static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0) ||
KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0));
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9b51dd75fefc..a29374c86405 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -85,6 +85,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(4, 4, 0):/* ALDEBARAN */
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
case IP_VERSION(5, 0, 0):/* NAVI10 */
case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
case IP_VERSION(5, 0, 2):/* NAVI14 */
@@ -152,6 +153,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
break;
case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
case IP_VERSION(9, 4, 4): /* GC 9.4.4 */
+ case IP_VERSION(9, 5, 0): /* GC 9.5.0 */
kfd->device_info.event_interrupt_class =
&event_interrupt_class_v9_4_3;
break;
@@ -356,6 +358,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
break;
+ case IP_VERSION(9, 5, 0):
+ gfx_target_version = 90500;
+ f2g = &gc_9_4_3_kfd2kgd;
+ break;
/* Navi10 */
case IP_VERSION(10, 1, 10):
gfx_target_version = 100100;
@@ -515,6 +521,10 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
> KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
> KFD_CWSR_TMA_OFFSET);
@@ -567,6 +577,7 @@ static int kfd_gws_init(struct kfd_node *node)
&& kfd->mec2_fw_version >= 0x28) ||
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) ||
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 5, 0)) ||
(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
&& kfd->mec2_fw_version >= 0x6b) ||
@@ -638,6 +649,14 @@ static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
struct kfd_node *knode;
unsigned int i;
+ /*
+ * flush_workqueue ensures that there are no outstanding
+ * work-queue items that will access the ih_fifo. New work items
+ * can't be created because we stopped interrupt handling above.
+ */
+ flush_workqueue(kfd->ih_wq);
+ destroy_workqueue(kfd->ih_wq);
+
for (i = 0; i < num_nodes; i++) {
knode = kfd->nodes[i];
device_queue_manager_uninit(knode->dqm);
@@ -733,14 +752,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
- /* For GFX9.4.3, we need special handling for VMIDs depending on
- * partition mode.
+ /* For multi-partition capable GPUs, we need special handling for VMIDs
+ * depending on partition mode.
* In CPX mode, the VMID range needs to be shared between XCDs.
* Additionally, there are 13 VMIDs (3-15) available for KFD. To
* divide them equally, we change starting VMID to 4 and not use
* VMID 3.
- * If the VMID range changes for GFX9.4.3, then this code MUST be
- * revisited.
+ * If the VMID range changes for multi-partition capable GPUs, then
+ * this code MUST be revisited.
*/
if (kfd->adev->xcp_mgr) {
partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
@@ -805,14 +824,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
/*
- * For GFX9.4.3, the KFD abstracts all partitions within a socket as
- * xGMI connected in the topology so assign a unique hive id per
- * device based on the pci device location if device is in PCIe mode.
+ * For multi-partition capable GPUs, the KFD abstracts all partitions
+ * within a socket as xGMI connected in the topology so assign a unique
+ * hive id per device based on the pci device location if device is in
+ * PCIe mode.
*/
- if (!kfd->hive_id &&
- (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) &&
- kfd->num_nodes > 1)
+ if (!kfd->hive_id && kfd->num_nodes > 1)
kfd->hive_id = pci_dev_id(kfd->adev->pdev);
kfd->noretry = kfd->adev->gmc.noretry;
@@ -850,12 +867,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
}
- if ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) &&
- partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
- /* For GFX9.4.3 and CPX mode, first XCD gets VMID range
- * 4-9 and second XCD gets VMID range 10-15.
+ /* For multi-partition capable GPUs and CPX mode, first
+ * XCD gets VMID range 4-9 and second XCD gets VMID
+ * range 10-15.
*/
node->vm_info.first_vmid_kfd = (i%2 == 0) ?
@@ -879,8 +895,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
amdgpu_amdkfd_get_local_mem_info(kfd->adev,
&node->local_mem_info, node->xcp);
- if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4))
+ if (kfd->adev->xcp_mgr)
kfd_setup_interrupt_bitmap(node, i);
/* Initialize the KFD node */
@@ -1059,21 +1074,6 @@ static int kfd_resume(struct kfd_node *node)
return err;
}
-static inline void kfd_queue_work(struct workqueue_struct *wq,
- struct work_struct *work)
-{
- int cpu, new_cpu;
-
- cpu = new_cpu = smp_processor_id();
- do {
- new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
- if (cpu_to_node(new_cpu) == numa_node_id())
- break;
- } while (cpu != new_cpu);
-
- queue_work_on(new_cpu, wq, work);
-}
-
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
@@ -1099,7 +1099,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(node,
is_patched ? patched_ihre : ih_ring_entry)) {
- kfd_queue_work(node->ih_wq, &node->interrupt_work);
+ queue_work(node->kfd->ih_wq, &node->interrupt_work);
spin_unlock_irqrestore(&node->interrupt_lock, flags);
return;
}
@@ -1514,6 +1514,73 @@ bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
return kfd_compute_active(node);
}
+/**
+ * kgd2kfd_vmfault_fast_path() - KFD vm page fault interrupt handling fast path for gmc v9
+ * @adev: amdgpu device
+ * @entry: vm fault interrupt ring entry
+ * @retry_fault: whether this is a retry fault
+ *
+ * retry fault -
+ * with CAM enabled, adev primary ring
+ * | gmc_v9_0_process_interrupt()
+ * adev soft_ring
+ * | gmc_v9_0_process_interrupt() worker failed to recover page fault
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * without CAM, adev primary ring1
+ * | gmc_v9_0_process_interrupt worker failed to recover page fault
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * no-retry fault -
+ * adev primary ring
+ * | gmc_v9_0_process_interrupt()
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * fast path - After kfd_signal_vm_fault_event, gmc_v9_0_process_interrupt drops page
+ * faults from the same process and does not copy the interrupt to the
+ * KFD node ih_fifo.
+ * With the gdb debugger enabled, the retry fault must be converted to a
+ * no-retry fault for the debugger, so the fast path cannot be used.
+ *
+ * Return:
+ * true - use the fast path to handle this fault
+ * false - use normal path to handle it
+ */
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ struct kfd_process *p;
+ u32 cam_index;
+
+ if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
+ p = kfd_lookup_process_by_pasid(entry->pasid);
+ if (!p)
+ return true;
+
+ if (p->gpu_page_fault && !p->debug_trap_enabled) {
+ if (retry_fault && adev->irq.retry_cam_enabled) {
+ cam_index = entry->src_data[2] & 0x3ff;
+ WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
+ }
+
+ kfd_unref_process(p);
+ return true;
+ }
+
+ /*
+ * This is the first page fault; set the flag and then signal user space.
+ */
+ p->gpu_page_fault = true;
+ kfd_unref_process(p);
+ }
+ return false;
+}
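+/*
+ * Illustrative caller sketch (hypothetical, not part of this change): a GMC
+ * fault handler could consult the fast path before forwarding the entry to
+ * the KFD ih_fifo:
+ *
+ *	if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
+ *		return 1;	// fault already handled/dropped for this process
+ *	// otherwise fall through and copy the interrupt to the KFD node
+ */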
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 16b5daaa272f..1405e8affd48 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2388,6 +2388,9 @@ static int wait_on_destroy_queue(struct device_queue_manager *dqm,
q->process);
int ret = 0;
+ if (WARN_ON(!pdd))
+ return ret;
+
if (pdd->qpd.is_debug)
return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 210bcc048f4c..67137e674f1d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -64,7 +64,8 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index ea3792249209..d075f24e5f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -748,6 +748,16 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
+ /*
+ * If the id is valid but the slot is not signaled, the GPU may have signaled the
+ * same event twice before the driver had a chance to process the first interrupt.
+ * The signal slot is auto-reset after set_event wakes up user space, so drop the
+ * second event: the application only needs to be woken up once.
+ */
+ if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) &&
+ partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT)
+ goto out_unlock;
+
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
@@ -776,6 +786,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
}
}
+out_unlock:
rcu_read_unlock();
kfd_unref_process(p);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index d46a13156ee9..0cb5c582ce7d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -184,6 +184,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
} else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
+ amdgpu_ras_set_err_poison(dev->adev, AMDGPU_RAS_BLOCK__GFX);
break;
case SOC15_IH_CLIENTID_VMC:
case SOC15_IH_CLIENTID_VMC1:
@@ -213,6 +214,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
} else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
+ amdgpu_ras_set_err_poison(dev->adev, AMDGPU_RAS_BLOCK__SDMA);
break;
default:
dev_warn(dev->adev->dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 9b6b6e882593..783c2f5a04e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -46,7 +46,7 @@
#include <linux/kfifo.h>
#include "kfd_priv.h"
-#define KFD_IH_NUM_ENTRIES 8192
+#define KFD_IH_NUM_ENTRIES 16384
static void interrupt_wq(struct work_struct *);
@@ -62,11 +62,14 @@ int kfd_interrupt_init(struct kfd_node *node)
return r;
}
- node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
- if (unlikely(!node->ih_wq)) {
- kfifo_free(&node->ih_fifo);
- dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
- return -ENOMEM;
+ if (!node->kfd->ih_wq) {
+ node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,
+ node->kfd->num_nodes);
+ if (unlikely(!node->kfd->ih_wq)) {
+ kfifo_free(&node->ih_fifo);
+ dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
}
spin_lock_init(&node->interrupt_lock);
@@ -96,16 +99,6 @@ void kfd_interrupt_exit(struct kfd_node *node)
spin_lock_irqsave(&node->interrupt_lock, flags);
node->interrupts_active = false;
spin_unlock_irqrestore(&node->interrupt_lock, flags);
-
- /*
- * flush_work ensures that there are no outstanding
- * work-queue items that will access interrupt_ring. New work items
- * can't be created because we stopped interrupt handling above.
- */
- flush_workqueue(node->ih_wq);
-
- destroy_workqueue(node->ih_wq);
-
kfifo_free(&node->ih_fifo);
}
@@ -114,55 +107,48 @@ void kfd_interrupt_exit(struct kfd_node *node)
*/
bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
{
- int count;
-
- count = kfifo_in(&node->ih_fifo, ih_ring_entry,
- node->kfd->device_info.ih_ring_entry_size);
- if (count != node->kfd->device_info.ih_ring_entry_size) {
- dev_dbg_ratelimited(node->adev->dev,
- "Interrupt ring overflow, dropping interrupt %d\n",
- count);
+ if (kfifo_is_full(&node->ih_fifo)) {
+ dev_warn_ratelimited(node->adev->dev, "KFD node %d ih_fifo overflow\n",
+ node->node_id);
return false;
}
+ kfifo_in(&node->ih_fifo, ih_ring_entry, node->kfd->device_info.ih_ring_entry_size);
return true;
}
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
-static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry)
+static bool dequeue_ih_ring_entry(struct kfd_node *node, u32 **ih_ring_entry)
{
int count;
- count = kfifo_out(&node->ih_fifo, ih_ring_entry,
- node->kfd->device_info.ih_ring_entry_size);
-
- WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size);
+ if (kfifo_is_empty(&node->ih_fifo))
+ return false;
+ count = kfifo_out_linear_ptr(&node->ih_fifo, ih_ring_entry,
+ node->kfd->device_info.ih_ring_entry_size);
+ WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);
return count == node->kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
{
- struct kfd_node *dev = container_of(work, struct kfd_node,
- interrupt_work);
- uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
+ struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work);
+ uint32_t *ih_ring_entry;
unsigned long start_jiffies = jiffies;
- if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
- dev_err_once(dev->adev->dev, "Ring entry too small\n");
- return;
- }
-
- while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
+ while (dequeue_ih_ring_entry(dev, &ih_ring_entry)) {
dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
+ kfifo_skip_count(&dev->ih_fifo, dev->kfd->device_info.ih_ring_entry_size);
+
if (time_is_before_jiffies(start_jiffies + HZ)) {
/* If we spent more than a second processing signals,
* reschedule the worker to avoid soft-lockup warnings
*/
- queue_work(dev->ih_wq, &dev->interrupt_work);
+ queue_work(dev->kfd->ih_wq, &dev->interrupt_work);
break;
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 84e8ea3a8a0c..ff417d5361c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -78,7 +78,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) {
m->compute_static_thread_mgmt_se4 = se_mask[4];
m->compute_static_thread_mgmt_se5 = se_mask[5];
m->compute_static_thread_mgmt_se6 = se_mask[6];
@@ -301,7 +302,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_ctx_save_control = 0;
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0))
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
@@ -885,7 +887,8 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
@@ -909,8 +912,10 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
+ mqd->check_preemption_failed = check_preemption_failed;
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
mqd->init_mqd = init_mqd_hiq_v9_4_3;
mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 37930629edc5..4984b41cd372 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -28,6 +28,10 @@
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
+#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
+#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
+#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
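+/*
+ * Illustrative note: the reasons are independent bits, so a runlist may be
+ * oversubscribed for several reasons at once, e.g.
+ * (OVER_SUBSCRIPTION_PROCESS_COUNT | OVER_SUBSCRIPTION_GWS_QUEUE_COUNT);
+ * pm_create_runlist_ib() reports each set bit in its warning.
+ */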
+
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
{
@@ -40,7 +44,7 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- bool *over_subscription)
+ int *over_subscription)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -58,17 +62,20 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- *over_subscription = false;
+ *over_subscription = 0;
if (node->max_proc_per_quantum > 1)
max_proc_per_quantum = node->max_proc_per_quantum;
- if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_cp_queues_num(pm->dqm) ||
- gws_queue_count > 1) {
- *over_subscription = true;
+ if (process_count > max_proc_per_quantum)
+ *over_subscription |= OVER_SUBSCRIPTION_PROCESS_COUNT;
+ if (compute_queue_count > get_cp_queues_num(pm->dqm))
+ *over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
+ if (gws_queue_count > 1)
+ *over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
+
+ if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
- }
map_queue_size = pm->pmf->map_queues_size;
/* calculate run list ib allocation size */
@@ -89,7 +96,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- bool *is_over_subscription)
+ int *is_over_subscription)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -134,7 +141,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct qcm_process_device *qpd;
struct queue *q;
struct kernel_queue *kq;
- bool is_over_subscription;
+ int is_over_subscription;
rl_wptr = retval = processes_mapped = 0;
@@ -213,15 +220,20 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(
- dev,
- "Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes." : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues." : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch." : "");
+
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
alloc_size_bytes / sizeof(uint32_t),
true);
}
- pm->is_over_subscription = is_over_subscription;
+ pm->is_over_subscription = !!is_over_subscription;
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
@@ -248,7 +260,8 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
default:
if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 5, 0))
pm->pmf = &kfd_aldebaran_pm_funcs;
else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
pm->pmf = &kfd_v9_pm_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9e5ca0b93b2a..d8cd913aa772 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -32,7 +32,7 @@
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
-#include <linux/kfd_ioctl.h>
+#include <uapi/linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
@@ -207,7 +207,8 @@ enum cache_policy {
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) || \
- (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)))
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) || \
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)))
struct kfd_node;
@@ -273,7 +274,6 @@ struct kfd_node {
/* Interrupts */
struct kfifo ih_fifo;
- struct workqueue_struct *ih_wq;
struct work_struct interrupt_work;
spinlock_t interrupt_lock;
@@ -366,6 +366,8 @@ struct kfd_dev {
struct kfd_node *nodes[MAX_KFD_NODES];
unsigned int num_nodes;
+ struct workqueue_struct *ih_wq;
+
/* Kernel doorbells for KFD device */
struct amdgpu_bo *doorbells;
@@ -1002,6 +1004,9 @@ struct kfd_process {
struct semaphore runtime_enable_sema;
bool is_runtime_retry;
struct kfd_runtime_info runtime_info;
+
+ /* if gpu page fault sent to KFD */
+ bool gpu_page_fault;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -1150,7 +1155,8 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
uint32_t i;
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0))
return dev->nodes[0];
for (i = 0; i < dev->num_nodes; i++)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index edfe0b4788f4..083f83c94531 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -2128,10 +2128,11 @@ int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
irq_drain_fence[3] = pdd->process->pasid;
/*
- * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
+ * For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
*/
if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0)) {
node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
irq_drain_fence[3] |= node_id << 16;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 59b92d66e958..9df56f8e09f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -131,8 +131,9 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
if (!gws && pdd->qpd.num_gws == 0)
return -EINVAL;
- if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) &&
!dev->kfd->shared_resources.enable_mes) {
if (gws)
ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
@@ -197,6 +198,7 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info, pqn->q->gws);
@@ -320,11 +322,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
unsigned int max_queues = 127; /* HWS limit */
/*
- * On GFX 9.4.3, increase the number of queues that
- * can be created to 255. No HWS limit on GFX 9.4.3.
+ * On GFX 9.4.3/9.5.0, increase the number of queues that
+ * can be created to 255. No HWS limit on GFX 9.4.3/9.5.0.
*/
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0))
max_queues = 255;
q = NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index ad29634f8b44..ecccd7adbab4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -394,7 +394,8 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
- gfxv == 90008) /* GFX_VERSION_ARCTURUS */
+ gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
+ gfxv == 90500)
vgpr_size = 0x80000;
else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */
gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */
@@ -405,9 +406,10 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
return vgpr_size;
}
-#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv) \
+#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props) \
(kfd_get_vgpr_size_per_cu(gfxv) + SGPR_SIZE_PER_CU +\
- LDS_SIZE_PER_CU + HWREG_SIZE_PER_CU)
+ (((gfxv) == 90500) ? (props->lds_size_in_kb << 10) : LDS_SIZE_PER_CU) +\
+ HWREG_SIZE_PER_CU)
#define CNTL_STACK_BYTES_PER_WAVE(gfxv) \
((gfxv) >= 100100 ? 12 : 8) /* GFX_VERSION_NAVI10*/
@@ -431,7 +433,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
min(cu_num * 40, props->array_count / props->simd_arrays_per_engine * 512)
: cu_num * 32;
- wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv), PAGE_SIZE);
+ wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), PAGE_SIZE);
ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8;
ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size,
PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 3e2911895c74..bd3e20d981e0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1195,6 +1195,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
struct kfd_node *bo_node;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
+ uint32_t gc_ip_version = KFD_GC_VERSION(node);
uint64_t pte_flags;
bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
@@ -1204,7 +1205,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_node = prange->svm_bo->node;
- switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
+ switch (gc_ip_version) {
case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_node == node) {
@@ -1241,8 +1242,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
+ mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1257,9 +1260,13 @@ svm_range_get_pte_flags(struct kfd_node *node,
*/
else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
mapping_flags |= AMDGPU_VM_MTYPE_NC;
- /* PCIe P2P or extended system scope coherence */
- else
+ /* PCIe P2P on GPUs pre-9.5.0 */
+ else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
+ !svm_nodes_in_same_hive(bo_node, node))
mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ /* Other remote memory */
+ else
+ mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the APU */
} else if (node->adev->flags & AMD_IS_APU) {
/* On NUMA systems, locality is determined per-page
@@ -1271,7 +1278,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ if (gc_ip_version < IP_VERSION(9, 5, 0))
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
}
break;
case IP_VERSION(12, 0, 0):
@@ -1299,7 +1309,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
pte_flags = AMDGPU_PTE_VALID;
pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
- if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
+ if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 9476e30d6baa..ceb9fb475ef1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1714,7 +1714,8 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(knode) == IP_VERSION(9, 5, 0))
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
@@ -1776,7 +1777,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
struct amdgpu_cu_info *cu_info = &kdev->adev->gfx.cu_info;
struct amdgpu_gfx_config *gfx_info = &kdev->adev->gfx.config;
int gpu_processor_id;
- struct kfd_cache_properties *props_ext;
+ struct kfd_cache_properties *props_ext = NULL;
int num_of_entries = 0;
int num_of_cache_types = 0;
struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 11e3f2f3b174..abd3b6564373 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,6 +8,8 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
depends on BROKEN || !CC_IS_CLANG || ARM64 || LOONGARCH || RISCV || SPARC64 || X86_64
+ select CEC_CORE
+ select CEC_NOTIFIER
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || LOONGARCH || RISCV))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cd16dae534dc..0ec178ca7434 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -93,10 +93,12 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <media/cec-notifier.h>
#include <acpi/video.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -955,13 +957,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
}
}
-static int dm_set_clockgating_state(void *handle,
+static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dm_set_powergating_state(void *handle,
+static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1036,8 +1038,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
continue;
*enabled = true;
+ mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
+ mutex_unlock(&connector->eld_mutex);
break;
}
@@ -2152,9 +2156,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
- if (!adev->dm.secure_display_ctxs)
+ amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctx.crtc_ctx)
DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
+ adev->dm.secure_display_ctx.support_mul_roi = true;
+
#endif
DRM_DEBUG_DRIVER("KMS initialized.\n");
@@ -2197,15 +2205,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.secure_display_ctxs) {
+ if (adev->dm.secure_display_ctx.crtc_ctx) {
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->dm.secure_display_ctxs[i].crtc) {
- flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
- flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
}
}
- kfree(adev->dm.secure_display_ctxs);
- adev->dm.secure_display_ctxs = NULL;
+ kfree(adev->dm.secure_display_ctx.crtc_ctx);
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
}
#endif
if (adev->dm.hdcp_workqueue) {
@@ -2338,7 +2346,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu);
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmcu);
if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
@@ -2746,6 +2755,48 @@ out_fail:
mutex_unlock(&mgr->lock);
}
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_phys_addr_invalidate(n);
+}
+
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_set_phys_addr(n,
+ connector->display_info.source_physical_address);
+}
+
+static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(ddev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (suspend)
+ hdmi_cec_unset_edid(aconnector);
+ else
+ hdmi_cec_set_edid(aconnector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
@@ -3017,6 +3068,8 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
if (IS_ERR(adev->dm.cached_state))
return PTR_ERR(adev->dm.cached_state);
+ s3_handle_hdmi_cec(adev_to_drm(adev), true);
+
s3_handle_mst(adev_to_drm(adev), true);
amdgpu_dm_irq_suspend(adev);
@@ -3289,6 +3342,8 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
*/
amdgpu_dm_irq_resume_early(adev);
+ s3_handle_hdmi_cec(ddev, false);
+
/* On resume we need to rewrite the MSTM control bits to enable MST*/
s3_handle_mst(ddev, false);
@@ -3457,6 +3512,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
+ int min_input_signal_override;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
@@ -3493,6 +3549,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_min_input_signal = 0;
caps->aux_max_input_signal = 512;
}
+
+ min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
+ if (min_input_signal_override >= 0)
+ caps->min_input_signal = min_input_signal_override;
}
void amdgpu_dm_update_connector_after_detect(
@@ -3598,6 +3658,7 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->drm_edid = NULL;
+ hdmi_cec_unset_edid(aconnector);
if (aconnector->dc_link->aux_mode) {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
}
@@ -3607,6 +3668,7 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
drm_edid_connector_update(connector, aconnector->drm_edid);
+ hdmi_cec_set_edid(aconnector);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
connector->display_info.source_physical_address);
@@ -3623,6 +3685,7 @@ void amdgpu_dm_update_connector_after_detect(
amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
update_connector_ext_caps(aconnector);
} else {
+ hdmi_cec_unset_edid(aconnector);
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
aconnector->num_modes = 0;
@@ -5306,7 +5369,8 @@ static int dm_init_microcode(struct amdgpu_device *adev)
/* ASIC doesn't support DMUB. */
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub);
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmub);
return r;
}
@@ -5522,8 +5586,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
@@ -5622,7 +5685,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
&plane_info->tiling_info,
&plane_info->plane_size,
&plane_info->dcc, address,
- tmz_surface, force_disable_dcc);
+ tmz_surface);
if (ret)
return ret;
@@ -5643,7 +5706,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_scaling_info scaling_info;
struct dc_plane_info plane_info;
int ret;
- bool force_disable_dcc = false;
ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
@@ -5654,13 +5716,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state,
afb->tiling_flags,
&plane_info,
&dc_plane_state->address,
- afb->tmz_surface,
- force_disable_dcc);
+ afb->tmz_surface);
if (ret)
return ret;
@@ -7042,6 +7102,7 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+ cec_notifier_conn_unregister(amdgpu_dm_connector->notifier);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -8278,6 +8339,27 @@ create_i2c(struct ddc_service *ddc_service,
return i2c;
}
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_connector_info conn_info;
+ struct drm_device *ddev = aconnector->base.dev;
+ struct device *hdmi_dev = ddev->dev;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) {
+ drm_info(ddev, "HDMI-CEC feature masked\n");
+ return -EINVAL;
+ }
+
+ cec_fill_conn_info_from_drm(&conn_info, &aconnector->base);
+ aconnector->notifier =
+ cec_notifier_conn_register(hdmi_dev, NULL, &conn_info);
+ if (!aconnector->notifier) {
+ drm_err(ddev, "Failed to create cec notifier\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
/*
* Note: this function assumes that dc_link_detect() was called for the
@@ -8341,6 +8423,10 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ amdgpu_dm_initialize_hdmi_connector(aconnector);
+
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
@@ -8896,6 +8982,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
@@ -8922,14 +9009,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
- if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
- if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -9066,7 +9154,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
afb->tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
- afb->tmz_surface, false);
+ afb->tmz_surface);
drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
@@ -9100,7 +9188,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9266,11 +9354,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
}
mutex_unlock(&dm->dc_lock);
@@ -10029,14 +10117,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ uint8_t cnt;
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- acrtc->dm_irq_params.window_param.update_win = true;
-
- /**
- * It takes 2 frames for HW to stably generate CRC when
- * resuming from suspend, so we set skip_frame_cnt 2.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
+ if (acrtc->dm_irq_params.window_param[cnt].enable) {
+ acrtc->dm_irq_params.window_param[cnt].update_win = true;
+
+ /**
+ * It takes 2 frames for HW to stably generate CRC when
+ * resuming from suspend, so we set skip_frame_cnt 2.
+ * resuming from suspend, so we set skip_frame_cnt to 2.
+ acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2;
+ }
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -11379,6 +11472,25 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
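+ /* A flip whose FB moves between memory domains (e.g. VRAM <-> GTT)
+ * needs full validation, so it cannot be done as an async flip.
+ */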
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11576,10 +11688,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11874,9 +11982,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6464a8378387..d2703ca7dff3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -541,12 +541,12 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
- * @secure_display_ctxs:
+ * @secure_display_ctx:
*
- * Store the ROI information and the work_struct to command dmub and psp for
- * all crtcs.
+ * Store secure display relevant info, e.g. the ROI information,
+ * the work_struct to command dmub, etc.
*/
- struct secure_display_context *secure_display_ctxs;
+ struct secure_display_context secure_display_ctx;
#endif
/**
* @hpd_rx_offload_wq:
@@ -671,6 +671,8 @@ struct amdgpu_dm_connector {
uint32_t connector_id;
int bl_idx;
+ struct cec_notifier *notifier;
+
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
const struct drm_edid *drm_edid;
@@ -697,6 +699,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
@@ -1010,4 +1014,8 @@ void dm_free_gpu_mem(struct amdgpu_device *adev,
bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index f936a35fa9eb..033bd817d871 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,6 +30,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -83,45 +84,274 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
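+/*
+ * Build a deterministic connector -> phy id mapping: sort connected
+ * connectors by link encoder hw instance first, then order MST siblings
+ * by tree depth (LCT) and RAD.
+ */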
+static void update_phy_id_mapping(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
+ struct drm_connector_list_iter iter;
+ uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;
+
+ dm->secure_display_ctx.phy_mapping_updated = false;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->status != connector_status_connected)
+ continue;
+
+ if (idx >= AMDGPU_DM_MAX_CRTC) {
+ DRM_WARN("%s connected connectors exceed max crtc count\n", __func__);
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ sort_connector[idx] = aconnector;
+ idx++;
+ connector_cnt++;
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* sort connectors by link_enc_hw_instance first */
+ for (idx = connector_cnt; idx > 1; idx--) {
+ for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
+ if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
+ sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
+ swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
+ }
+ }
+
+ /*
+ * Sort mst connectors by RAD. mst connectors with the same enc_hw_instance are already
+ * sorted together above.
+ */
+ for (idx = 0; idx < connector_cnt; /*Do nothing*/) {
+ if (sort_connector[idx]->mst_root) {
+ uint8_t i, j, k;
+ uint8_t mst_con_cnt = 1;
+
+ for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
+ if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
+ mst_con_cnt++;
+ else
+ break;
+ }
+
+ for (i = mst_con_cnt; i > 1; i--) {
+ for (j = idx; j < (idx + i - 2); j++) {
+ int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
+ int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
+ u8 *rad;
+ u8 *next_rad;
+ bool swap = false;
+
+ /* Sort by mst tree depth first. Then compare RAD if depth is the same */
+ if (mstb_lct > next_mstb_lct) {
+ swap = true;
+ } else if (mstb_lct == next_mstb_lct) {
+ if (mstb_lct == 1) {
+ if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
+ swap = true;
+ } else if (mstb_lct > 1) {
+ rad = sort_connector[j]->mst_output_port->parent->rad;
+ next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;
+
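+ /* Each RAD byte packs two 4-bit port numbers; even hops use the high nibble. */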
+ for (k = 0; k < mstb_lct - 1; k++) {
+ int shift = (k % 2) ? 0 : 4;
+ int port_num = (rad[k / 2] >> shift) & 0xf;
+ int next_port_num = (next_rad[k / 2] >> shift) & 0xf;
+
+ if (port_num > next_port_num) {
+ swap = true;
+ break;
+ }
+ }
+ } else {
+ DRM_ERROR("MST LCT should not be < 1");
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+ }
+
+ if (swap)
+ swap(sort_connector[j], sort_connector[j + 1]);
+ }
+ }
+
+ idx += mst_con_cnt;
+ } else {
+ idx++;
+ }
+ }
+
+ /* Sorting complete. Assign the relevant result to dm->secure_display_ctx.phy_id_mapping[] */
+ memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
+ for (idx = 0; idx < connector_cnt; idx++) {
+ aconnector = sort_connector[idx];
+
+ dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;
+
+ if (sort_connector[idx]->mst_root) {
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
+ dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
+ memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
+ aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
+ }
+ }
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
+ dm->secure_display_ctx.phy_mapping_updated = true;
+}
+
+static bool get_phy_id(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
+{
+ int idx, idx_2;
+ bool found = false;
+
+ /*
+ * Assume secure display starts after all connectors are probed. The
+ * connection config is static as well.
+ */
+ if (!dm->secure_display_ctx.phy_mapping_updated) {
+ DRM_WARN("%s The phy id table should be updated before reading it", __func__);
+ return false;
+ }
+
+ for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
+ DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
+ return false;
+ }
+
+ if (aconnector->dc_link->link_enc_hw_inst ==
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
+ found = true;
+ goto out;
+ } else {
+ /* Could be caused by wrongly passing the mst root connector */
+ if (!aconnector->mst_output_port) {
+ DRM_ERROR("%s MST connector has no output port assigned", __func__);
+ return false;
+ }
+
+ if (aconnector->mst_root &&
+ aconnector->mst_root->mst_mgr.mst_primary == NULL) {
+ DRM_WARN("%s passed a stale mst connector", __func__);
+ }
+
+ if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
+ aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
+ if (aconnector->mst_output_port->parent->lct == 1) {
+ found = true;
+ goto out;
+ } else if (aconnector->mst_output_port->parent->lct > 1) {
+ /* Check RAD */
+ for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
+ int shift = (idx_2 % 2) ? 0 : 4;
+ int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
+ int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;
+
+ if (port_num != port_num2)
+ break;
+ }
+
+ if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
+ found = true;
+ goto out;
+ }
+ } else {
+ DRM_ERROR("LCT should be >= 1");
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+out:
+ if (found) {
+ DRM_DEBUG_DRIVER("Associated secure display PHY ID %d", idx);
+ *phy_id = idx;
+ } else {
+ DRM_WARN("Can't find associated phy ID");
+ return false;
+ }
+
+ return true;
+}
+
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_dm_connector *aconnector;
bool was_activated;
+ uint8_t phy_id;
+ unsigned long flags;
+ int i;
- spin_lock_irq(&drm_dev->event_lock);
- was_activated = acrtc->dm_irq_params.window_param.activated;
- acrtc->dm_irq_params.window_param.x_start = 0;
- acrtc->dm_irq_params.window_param.y_start = 0;
- acrtc->dm_irq_params.window_param.x_end = 0;
- acrtc->dm_irq_params.window_param.y_end = 0;
- acrtc->dm_irq_params.window_param.activated = false;
- acrtc->dm_irq_params.window_param.update_win = false;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- spin_unlock_irq(&drm_dev->event_lock);
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ was_activated = acrtc->dm_irq_params.crc_window_activated;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ acrtc->dm_irq_params.window_param[i].x_start = 0;
+ acrtc->dm_irq_params.window_param[i].y_start = 0;
+ acrtc->dm_irq_params.window_param[i].x_end = 0;
+ acrtc->dm_irq_params.window_param[i].y_end = 0;
+ acrtc->dm_irq_params.window_param[i].enable = false;
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
+ }
+ acrtc->dm_irq_params.crc_window_activated = false;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
/* Disable secure_display if it was enabled */
- if (was_activated) {
+ if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
/* stop ROI update on this crtc */
- flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work);
- flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work);
- dc_stream_forward_crc_window(stream, NULL, true);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
+ else
+ dc_stream_forward_crc_window(stream, NULL, phy_id, true);
+ } else {
+ DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
+ }
}
}
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct psp_context *psp;
struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
uint8_t phy_inst;
+ struct amdgpu_display_manager *dm;
+ struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t roi_idx = 0;
int ret;
+ int i;
- secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
@@ -133,21 +363,50 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
return;
}
+ dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
- phy_inst = stream->link->link_enc_hw_inst;
-
- /* need lock for multiple crtcs to use the command buffer */
- mutex_lock(&psp->securedisplay_context.mutex);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ if (!aconnector)
+ return;
- psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
- TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_inst)) {
+ DRM_WARN("%s Can't find matching phy id!", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
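+ /* Take a consistent snapshot of the per-window CRC results before
+ * handing them to the PSP.
+ */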
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ /* need lock for multiple crtcs to use the command buffer */
+ mutex_lock(&psp->securedisplay_context.mutex);
/* PSP TA is expected to finish data transmission over I2C within current frame,
* even there are up to 4 crtcs request to send in this frame.
*/
- ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (dm->secure_display_ctx.support_mul_roi) {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;
+
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ if (crc_cpy[i].crc_ready)
+ roi_idx |= 1 << i;
+ }
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+ } else {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ }
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
@@ -160,22 +419,47 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct amdgpu_display_manager *dm;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
+ struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t phy_id;
- secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector)
+ return;
+
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_id)) {
+ DRM_WARN("%s Can't find matching phy id!", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+
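+ /* Snapshot the ROI array under the event lock so dmub receives a
+ * consistent set of windows.
+ */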
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, roi_cpy,
+ phy_id, false);
+ else
+ dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
+ phy_id, false);
mutex_unlock(&dm->dc_lock);
}
@@ -186,7 +470,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
bool ret = false;
spin_lock_irq(&drm_dev->event_lock);
- ret = acrtc->dm_irq_params.window_param.activated;
+ ret = acrtc->dm_irq_params.crc_window_activated;
spin_unlock_irq(&drm_dev->event_lock);
return ret;
@@ -224,10 +508,14 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, NULL, enable, enable)) {
+ stream_state, NULL, enable, enable, 0, true)) {
ret = -EINVAL;
goto unlock;
}
@@ -258,6 +546,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
struct drm_crtc_commit *commit;
struct dm_crtc_state *crtc_state;
struct drm_device *drm_dev = crtc->dev;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+#endif
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct drm_dp_aux *aux = NULL;
bool enable = false;
@@ -357,6 +649,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -367,16 +670,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
@@ -402,6 +696,13 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Initialize phy id mapping table for secure display */
+ if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
+ !dm->secure_display_ctx.phy_mapping_updated)
+ update_phy_id_mapping(adev);
+#endif
+
cleanup:
if (commit)
drm_crtc_commit_put(commit);
@@ -456,7 +757,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
}
if (dm_is_crc_source_crtc(cur_crc_src)) {
- if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
&crcs[0], &crcs[1], &crcs[2]))
return;
@@ -472,8 +773,17 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
- struct secure_display_context *secure_display_ctx = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
+ bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
+ uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
unsigned long flags1;
+ bool forward_roi_change = false;
+ bool notify_ta = false;
+ bool all_crc_ready = true;
+ struct dc_stream_state *stream_state;
+ int i;
if (crtc == NULL)
return;
@@ -481,78 +791,160 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
acrtc = to_amdgpu_crtc(crtc);
adev = drm_to_adev(crtc->dev);
drm_dev = crtc->dev;
+ stream_state = to_dm_crtc_state(crtc->state)->stream;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
- !dm_is_crc_source_crtc(cur_crc_src))
- goto cleanup;
-
- if (!acrtc->dm_irq_params.window_param.activated)
- goto cleanup;
+ !dm_is_crc_source_crtc(cur_crc_src)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
+ }
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
+ if (!acrtc->dm_irq_params.crc_window_activated) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
}
- secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
- if (WARN_ON(secure_display_ctx->crtc != crtc)) {
- /* We have set the crtc when creating secure_display_context,
+ crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
+ if (WARN_ON(crtc_ctx->crtc != crtc)) {
+ /* We have set the crtc when creating secure_display_crtc_context,
* don't expect it to be changed here.
*/
- secure_display_ctx->crtc = crtc;
+ crtc_ctx->crtc = crtc;
}
- if (acrtc->dm_irq_params.window_param.update_win) {
- /* prepare work for dmub to update ROI */
- secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
- secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
- acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
- acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&secure_display_ctx->forward_roi_work);
-
- acrtc->dm_irq_params.window_param.update_win = false;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
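+ /* Program HW CRC windows A and B with the same user-supplied ROI. */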
+ struct crc_params crc_window = {
+ .windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ .windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ };
+
+ crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;
+
+ if (!acrtc->dm_irq_params.window_param[i].enable) {
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- /* Statically skip 1 frame, because we may need to wait below things
- * before sending ROI to dmub:
- * 1. We defer the work by using system workqueue.
- * 2. We may need to wait for dc_lock before accessing dmub.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
+ if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- } else {
- /* prepare work for psp to read ROI/CRC and send to I2C */
- schedule_work(&secure_display_ctx->notify_ta_work);
+ if (acrtc->dm_irq_params.window_param[i].update_win) {
+ crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
+ crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
+ crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
+ crc_window.windowa_y_start;
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to dmub to update ROI */
+ forward_roi_change = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* update ROI via dm */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ reset_crc_frame_count[i] = true;
+
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait on the
+ * following before sending the ROI to dmub:
+ * 1. We defer the work by using system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ } else {
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
+ &crc_r[i], &crc_g[i], &crc_b[i]))
+ DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i);
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to psp to read ROI/CRC and output via I2C */
+ notify_ta = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* Keep overwriting so the ROI window does not get changed. */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ /* crc ready for psp to read out */
+ crtc_ctx->crc_info.crc[i].crc_ready = true;
+ }
}
-cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+
+ if (forward_roi_change)
+ schedule_work(&crtc_ctx->forward_roi_work);
+
+ if (notify_ta)
+ schedule_work(&crtc_ctx->notify_ta_work);
+
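+ /* Publish this frame's CRC values and frame counts under the
+ * crc_info lock so readers see a consistent set.
+ */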
+ spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
+ crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
+ crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];
+
+ if (!crtc_ctx->roi[i].enable) {
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ continue;
+ }
+
+ if (!crtc_ctx->crc_info.crc[i].crc_ready)
+ all_crc_ready = false;
+
+ if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
+ /* Reset the reference frame count after the user updates the ROI
+ * or it reaches the maximum value.
+ */
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ else
+ crtc_ctx->crc_info.crc[i].frame_count += 1;
+ }
+ spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
+
+ if (all_crc_ready)
+ complete_all(&crtc_ctx->crc_info.completion);
}
-struct secure_display_context *
-amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
- struct secure_display_context *secure_display_ctxs = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
int i;
- secure_display_ctxs = kcalloc(adev->mode_info.num_crtc,
- sizeof(struct secure_display_context),
+ crtc_ctx = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct secure_display_crtc_context),
GFP_KERNEL);
- if (!secure_display_ctxs)
- return NULL;
+ if (!crtc_ctx) {
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
+ return;
+ }
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
- INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
- secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
+ INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+ INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
+ spin_lock_init(&crtc_ctx[i].crc_info.lock);
}
- return secure_display_ctxs;
+ adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
+
+ adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 748e80ef40d0..3da056c8d20b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -40,20 +40,53 @@ enum amdgpu_dm_pipe_crc_source {
};
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#define MAX_CRTC 6
+
+enum secure_display_mode {
+ /* via dmub + psp */
+ LEGACY_MODE = 0,
+ /* driver directly */
+ DISPLAY_CRC_MODE,
+ SECURE_DISPLAY_MODE_MAX,
+};
+
+struct phy_id_mapping {
+ bool assigned;
+ bool is_mst;
+ uint8_t enc_hw_inst;
+ u8 lct;
+ u8 port_num;
+ u8 rad[8];
+};
+
+struct crc_data {
+ uint32_t crc_R;
+ uint32_t crc_G;
+ uint32_t crc_B;
+ uint32_t frame_count;
+ bool crc_ready;
+};
+
+struct crc_info {
+ struct crc_data crc[MAX_CRC_WINDOW_NUM];
+ struct completion completion;
+ spinlock_t lock;
+};
+
struct crc_window_param {
uint16_t x_start;
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
/* CRC window is activated or not*/
- bool activated;
+ bool enable;
/* Update crc window during vertical blank or not */
bool update_win;
/* skip reading/writing for few frames */
int skip_frame_cnt;
};
-struct secure_display_context {
+struct secure_display_crtc_context {
/* work to notify PSP TA*/
struct work_struct notify_ta_work;
@@ -63,7 +96,20 @@ struct secure_display_context {
struct drm_crtc *crtc;
/* Region of Interest (ROI) */
- struct rect rect;
+ struct crc_window roi[MAX_CRC_WINDOW_NUM];
+
+ struct crc_info crc_info;
+};
+
+struct secure_display_context {
+
+ struct secure_display_crtc_context *crtc_ctx;
+ /* Whether dmub support multiple ROI setting */
+ bool support_mul_roi;
+ enum secure_display_mode op_mode;
+ bool phy_mapping_updated;
+ int phy_id_mapping_cnt;
+ struct phy_id_mapping phy_id_mapping[MAX_CRTC];
};
#endif
@@ -95,8 +141,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
- struct amdgpu_device *adev);
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 64a041c2af05..36a830a7440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -142,7 +142,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
} else if (link->psr_settings.psr_feature_enabled &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 6a97bb2d9160..049046c60462 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -25,6 +25,7 @@
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
+#include <media/cec-notifier.h>
#include "dc.h"
#include "amdgpu.h"
@@ -258,7 +259,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
struct dc_link *link = connector->dc_link;
struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -389,7 +390,7 @@ static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf,
struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -613,7 +614,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
uint32_t wr_buf_size = 40;
long param[3];
bool use_prefer_link_setting;
- struct link_training_settings link_lane_settings;
+ struct link_training_settings link_lane_settings = {0};
int max_param_num = 3;
uint8_t param_nums = 0;
int r = 0;
@@ -768,7 +769,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
- struct link_training_settings link_training_settings;
+ struct link_training_settings link_training_settings = {0};
int i;
if (size == 0)
@@ -902,9 +903,10 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
{
struct amdgpu_device *adev = m->private;
struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_fw_meta_info *fw_meta_info = NULL;
struct dmub_debugfs_trace_entry *entries;
uint8_t *tbuf_base;
- uint32_t tbuf_size, max_entries, num_entries, i;
+ uint32_t tbuf_size, max_entries, num_entries, first_entry, i;
if (!fb_info)
return 0;
@@ -913,20 +915,42 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
if (!tbuf_base)
return 0;
- tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ if (adev->dm.dmub_srv)
+ fw_meta_info = &adev->dm.dmub_srv->meta_info;
+
+ tbuf_size = fw_meta_info ? fw_meta_info->trace_buffer_size :
+ DMUB_TRACE_BUFFER_SIZE;
max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
sizeof(struct dmub_debugfs_trace_entry);
num_entries =
((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+ /* DMCUB tracebuffer is a ring. If it rolled over, print a hint that
+ * entries are being overwritten.
+ */
+ if (num_entries > max_entries)
+ seq_printf(m, "...\n");
+
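+ /* entry_count grows monotonically, so the modulo gives the oldest
+ * slot in the ring.
+ */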
+ first_entry = num_entries % max_entries;
num_entries = min(num_entries, max_entries);
entries = (struct dmub_debugfs_trace_entry
*)(tbuf_base +
sizeof(struct dmub_debugfs_trace_header));
- for (i = 0; i < num_entries; ++i) {
+ /* To print entries chronologically, start from the oldest entry and go to
+ * the top of the buffer, then from the base of the buffer to the oldest entry.
+ */
+ for (i = first_entry; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+ for (i = 0; i < first_entry; ++i) {
struct dmub_debugfs_trace_entry *entry = &entries[i];
seq_printf(m,
@@ -2825,6 +2849,67 @@ static int is_dpia_link_show(struct seq_file *m, void *data)
return 0;
}
+/**
+ * hdmi_cec_state_show - Read out the HDMI-CEC feature status
+ * @m: sequence file.
+ * @data: unused.
+ *
+ * Return: 0 on success
+ */
+static int hdmi_cec_state_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ seq_printf(m, "%s:%d\n", connector->name, connector->base.id);
+ seq_printf(m, "HDMI-CEC status: %d\n", aconnector->notifier ? 1 : 0);
+
+ return 0;
+}
+
+/**
+ * hdmi_cec_state_write - Enable/Disable HDMI-CEC feature from driver side
+ * @f: file structure.
+ * @buf: userspace buffer. Set to '1' to enable, '0' to disable the CEC feature.
+ * @size: size of buffer from userspace.
+ * @pos: unused.
+ *
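+ * Example (debugfs path varies by DRM device and connector name):
+ *   echo 1 > /sys/kernel/debug/dri/0/HDMI-A-1/hdmi_cec_state
+ *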
+ * Return: size on success, error code on failure
+ */
+static ssize_t hdmi_cec_state_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int ret;
+ bool enable;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct drm_device *ddev = aconnector->base.dev;
+
+ if (size == 0)
+ return -EINVAL;
+
+ ret = kstrtobool_from_user(buf, size, &enable);
+ if (ret) {
+ drm_dbg_driver(ddev, "invalid user data!\n");
+ return ret;
+ }
+
+ if (enable) {
+ if (aconnector->notifier)
+ return -EINVAL;
+ ret = amdgpu_dm_initialize_hdmi_connector(aconnector);
+ if (ret)
+ return ret;
+ hdmi_cec_set_edid(aconnector);
+ } else {
+ if (!aconnector->notifier)
+ return -EINVAL;
+ cec_notifier_conn_unregister(aconnector->notifier);
+ aconnector->notifier = NULL;
+ }
+
+ return size;
+}
+
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
@@ -2837,6 +2922,7 @@ DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
DEFINE_SHOW_ATTRIBUTE(is_dpia_link);
+DEFINE_SHOW_STORE_ATTRIBUTE(hdmi_cec_state);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -2972,7 +3058,8 @@ static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
- {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+ {"hdmi_cec_state", &hdmi_cec_state_fops}
};
/*
@@ -3457,8 +3544,8 @@ static int crc_win_x_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3474,7 +3561,7 @@ static int crc_win_x_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_start;
+ *val = acrtc->dm_irq_params.window_param[0].x_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3494,8 +3581,8 @@ static int crc_win_y_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3511,7 +3598,7 @@ static int crc_win_y_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_start;
+ *val = acrtc->dm_irq_params.window_param[0].y_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3530,8 +3617,8 @@ static int crc_win_x_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3547,7 +3634,7 @@ static int crc_win_x_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_end;
+ *val = acrtc->dm_irq_params.window_param[0].x_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3566,8 +3653,8 @@ static int crc_win_y_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3583,7 +3670,7 @@ static int crc_win_y_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_end;
+ *val = acrtc->dm_irq_params.window_param[0].y_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3606,13 +3693,14 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- acrtc->dm_irq_params.window_param.activated = true;
- acrtc->dm_irq_params.window_param.update_win = true;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param[0].enable = true;
+ acrtc->dm_irq_params.window_param[0].update_win = true;
+ acrtc->dm_irq_params.window_param[0].skip_frame_cnt = 0;
+ acrtc->dm_irq_params.crc_window_activated = true;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6cbbb71d752b..fbd80d8545a8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -885,6 +885,12 @@ bool dm_helpers_dp_write_dsc_enable(
return ret;
}
+bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+}
+
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
bool dp_sink_present;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6a7ecc1e4602..6c9de834455b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -39,7 +39,9 @@ struct dm_irq_params {
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source crc_src;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- struct crc_window_param window_param;
+ struct crc_window_param window_param[MAX_CRC_WINDOW_NUM];
+	/* Set when at least one CRC window is activated */
+ bool crc_window_activated;
#endif
#endif
};
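
With window_param now an array, each window carries its own enable/update state while the new crc_window_activated flag summarizes them for the interrupt path. A minimal standalone sketch of that aggregation, assuming MAX_CRC_WINDOW_NUM is 2 (an illustrative value) and a pared-down mirror of the struct:

#include <stdbool.h>
#include <stddef.h>

#define MAX_CRC_WINDOW_NUM 2	/* illustrative value, not restated by the patch */

struct crc_window_param {
	bool enable;		/* per-window; was "activated" before the rename */
	bool update_win;
	int skip_frame_cnt;
};

/* Recompute the aggregate flag the way a consumer might:
 * true as soon as any one window is enabled. */
static bool any_crc_window_active(const struct crc_window_param win[MAX_CRC_WINDOW_NUM])
{
	size_t i;

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++)
		if (win[i].enable)
			return true;
	return false;
}

Storing the summary flag next to the array, rather than rescanning every window, is presumably what keeps the vblank/IRQ path cheap.
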
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e4359490613..07e744da7bf4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -155,6 +155,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +193,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -504,9 +513,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -590,11 +597,12 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_PROBE, true);
- if (drm_connector_init(
+ if (drm_connector_dynamic_init(
dev,
connector,
&dm_dp_mst_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort)) {
+ DRM_MODE_CONNECTOR_DisplayPort,
+ NULL)) {
kfree(aconnector);
return NULL;
}
@@ -1688,16 +1696,16 @@ clean_exit:
return ret;
}
-static unsigned int kbps_from_pbn(unsigned int pbn)
+static uint32_t kbps_from_pbn(unsigned int pbn)
{
- unsigned int kbps = pbn;
+ uint64_t kbps = (uint64_t)pbn;
kbps *= (1000000 / PEAK_FACTOR_X1000);
kbps *= 8;
kbps *= 54;
kbps /= 64;
- return kbps;
+ return (uint32_t)kbps;
}
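
The uint64_t widening here is an overflow fix: with PEAK_FACTOR_X1000 at its usual value of 1006 (assumed, not restated by this hunk), the intermediate product is pbn * 994 * 8 * 54, which no longer fits in 32 bits once pbn exceeds roughly 10000, a value reachable at the highest MST link rates. A self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

#define PEAK_FACTOR_X1000 1006	/* value assumed from the MST code */

static uint32_t kbps_from_pbn(unsigned int pbn)
{
	uint64_t kbps = (uint64_t)pbn;	/* widen before multiplying */

	kbps *= (1000000 / PEAK_FACTOR_X1000);	/* 994 */
	kbps *= 8;				/* bits per byte */
	kbps *= 54;				/* PBN is in 54/64 MBps units */
	kbps /= 64;

	return (uint32_t)kbps;
}

int main(void)
{
	/* 32-bit math wraps once pbn * 994 * 8 * 54 exceeds 2^32. */
	unsigned int pbn = 12000;
	uint32_t bad = (uint32_t)(pbn * (1000000 / PEAK_FACTOR_X1000) * 8 * 54 / 64);

	printf("64-bit: %u kbps, 32-bit: %u kbps\n", kbps_from_pbn(pbn), bad);
	return 0;
}

For pbn = 12000 the correct result is 80514000 kbps; the 32-bit version wraps to about 13405136.
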
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
@@ -1819,9 +1827,18 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
+	/* Get last DP link BW capability. Mode shall be supported by the legacy peer */
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 && stream_kbps > end_link_bw) {
DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1835,11 +1852,15 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (immediate_upstream_port) {
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+			/* For topology LCT 1 case - only one mstb */
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+		DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
+				"Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
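
The new vc_full_pbn / mst_local_bw pair acts as a change-detection cache: the AUX/DPCD query behind dp_get_link_current_set_bw() is reissued only when the port's full_pbn changes, and the end_link_bw > 0 guard skips the comparison when no valid bandwidth was ever read. A minimal sketch of the same pattern, with hypothetical names and a stubbed query:

#include <stdint.h>

/* Hypothetical cache mirroring mst_local_bw / vc_full_pbn above. */
struct bw_cache {
	int full_pbn;		/* key: PBN value the cached result belongs to */
	uint32_t link_bw_kbps;	/* cached result; 0 means "never read" */
};

static uint32_t query_link_bw(int full_pbn)
{
	/* Stand-in for dp_get_link_current_set_bw(); pretend the link
	 * bandwidth scales with the PBN just for the demo. */
	return (uint32_t)full_pbn * 64u;
}

static uint32_t get_link_bw_cached(struct bw_cache *c, int full_pbn)
{
	if (c->full_pbn != full_pbn) {	/* topology changed: refresh */
		c->link_bw_kbps = query_link_bw(full_pbn);
		c->full_pbn = full_pbn;
	}
	return c->link_bw_kbps;
}
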
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 495e3cd70426..774cc3f4f3fd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -176,7 +177,7 @@ static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier
return AMD_FMT_MOD_GET(TILE, modifier);
}
-static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
+static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
uint64_t tiling_flags)
{
/* Fill GFX8 params */
@@ -189,6 +190,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+ tiling_info->gfxversion = DcGfxVersion8;
/* XXX fix me for VI */
tiling_info->gfx8.num_banks = num_banks;
tiling_info->gfx8.array_mode =
@@ -209,7 +211,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@@ -230,7 +232,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgp
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
uint64_t modifier)
{
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
@@ -260,7 +262,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amd
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
- const union dc_tiling_info *tiling_info,
+ const struct dc_tiling_info *tiling_info,
const struct dc_plane_dcc_param *dcc,
const struct dc_plane_address *address,
const struct plane_size *plane_size)
@@ -307,18 +309,18 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxVersion9;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
@@ -358,10 +360,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
@@ -370,8 +371,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxAddr3;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
dcc->enable = 1;
@@ -835,12 +837,11 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
@@ -900,16 +901,14 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else if (adev->family >= AMDGPU_FAMILY_AI) {
ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else {
@@ -1000,14 +999,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state =
dm_plane_state_new->dc_state;
- bool force_disable_dcc = !plane_state->dcc.enable;
amdgpu_dm_plane_fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
&plane_state->dcc, &plane_state->address,
- afb->tmz_surface, force_disable_dcc);
+ afb->tmz_surface);
}
return 0;
@@ -1421,6 +1419,20 @@ static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}
+static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct dc_plane_state *dc_plane_state;
+
+ if (!dm_plane_state || !dm_plane_state->dc_state)
+ return;
+
+ dc_plane_state = dm_plane_state->dc_state;
+
+	dc_plane_force_update_for_panic(dc_plane_state, fb->modifier != 0);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
@@ -1429,6 +1441,16 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.atomic_async_update = amdgpu_dm_plane_atomic_async_update
};
+static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = {
+ .prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
+ .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
+ .atomic_check = amdgpu_dm_plane_atomic_check,
+ .atomic_async_check = amdgpu_dm_plane_atomic_async_check,
+ .atomic_async_update = amdgpu_dm_plane_atomic_async_update,
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = amdgpu_dm_plane_panic_flush,
+};
+
static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@@ -1855,7 +1877,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
plane->type != DRM_PLANE_TYPE_CURSOR)
drm_plane_enable_fb_damage_clips(plane);
- drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 6498359bff6f..615d2ab2b803 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -47,12 +47,11 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc);
+ bool tmz_surface);
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..45858bf1523d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -201,14 +201,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +250,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (link == NULL)
+ return false;
+
+	for (retry_count = 0; retry_count < 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count == 1000)
+ return false;
+
+ return true;
+}
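
Structurally this helper is a bounded poll: up to 1000 attempts spaced 500 microseconds apart for the 500 ms budget stated in the kernel-doc, with timeout reported separately from success. A self-contained sketch of the pattern, with stubbed state and delay functions (names hypothetical):

#include <stdbool.h>

enum psr_state { PSR_STATE0, PSR_STATE_ACTIVE };	/* stand-ins */

static enum psr_state fake_state = PSR_STATE_ACTIVE;

static enum psr_state read_psr_state(void)
{
	return fake_state;	/* a real driver would query the link */
}

static void delay_us(unsigned int us)
{
	(void)us;		/* no-op stub for the sketch */
}

/* 1000 attempts * 500us = 500ms budget, matching the kernel-doc. */
static bool wait_for_psr_exit(void)
{
	int retry;

	for (retry = 0; retry < 1000; retry++) {
		if (read_psr_state() == PSR_STATE0)
			return true;
		delay_us(500);
	}
	return false;	/* timed out */
}
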
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..e2366321a3c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -34,8 +34,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index c9a6de110b74..a62f6c51301c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -3088,11 +3088,12 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
- if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
- DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ if ((info->ext_disp_conn_info.path[i].caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
- info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
- DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ info->ext_disp_conn_info.path[i].caps &= ~AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ info->ext_disp_conn_info.path[i].caps |= AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
}
}
// Log the Checksum and Voltage Swing
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index ab1132bc896a..d9955c5d2e5e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -174,7 +174,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32)
###############################################################################
# DCN35
###############################################################################
-CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o
+CLK_MGR_DCN35 = dcn35_smu.o dcn351_clk_mgr.o dcn35_clk_mgr.o
AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 0e243f4344d0..4c3e58c730b1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -355,8 +355,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
+ if (ctx->dce_version == DCN_VERSION_3_51)
+ dcn351_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ else
+ dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 7920f6f1aa62..76c612ecfe3c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -34,8 +34,8 @@
#include "dm_services.h"
#include "cyan_skillfish_ip_offset.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "clk/clk_11_0_1_offset.h"
#include "clk/clk_11_0_1_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
new file mode 100644
index 000000000000..6a6ae618650b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dcn35_clk_mgr.h"
+
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define mmCLK1_CLK_PLL_REQ 0x16E37
+
+#define mmCLK1_CLK0_DFS_CNTL 0x16E69
+#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
+#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
+#define mmCLK1_CLK3_DFS_CNTL 0x16E72
+#define mmCLK1_CLK4_DFS_CNTL 0x16E75
+#define mmCLK1_CLK5_DFS_CNTL 0x16E78
+
+#define mmCLK1_CLK0_CURRENT_CNT 0x16EFC
+#define mmCLK1_CLK1_CURRENT_CNT 0x16EFD
+#define mmCLK1_CLK2_CURRENT_CNT 0x16EFE
+#define mmCLK1_CLK3_CURRENT_CNT 0x16EFF
+#define mmCLK1_CLK4_CURRENT_CNT 0x16F00
+#define mmCLK1_CLK5_CURRENT_CNT 0x16F01
+
+#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
+#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
+#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
+#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
+#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
+#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
+
+#define mmCLK1_CLK0_DS_CNTL 0x16E83
+#define mmCLK1_CLK1_DS_CNTL 0x16E8C
+#define mmCLK1_CLK2_DS_CNTL 0x16E95
+#define mmCLK1_CLK3_DS_CNTL 0x16E9E
+#define mmCLK1_CLK4_DS_CNTL 0x16EA7
+#define mmCLK1_CLK5_DS_CNTL 0x16EB0
+
+#define mmCLK1_CLK0_ALLOW_DS 0x16E84
+#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
+#define mmCLK1_CLK2_ALLOW_DS 0x16E96
+#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
+#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
+#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
+
+#define mmCLK5_spll_field_8 0x1B04B
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+
+// DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define CLK_SR_DCN35(reg_name)\
+ .reg_name = mm ## reg_name
+
+static const struct clk_mgr_registers clk_mgr_regs_dcn351 = {
+ CLK_REG_LIST_DCN35()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift_dcn351 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask_dcn351 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
+
+#define TO_CLK_MGR_DCN35(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_dcn35, base)
+
+
+void dcn351_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+	/* Register offsets changed for DCN3.51 */
+ clk_mgr->base.regs = &clk_mgr_regs_dcn351;
+ clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn351;
+ clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn351;
+
+ dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+}
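
dcn351_clk_mgr_construct() is an override-then-delegate wrapper: it installs the DCN3.51 register map, shift, and mask tables, then reuses the DCN3.5 constructor, which (per the dcn35_clk_mgr.c hunk below) only installs its own tables when dce_version is DCN_VERSION_3_5, so the override survives. A reduced sketch of the pattern, with hypothetical types and illustrative offsets:

struct reg_map { unsigned int clk_pll_req; };

struct mgr {
	const struct reg_map *regs;
	int version;
};

enum { VER_35, VER_351 };

static const struct reg_map regs_v35  = { .clk_pll_req = 0x0237 };
static const struct reg_map regs_v351 = { .clk_pll_req = 0x16E37 };

static void construct_v35(struct mgr *m)
{
	if (m->version == VER_35)	/* don't clobber a derived part's map */
		m->regs = &regs_v35;
	/* ... shared init continues here ... */
}

static void construct_v351(struct mgr *m)
{
	m->regs = &regs_v351;		/* install the DCN3.51 map first */
	construct_v35(m);		/* then run the common constructor */
}
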
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b77333817f18..1f974ea3b0c6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -36,15 +36,11 @@
#include "dcn20/dcn20_clk_mgr.h"
-
-
#include "reg_helper.h"
#include "core_types.h"
#include "dcn35_smu.h"
#include "dm_helpers.h"
-/* TODO: remove this include once we ported over remaining clk mgr functions*/
-#include "dcn30/dcn30_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
@@ -55,35 +51,102 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define mmCLK1_CLK_PLL_REQ 0x16E37
+
+#define mmCLK1_CLK0_DFS_CNTL 0x16E69
+#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
+#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
+#define mmCLK1_CLK3_DFS_CNTL 0x16E72
+#define mmCLK1_CLK4_DFS_CNTL 0x16E75
+#define mmCLK1_CLK5_DFS_CNTL 0x16E78
+
+#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB
+#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC
+#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD
+#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE
+#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF
+#define mmCLK1_CLK5_CURRENT_CNT 0x16F00
+
+#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
+#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
+#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
+#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
+#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
+#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
+
+#define mmCLK1_CLK0_DS_CNTL 0x16E83
+#define mmCLK1_CLK1_DS_CNTL 0x16E8C
+#define mmCLK1_CLK2_DS_CNTL 0x16E95
+#define mmCLK1_CLK3_DS_CNTL 0x16E9E
+#define mmCLK1_CLK4_DS_CNTL 0x16EA7
+#define mmCLK1_CLK5_DS_CNTL 0x16EB0
+
+#define mmCLK1_CLK0_ALLOW_DS 0x16E84
+#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
+#define mmCLK1_CLK2_ALLOW_DS 0x16E96
+#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
+#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
+#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
+
+#define mmCLK5_spll_field_8 0x1B04B
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+// DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
-#define regCLK1_CLK_PLL_REQ 0x0237
-#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
-#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
-#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
-#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
-#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
-#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
-#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define REG(reg) \
+ (clk_mgr->regs->reg)
-#define regCLK1_CLK2_BYPASS_CNTL 0x029c
-#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define BASE(seg) BASE_INNER(seg)
-#define regCLK5_0_CLK5_spll_field_8 0x464b
-#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+#define SR(reg_name)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define CLK_SR_DCN35(reg_name)\
+ .reg_name = mm ## reg_name
-#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+static const struct clk_mgr_registers clk_mgr_regs_dcn35 = {
+ CLK_REG_LIST_DCN35()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift_dcn35 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
-#define REG(reg_name) \
- (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+static const struct clk_mgr_mask clk_mgr_mask_dcn35 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
#define TO_CLK_MGR_DCN35(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn35, base)
@@ -338,6 +401,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
dcn35_smu_set_dtbclk(clk_mgr, false);
+
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
@@ -355,11 +419,17 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
- dcn35_smu_set_dtbclk(clk_mgr, true);
- clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ int actual_dtbclk = 0;
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
- clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ dcn35_smu_set_dtbclk(clk_mgr, true);
+
+ actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ if (actual_dtbclk) {
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
}
/* check that we're not already in D0 */
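
The reordering in this hunk is the point: the DTB DTO is programmed first, then SMU is asked to enable DTBCLK, and the cached dtbclk_en / ref_dtbclk_khz are committed only after a readback of CLK1_CLK4_CURRENT_CNT shows a non-zero count. A compressed sketch of that program / verify / commit flow, with stubs standing in for the SMU call and the register read:

#include <stdbool.h>
#include <stdint.h>

static uint32_t fake_current_cnt;

static void smu_set_dtbclk(bool enable)
{
	fake_current_cnt = enable ? 600000 : 0;	/* stub: pretend SMU obeyed */
}

static uint32_t read_current_cnt(void)
{
	return fake_current_cnt;	/* stub for REG_READ(CLK1_CLK4_CURRENT_CNT) */
}

/* Commit cached state only once hardware confirms the clock runs. */
static bool enable_dtbclk_verified(bool *cached_en, int *cached_ref_khz,
				   int new_ref_khz)
{
	smu_set_dtbclk(true);

	if (read_current_cnt()) {	/* non-zero count: clock is ticking */
		*cached_ref_khz = new_ref_khz;
		*cached_en = true;
		return true;
	}
	return false;			/* leave cached state untouched */
}
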
@@ -452,7 +522,6 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
struct fixed31_32 pll_req;
unsigned int fbmult_frac_val = 0;
unsigned int fbmult_int_val = 0;
- struct dc_context *ctx = clk_mgr->base.ctx;
/*
* Register value of fbmult is in 8.16 format, we are converting to 31.32
@@ -512,22 +581,20 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc_context *ctx = clk_mgr->base.ctx;
+
uint32_t ssc_enable;
- REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
- return ssc_enable == 1;
+ return ssc_enable != 0;
}
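
Note the return-value change that accompanies the REG_GET removal: a raw read masked with spll_ssc_en_MASK yields 0 or 0x2000 (bit 13), not the shifted 0/1 value REG_GET produced, so the comparison must be against zero rather than 1:

#include <stdbool.h>
#include <stdint.h>

#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000u	/* bit 13 */

static bool spll_ssc_enabled(uint32_t reg_val)
{
	/* The masked value is 0x2000 when set, so test against zero,
	 * not against 1 as the old shifted REG_GET result allowed. */
	return (reg_val & CLK5_spll_field_8__spll_ssc_en_MASK) != 0;
}
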
static void init_clk_states(struct clk_mgr *clk_mgr)
{
- struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
- clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@@ -538,6 +605,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
init_clk_states(clk_mgr);
// to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
@@ -632,6 +700,7 @@ static struct wm_table lpddr5_wm_table = {
};
static DpmClocks_t_dcn35 dummy_clocks;
+static DpmClocks_t_dcn351 dummy_clocks_dcn351;
static struct dcn35_watermarks dummy_wms = { 0 };
@@ -642,10 +711,10 @@ static struct dcn35_ss_info_table ss_info_table = {
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
- struct dc_context *ctx = clk_mgr->base.ctx;
- uint32_t clock_source;
+ uint32_t clock_source = 0;
+
+ clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK;
- REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
// If it's DFS mode, clock_source is 0.
if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
@@ -755,6 +824,22 @@ static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
+static void dcn351_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+ struct dcn351_smu_dpm_clks *smu_dpm_clks)
+{
+ DpmClocks_t_dcn351 *table = smu_dpm_clks->dpm_clks;
+
+ if (!clk_mgr->smu_ver)
+ return;
+ if (!table || smu_dpm_clks->mc_address.quad_part == 0)
+ return;
+ memset(table, 0, sizeof(*table));
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ smu_dpm_clks->mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ smu_dpm_clks->mc_address.low_part);
+ dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
+}
+
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -1093,6 +1178,57 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};
+static void translate_to_DpmClocks_t_dcn35(struct dcn351_smu_dpm_clks *smu_dpm_clks_a,
+ struct dcn35_smu_dpm_clks *smu_dpm_clks_b)
+{
+	/* Translate between the two table layouts, copying only the clock tables we need */
+ uint8_t i;
+
+ if (smu_dpm_clks_a == NULL || smu_dpm_clks_b == NULL ||
+ smu_dpm_clks_a->dpm_clks == NULL || smu_dpm_clks_b->dpm_clks == NULL)
+ return;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DcfClocks[i] = smu_dpm_clks_a->dpm_clks->DcfClocks[i];
+
+ for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DispClocks[i] = smu_dpm_clks_a->dpm_clks->DispClocks[i];
+
+ for (i = 0; i < NUM_DPPCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DppClocks[i] = smu_dpm_clks_a->dpm_clks->DppClocks[i];
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->FclkClocks_Freq[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Freq[i];
+ smu_dpm_clks_b->dpm_clks->FclkClocks_Voltage[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Voltage[i];
+ }
+ for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].MemClk =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].MemClk;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].UClk =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].UClk;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].Voltage =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].Voltage;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].WckRatio =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].WckRatio;
+ }
+ smu_dpm_clks_b->dpm_clks->MaxGfxClk = smu_dpm_clks_a->dpm_clks->MaxGfxClk;
+ smu_dpm_clks_b->dpm_clks->MinGfxClk = smu_dpm_clks_a->dpm_clks->MinGfxClk;
+ smu_dpm_clks_b->dpm_clks->NumDcfClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumDcfClkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumDispClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumDispClkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumFclkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumFclkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumMemPstatesEnabled =
+ smu_dpm_clks_a->dpm_clks->NumMemPstatesEnabled;
+ smu_dpm_clks_b->dpm_clks->NumSocClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumSocClkLevelsEnabled;
+
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->SocClocks[i] = smu_dpm_clks_a->dpm_clks->SocClocks[i];
+ smu_dpm_clks_b->dpm_clks->SocVoltage[i] = smu_dpm_clks_a->dpm_clks->SocVoltage[i];
+ }
+}
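
A field-by-field translation is needed because DpmClocks_t_dcn351 and DpmClocks_t_dcn35 place their shared fields at different offsets (see the new typedef in dcn35_smu.h below), so a whole-struct memcpy would smear unrelated data across fields. A toy illustration of why:

#include <string.h>

/* Two layouts that share fields but not offsets, as with the DPM tables. */
struct table_a { int dcf[4]; int voltage[4]; int disp[4]; };
struct table_b { int dcf[4]; int disp[4]; };

static void translate(const struct table_a *a, struct table_b *b)
{
	/* memcpy(b, a, sizeof(*b)) would land a->voltage in b->disp;
	 * copy each shared field individually instead. */
	memcpy(b->dcf, a->dcf, sizeof(b->dcf));
	memcpy(b->disp, a->disp, sizeof(b->disp));
}
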
void dcn35_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn35 *clk_mgr,
@@ -1100,6 +1236,7 @@ void dcn35_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
+ struct dcn351_smu_dpm_clks smu_dpm_clks_dcn351 = { 0 };
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn35_funcs;
@@ -1112,6 +1249,12 @@ void dcn35_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
+ if (ctx->dce_version == DCN_VERSION_3_5) {
+ clk_mgr->base.regs = &clk_mgr_regs_dcn35;
+ clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
+ clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
+ }
+
clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
@@ -1130,14 +1273,24 @@ void dcn35_clk_mgr_construct(
DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn35),
&smu_dpm_clks.mc_address.quad_part);
-
if (smu_dpm_clks.dpm_clks == NULL) {
smu_dpm_clks.dpm_clks = &dummy_clocks;
smu_dpm_clks.mc_address.quad_part = 0;
}
-
ASSERT(smu_dpm_clks.dpm_clks);
+ if (ctx->dce_version == DCN_VERSION_3_51) {
+ smu_dpm_clks_dcn351.dpm_clks = (DpmClocks_t_dcn351 *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_GART,
+ sizeof(DpmClocks_t_dcn351),
+ &smu_dpm_clks_dcn351.mc_address.quad_part);
+ if (smu_dpm_clks_dcn351.dpm_clks == NULL) {
+ smu_dpm_clks_dcn351.dpm_clks = &dummy_clocks_dcn351;
+ smu_dpm_clks_dcn351.mc_address.quad_part = 0;
+ }
+ }
+
clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);
if (clk_mgr->base.smu_ver)
@@ -1166,7 +1319,11 @@ void dcn35_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
int i;
- dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ if (ctx->dce_version == DCN_VERSION_3_51) {
+ dcn351_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks_dcn351);
+ translate_to_DpmClocks_t_dcn35(&smu_dpm_clks_dcn351, &smu_dpm_clks);
+		} else {
+			dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+		}
DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
"NumDispClkLevelsEnabled: %d\n"
"NumSocClkLevelsEnabled: %d\n"
@@ -1227,6 +1384,10 @@ void dcn35_clk_mgr_construct(
dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
+ if (smu_dpm_clks_dcn351.dpm_clks && smu_dpm_clks_dcn351.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
+ smu_dpm_clks_dcn351.dpm_clks);
+
if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
bool ips_support = false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
index 1203dc605b12..a12a9bf90806 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
@@ -60,4 +60,8 @@ void dcn35_clk_mgr_construct(struct dc_context *ctx,
void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+void dcn351_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
#endif //__DCN35_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
index 3fae13c73934..ab9d21ba0c43 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -126,18 +126,31 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t_dcn35;
-
-// Throttler Status Bitmask
-
-
-
-
-
-
-
-
-
-
+
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; // Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t Vcn0ClkLevelsEnabled; // Applies to both Vclk0 and Dclk0
+ uint8_t Vcn1ClkLevelsEnabled; // Applies to both Vclk1 and Dclk1
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_dcn351;
#define TABLE_BIOS_IF 0 // Called by BIOS
#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
@@ -163,6 +176,10 @@ struct dcn35_smu_dpm_clks {
union large_integer mc_address;
};
+struct dcn351_smu_dpm_clks {
+ DpmClocks_t_dcn351 *dpm_clks;
+ union large_integer mc_address;
+};
/* TODO: taken from vgh, may not be correct */
struct display_idle_optimization {
unsigned int df_request_disabled : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
index dbfdd3487da5..2e0d34fd7512 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
@@ -43,7 +43,9 @@
#define DALSMC_MSG_ActiveUclkFclk 0x18
#define DALSMC_MSG_IdleUclkFclk 0x19
#define DALSMC_MSG_SetUclkPstateAllow 0x1A
-#define DALSMC_Message_Count 0x1B
+#define DALSMC_MSG_SubvpUclkFclk 0x1B
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#define DALSMC_Message_Count 0x1D
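
DALSMC_Message_Count must stay one past the highest message ID; a C11 static assertion of the kind one could add (not part of this patch) keeps the pair honest:

#include <assert.h>

#define DALSMC_MSG_GetNumUmcChannels 0x1C
#define DALSMC_Message_Count         0x1D

static_assert(DALSMC_Message_Count == DALSMC_MSG_GetNumUmcChannels + 1,
	      "DALSMC_Message_Count must track the last message ID");
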
typedef enum {
FCLK_SWITCH_DISALLOW,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 8cfc5f435937..8082bb877611 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -141,6 +141,20 @@ static bool dcn401_is_ppclk_idle_dpm_enabled(struct clk_mgr_internal *clk_mgr, P
return ppclk_idle_dpm_enabled;
}
+static bool dcn401_is_df_throttle_opt_enabled(struct clk_mgr_internal *clk_mgr)
+{
+ bool is_df_throttle_opt_enabled = false;
+
+ if (ASICREV_IS_GC_12_0_1_A0(clk_mgr->base.ctx->asic_id.hw_internal_rev) &&
+ clk_mgr->smu_ver >= 0x663500) {
+ is_df_throttle_opt_enabled = !clk_mgr->base.ctx->dc->debug.force_subvp_df_throttle;
+ }
+
+ is_df_throttle_opt_enabled &= clk_mgr->smu_present;
+
+ return is_df_throttle_opt_enabled;
+}
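
The gate combines a specific ASIC revision, a minimum SMU firmware version, and the absence of a debug override, then masks the whole result with smu_present. A standalone restatement of the logic, with the version threshold copied from the hunk and everything else passed in as plain booleans:

#include <stdbool.h>
#include <stdint.h>

#define DF_THROTTLE_MIN_SMU_VER 0x663500	/* threshold from the check above */

static bool df_throttle_opt_enabled(bool is_gc_12_0_1_a0, uint32_t smu_ver,
				    bool debug_force_subvp_df_throttle,
				    bool smu_present)
{
	bool en = false;

	if (is_gc_12_0_1_a0 && smu_ver >= DF_THROTTLE_MIN_SMU_VER)
		en = !debug_force_subvp_df_throttle;

	return en && smu_present;	/* no SMU, no optimization */
}
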
+
/* Query SMU for all clock states for a particular clock */
static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0,
unsigned int *num_levels)
@@ -614,207 +628,6 @@ static void dcn401_update_clocks_update_dentist(
}
-static void dcn401_update_clocks_legacy(struct clk_mgr *clk_mgr_base,
- struct dc_state *context,
- bool safe_to_lower)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
- struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
- bool update_dppclk = false;
- bool update_dispclk = false;
- bool enter_display_off = false;
- bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- bool force_reset = false;
- bool update_uclk = false, update_fclk = false;
- bool p_state_change_support;
- bool fclk_p_state_change_support;
- int total_plane_count;
-
- if (dc->work_arounds.skip_clock_update)
- return;
-
- if (clk_mgr_base->clks.dispclk_khz == 0 ||
- (dc->debug.force_clock_mode & 0x1)) {
- /* This is from resume or boot up, if forced_clock cfg option used,
- * we bypass program dispclk and DPPCLK, but need set them for S3.
- */
- force_reset = true;
-
- dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
-
- /* Force_clock_mode 0x1: force reset the clock even it is the same clock
- * as long as it is in Passive level.
- */
- }
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
- if (display_count == 0)
- enter_display_off = true;
-
- if (clk_mgr->smu_present) {
- if (enter_display_off == safe_to_lower)
- dcn401_smu_set_num_of_displays(clk_mgr, display_count);
-
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
-
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
-
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
-
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, true);
- }
- }
-
- if (dc->debug.force_min_dcfclk_mhz > 0)
- new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
- new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
- clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
- clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
- /* We don't actually care about socclk, don't notify SMU of hard min */
- clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
-
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways < new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
-
-
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.prev_p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = p_state_change_support;
- clk_mgr_base->clks.fw_based_mclk_switching = p_state_change_support && new_clocks->fw_based_mclk_switching;
-
- /* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
- if (!clk_mgr_base->clks.fclk_p_state_change_support &&
- update_fclk &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_FCLK)) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, false);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported */
- if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
- clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
- update_uclk = true;
- }
-
- /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
- if (clk_mgr_base->clks.p_state_change_support &&
- (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
- if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
- dpp_clock_lowered = true;
-
- clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
- clk_mgr_base->clks.actual_dppclk_khz = new_clocks->dppclk_khz;
-
- if (clk_mgr->smu_present && !dpp_clock_lowered && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK))
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK, clk_mgr_base->clks.dppclk_khz);
- update_dppclk = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK))
- clk_mgr_base->clks.actual_dispclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz);
-
- update_dispclk = true;
- }
-
- if (!new_clocks->dtbclk_en && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
- }
-
- /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
- if (!dc->debug.disable_dtb_ref_clk_switch &&
- should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- /* DCCG requires KHz precision for DTBCLK */
- clk_mgr_base->clks.ref_dtbclk_khz =
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
-
- dcn401_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
- }
-
- if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
- if (dpp_clock_lowered) {
- /* if clock is being lowered, increase DTO before lowering refclk */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK)) {
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK,
- clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower,
- clk_mgr_base->clks.actual_dppclk_khz);
- }
-
- } else {
- /* if clock is being raised, increase refclk before lowering DTO */
- if (update_dppclk || update_dispclk)
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
- * that we do not lower dto when it is not safe to lower. We do not need to
- * compare the current and new dppclk before calling this function.
- */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.actual_dppclk_khz);
- }
- }
-
- if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
- /*update dmcu for wait_loop count*/
- dmcu->funcs->set_psr_wait_loop(dmcu,
- clk_mgr_base->clks.dispclk_khz / 1000 / 7);
-}
-
static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned int num_steps)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -869,6 +682,12 @@ static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned
params->update_idle_hardmin_params.uclk_mhz,
params->update_idle_hardmin_params.fclk_mhz);
break;
+ case CLK_MGR401_UPDATE_SUBVP_HARDMINS:
+ dcn401_smu_set_subvp_uclk_fclk_hardmin(
+ clk_mgr_internal,
+ params->update_idle_hardmin_params.uclk_mhz,
+ params->update_idle_hardmin_params.fclk_mhz);
+ break;
case CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK:
dcn401_smu_set_min_deep_sleep_dcef_clk(
clk_mgr_internal,
@@ -945,15 +764,21 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
bool update_active_uclk = false;
bool update_idle_fclk = false;
bool update_idle_uclk = false;
+ bool update_subvp_prefetch_dramclk = false;
+ bool update_subvp_prefetch_fclk = false;
bool is_idle_dpm_enabled = dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_FCLK);
+ bool is_df_throttle_opt_enabled = is_idle_dpm_enabled &&
+ dcn401_is_df_throttle_opt_enabled(clk_mgr_internal);
int total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
int active_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz);
int active_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.fclk_khz);
int idle_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_dramclk_khz);
int idle_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_fclk_khz);
+ int subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz);
+ int subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz);
unsigned int num_steps = 0;
@@ -982,15 +807,15 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
update_active_fclk = true;
update_idle_fclk = true;
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = true;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
- }
+			/* To enable FCLK P-state switching we would send PSTATE_SUPPORTED to PMFW, but the message is not supported on DCN401 */
+ // if (clk_mgr_base->clks.fclk_p_state_change_support) {
+ // /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
+ // if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = true;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
+ // }
}
if (!clk_mgr_base->clks.fclk_p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
@@ -1109,6 +934,12 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
}
}
+ if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_dramclk_khz, clk_mgr_base->clks.subvp_prefetch_dramclk_khz)) {
+ clk_mgr_base->clks.subvp_prefetch_dramclk_khz = new_clocks->subvp_prefetch_dramclk_khz;
+ update_subvp_prefetch_dramclk = true;
+ subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz);
+ }
+
/* FCLK */
/* Always update saved value, even if new value not set due to P-State switching unsupported */
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
@@ -1129,6 +960,12 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
}
}
+ if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_fclk_khz, clk_mgr_base->clks.subvp_prefetch_fclk_khz)) {
+ clk_mgr_base->clks.subvp_prefetch_fclk_khz = new_clocks->subvp_prefetch_fclk_khz;
+ update_subvp_prefetch_fclk = true;
+ subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz);
+ }
+
/* When idle DPM is enabled, need to send active and idle hardmins separately */
/* CLK_MGR401_UPDATE_ACTIVE_HARDMINS */
if ((update_active_uclk || update_active_fclk) && is_idle_dpm_enabled) {
@@ -1146,6 +983,14 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
num_steps++;
}
+ /* CLK_MGR401_UPDATE_SUBVP_HARDMINS */
+ if ((update_subvp_prefetch_dramclk || update_subvp_prefetch_fclk) && is_df_throttle_opt_enabled) {
+ block_sequence[num_steps].params.update_idle_hardmin_params.uclk_mhz = subvp_prefetch_dramclk_mhz;
+ block_sequence[num_steps].params.update_idle_hardmin_params.fclk_mhz = subvp_prefetch_fclk_mhz;
+ block_sequence[num_steps].func = CLK_MGR401_UPDATE_SUBVP_HARDMINS;
+ num_steps++;
+ }
+
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (update_active_uclk || update_idle_uclk) {
if (!is_idle_dpm_enabled) {
@@ -1178,14 +1023,14 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
// (*num_steps)++;
// }
- /* disable FCLK P-State support if needed */
- if (!fclk_p_state_change_support &&
- should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = false;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
+ /* disable FCLK P-State support if needed (message not supported on DCN401) */
+ // if (!fclk_p_state_change_support &&
+ // should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
+ // dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = false;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
}
if (new_clocks->fw_based_mclk_switching != clk_mgr_base->clks.fw_based_mclk_switching &&
@@ -1366,11 +1211,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
unsigned int num_steps = 0;
- if (dc->debug.enable_legacy_clock_update) {
- dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
- return;
- }
-
/* build bandwidth related clocks update sequence */
num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
context,
@@ -1505,6 +1345,20 @@ static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool curren
dcn401_execute_block_sequence(clk_mgr_base, num_steps);
}
+static int dcn401_get_hard_min_memclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+}
+
+static int dcn401_get_hard_min_fclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz;
+}
+
/* Get current memclk states, update bounding box */
static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
@@ -1549,6 +1403,15 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
if (clk_mgr->dpm_present && !num_levels)
clk_mgr->dpm_present = false;
+ clk_mgr_base->bw_params->num_channels = dcn401_smu_get_num_of_umc_channels(clk_mgr);
+ if (clk_mgr_base->ctx->dc_bios) {
+ /* use BIOS values if none provided by PMFW */
+ if (clk_mgr_base->bw_params->num_channels == 0) {
+ clk_mgr_base->bw_params->num_channels = clk_mgr_base->ctx->dc_bios->vram_info.num_chans;
+ }
+ clk_mgr_base->bw_params->dram_channel_width_bytes = clk_mgr_base->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ }
+
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
@@ -1638,6 +1501,8 @@ static struct clk_mgr_funcs dcn401_funcs = {
.enable_pme_wa = dcn401_enable_pme_wa,
.is_smu_present = dcn401_is_smu_present,
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
+ .get_hard_min_memclk = dcn401_get_hard_min_memclk,
+ .get_hard_min_fclk = dcn401_get_hard_min_fclk,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 8b0461992b22..6c9ae5ca2c7e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -90,6 +90,7 @@ enum dcn401_clk_mgr_block_sequence_func {
CLK_MGR401_UPDATE_DTBCLK_DTO,
CLK_MGR401_UPDATE_DENTIST,
CLK_MGR401_UPDATE_PSR_WAIT_LOOP,
+ CLK_MGR401_UPDATE_SUBVP_HARDMINS,
};
struct dcn401_clk_mgr_block_sequence {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index 7700477d019b..21c35528f61f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -21,6 +21,14 @@
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+/* temporary defines */
+#ifndef DALSMC_MSG_SubvpUclkFclk
+#define DALSMC_MSG_SubvpUclkFclk 0x1B
+#endif
+#ifndef DALSMC_MSG_GetNumUmcChannels
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#endif
+
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
@@ -296,6 +304,24 @@ bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
return success;
}
+bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
+ uint16_t uclk_freq_mhz,
+ uint16_t fclk_freq_mhz)
+{
+ uint32_t response = 0;
+ bool success;
+
+ /* bits 15:0 for uclk, bits 31:16 for fclk */
+ uint32_t param = ((uint32_t)fclk_freq_mhz << 16) | uclk_freq_mhz;
+
+ smu_print("SMU Set subvp hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz);
+
+ success = dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SubvpUclkFclk, param, &response);
+
+ return success;
+}
+
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz)
{
smu_print("SMU Set min deep sleep dcef clk: freq_mhz = %d MHz\n", freq_mhz);
@@ -311,3 +337,14 @@ void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t n
dcn401_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_NumOfDisplays, num_displays, NULL);
}
+
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr)
+{
+ unsigned int response = 0;
+
+ dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_GetNumUmcChannels, 0, &response);
+
+ smu_print("SMU Get Num UMC Channels: num_umc_channels = %d\n", response);
+
+ return response;
+}
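The SubVP hardmin message packs both clock floors into one 32-bit argument. Below is a minimal standalone sketch of that packing and its inverse (useful when decoding logged PMFW parameters); it assumes only the bit layout used in dcn401_smu_set_subvp_uclk_fclk_hardmin above.

#include <stdint.h>

/* Pack as dcn401_smu_set_subvp_uclk_fclk_hardmin does: bits 15:0 uclk, 31:16 fclk. */
static uint32_t pack_subvp_hardmin(uint16_t uclk_mhz, uint16_t fclk_mhz)
{
	return ((uint32_t)fclk_mhz << 16) | uclk_mhz;
}

/* Inverse, e.g. for decoding a traced parameter value. */
static void unpack_subvp_hardmin(uint32_t param, uint16_t *uclk_mhz, uint16_t *fclk_mhz)
{
	*uclk_mhz = param & 0xffff;
	*fclk_mhz = param >> 16;
}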
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index 651fb8d62864..e02eb1294b37 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -23,7 +23,11 @@ bool dcn401_smu_set_idle_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
uint16_t uclk_freq_mhz,
uint16_t fclk_freq_mhz);
+bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
+ uint16_t uclk_freq_mhz,
+ uint16_t fclk_freq_mhz);
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2723558049d6..cecaadf741ad 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -579,7 +579,7 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
- struct rect *rect, bool is_stop)
+ struct rect *rect, uint8_t phy_id, bool is_stop)
{
struct dmcu *dmcu;
struct dc_dmub_srv *dmub_srv;
@@ -598,7 +598,7 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
if (i == MAX_PIPES)
return false;
- mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
+ mux_mapping.phy_output_num = phy_id;
mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
dmcu = dc->res_pool->dmcu;
@@ -615,6 +615,68 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
return true;
}
+
+static void
+dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
+ struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
+{
+ int i;
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
+ cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;
+
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+
+ if (stop) {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
+ } else {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
+ }
+ }
+
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+bool
+dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window, uint8_t phy_id, bool stop)
+{
+ struct dc_dmub_srv *dmub_srv;
+ struct otg_phy_mux mux_mapping;
+ struct pipe_ctx *pipe;
+ int i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
+ break;
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ mux_mapping.phy_output_num = phy_id;
+ mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
+
+ dmub_srv = dc->ctx->dmub_srv;
+
+ /* forward to DMUB only; no DMCU support */
+ if (dmub_srv)
+ dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
+ else
+ return false;
+
+ return true;
+}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
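A hedged sketch of how a DM-side caller might drive the new multi-window path; the function name is hypothetical, and struct crc_window plus MAX_CRC_WINDOW_NUM come from the dc_types.h changes later in this patch.

/* Hypothetical caller: enable one ROI on CRC engine 0, leave engine 1 idle. */
static bool example_set_crc_windows(struct dc_stream_state *stream, uint8_t phy_id)
{
	struct crc_window windows[MAX_CRC_WINDOW_NUM] = {0};

	windows[0].rect.x = 0;
	windows[0].rect.y = 0;
	windows[0].rect.width = 256;
	windows[0].rect.height = 256;
	windows[0].enable = true;
	/* windows[1].enable stays false */

	return dc_stream_forward_multiple_crc_window(stream, windows, phy_id, false);
}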
/**
@@ -625,15 +687,17 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
* @enable: Enable CRC if true, disable otherwise.
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
+ * @idx: Capture CRC on which CRC engine instance
+ * @reset: Reset CRC engine before the configuration
*
- * By default, only CRC0 is configured, and the entire frame is used to
- * calculate the CRC.
+ * By default, the entire frame is used to calculate the CRC.
*
* Return: %false if the stream is not found or CRC capture is not supported;
* %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
- struct crc_params *crc_window, bool enable, bool continuous)
+ struct crc_params *crc_window, bool enable, bool continuous,
+ uint8_t idx, bool reset)
{
struct pipe_ctx *pipe;
struct crc_params param;
@@ -677,6 +741,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
param.continuous_mode = continuous;
param.enable = enable;
+ param.crc_eng_inst = idx;
+ param.reset = reset;
+
tg = pipe->stream_res.tg;
/* Only call if supported */
@@ -691,6 +758,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
*
* @dc: DC object.
* @stream: The DC stream state of the stream to get CRCs from.
+ * @idx: index of crc engine to get CRC from
* @r_cr: CRC value for the red component.
* @g_y: CRC value for the green component.
* @b_cb: CRC value for the blue component.
@@ -700,7 +768,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* Return:
* %false if stream is not found, or if CRCs are not enabled.
*/
-bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
int i;
@@ -721,7 +789,7 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
tg = pipe->stream_res.tg;
if (tg->funcs->get_crc)
- return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
+ return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);
DC_LOG_WARNING("CRC capture not supported.");
return false;
}
@@ -1173,6 +1241,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
+ get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
@@ -2153,6 +2223,11 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ /* revalidate streams */
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+
dc_stream_log(dc, stream);
set[i].stream = stream;
@@ -2487,7 +2562,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(union dc_tiling_info)) != 0) {
+ sizeof(struct dc_tiling_info)) != 0) {
update_flags->bits.swizzle_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED);
@@ -2982,6 +3057,10 @@ static void copy_surface_update_to_plane(
if (srf_update->cursor_csc_color_matrix)
surface->cursor_csc_color_matrix =
*srf_update->cursor_csc_color_matrix;
+
+ if (srf_update->bias_and_scale.bias_and_scale_valid)
+ surface->bias_and_scale =
+ srf_update->bias_and_scale;
}
static void copy_stream_update_to_stream(struct dc *dc,
@@ -5307,11 +5386,9 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->vm_pa_config.valid) {
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
-
break;
default:
ASSERT(dc->current_state->stream_count == 0);
-
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
dc_state_destruct(dc->current_state);
@@ -5435,6 +5512,11 @@ bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
+ int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0;
+ enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0};
+ struct pipe_ctx *pipe = NULL;
+ struct dc_state *context = dc->current_state;
+
if (dc->debug.disable_idle_power_optimizations) {
DC_LOG_DEBUG("%s: disabled\n", __func__);
return;
@@ -5459,6 +5541,23 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
dc->idle_optimizations_allowed = allow;
DC_LOG_DEBUG("%s: %s\n", __func__, allow ? "enabled" : "disabled");
}
+
+ // log idle clocks and SubVP pipe types at idle optimization time
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk)
+ idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr);
+
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk)
+ idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
+ }
+
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinFclk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
+ subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
+
}
void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
@@ -6056,7 +6155,7 @@ void dc_query_current_properties(struct dc *dc, struct dc_current_properties *pr
bool subvp_sw_cursor_req = false;
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
+ if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
subvp_sw_cursor_req = true;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 252af83e34a5..6eb9bae3af91 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -425,6 +425,44 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for VABC */
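+/* PWM backlight shows red, AMD AUX green, VESA AUX blue; no eDP link shows black */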
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ struct dc_link *edp_link = NULL;
+
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link) {
+ if (pipe_ctx->stream->link->connector_signal == SIGNAL_TYPE_EDP)
+ edp_link = pipe_ctx->stream->link;
+ }
+
+ if (edp_link) {
+ switch (edp_link->backlight_control_type) {
+ case BACKLIGHT_CONTROL_PWM:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_AMD_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_VESA_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ }
+ } else {
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 457d60eeb486..c1b79b379447 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -125,6 +125,14 @@ uint32_t dc_link_bandwidth_kbps(
return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings);
}
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+ return link->dc->link_srv->dp_required_hblank_size_bytes(link,
+ audio_params);
+}
+
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
{
dc->link_srv->get_cur_res_map(dc, map);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 626f75b6ad00..520a34a42827 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -4478,7 +4478,7 @@ static void set_hfvs_info_packet(
static void adaptive_sync_override_dp_info_packets_sdp_line_num(
const struct dc_crtc_timing *timing,
struct enc_sdp_line_num *sdp_line_num,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
uint32_t asic_blank_start = 0;
uint32_t asic_blank_end = 0;
@@ -4493,8 +4493,8 @@ static void adaptive_sync_override_dp_info_packets_sdp_line_num(
asic_blank_end = (asic_blank_start - tg->v_border_bottom -
tg->v_addressable - tg->v_border_top);
- if (pipe_dlg_param->vstartup_start > asic_blank_end) {
- v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end));
+ if (vstartup_start > asic_blank_end) {
+ v_update = (tg->v_total - (vstartup_start - asic_blank_end));
sdp_line_num->adaptive_sync_line_num_valid = true;
sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1);
} else {
@@ -4507,7 +4507,7 @@ static void set_adaptive_sync_info_packet(
struct dc_info_packet *info_packet,
const struct dc_stream_state *stream,
struct encoder_info_frame *info_frame,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
if (!stream->adaptive_sync_infopacket.valid)
return;
@@ -4515,7 +4515,7 @@ static void set_adaptive_sync_info_packet(
adaptive_sync_override_dp_info_packets_sdp_line_num(
&stream->timing,
&info_frame->sdp_line_num,
- pipe_dlg_param);
+ vstartup_start);
*info_packet = stream->adaptive_sync_infopacket;
}
@@ -4548,6 +4548,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
{
enum signal_type signal = SIGNAL_TYPE_NONE;
struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+ unsigned int vstartup_start = 0;
/* default all packets to invalid */
info->avi.valid = false;
@@ -4561,6 +4562,9 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->adaptive_sync.valid = false;
signal = pipe_ctx->stream->signal;
+ if (pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe)
+ vstartup_start = pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe(pipe_ctx);
+
/* HDMi and DP have different info packets*/
if (dc_is_hdmi_signal(signal)) {
set_avi_info_frame(&info->avi, pipe_ctx);
@@ -4582,7 +4586,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_adaptive_sync_info_packet(&info->adaptive_sync,
pipe_ctx->stream,
info,
- &pipe_ctx->pipe_dlg_param);
+ vstartup_start);
}
patch_gamut_packet_checksum(&info->gamut);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 55dc482d9b36..e8134c47fe0d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -37,6 +37,8 @@
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+#ifndef MAX
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#endif
@@ -605,17 +607,6 @@ bool dc_stream_remove_writeback(struct dc *dc,
return true;
}
-bool dc_stream_warmup_writeback(struct dc *dc,
- int num_dwb,
- struct dc_writeback_info *wb_info)
-{
- dc_exit_ips_for_hw_access(dc);
-
- if (dc->hwss.mmhubbub_warmup)
- return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
- else
- return false;
-}
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
{
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ccbb15f1638c..f3471d45b312 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -83,13 +83,6 @@ uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane
/*******************************************************************************
* Public functions
******************************************************************************/
-void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
- uint32_t controller_id)
-{
- plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
- /*register_flip_interrupt(surface);*/
-}
-
struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
{
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
@@ -277,4 +270,50 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
kref_get(&lut->refcount);
}
+void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
+ bool clear_tiling)
+{
+ struct dc *dc;
+ int i;
+
+ if (!plane_state)
+ return;
+
+ dc = plane_state->ctx->dc;
+ if (!dc || !dc->current_state)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx)
+ continue;
+
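+ /* dce_version values at or above DCE_VERSION_MAX enumerate DCN parts, which expose a HUBP; older DCE parts use MEM_INPUT */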
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX) {
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && hubp->funcs->hubp_clear_tiling)
+ hubp->funcs->hubp_clear_tiling(hubp);
+
+ /* force page flip to see the new content of the framebuffer */
+ hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
+ &plane_state->address,
+ true);
+ } else {
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ if (!mi)
+ continue;
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && mi->funcs->mem_input_clear_tiling)
+ mi->funcs->mem_input_clear_tiling(mi);
+
+ /* force page flip to see the new content of the framebuffer */
+ mi->funcs->mem_input_program_surface_flip_and_addr(mi,
+ &plane_state->address,
+ true);
+ }
+ }
+}
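A brief usage sketch: the expected caller is a panic/debug path that has CPU-written a linear image into the plane's framebuffer; the function name here is hypothetical.

/* Hypothetical panic handler step: the panic image is written linearly by the
 * CPU, so clear tiling and force a flip to the plane's current address. */
static void example_show_panic_image(struct dc_plane_state *plane_state)
{
	dc_plane_force_update_for_panic(plane_state, true /* clear_tiling */);
}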
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 08c5a315b3a6..053481ab69ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.310"
+#define DC_VER "3.2.316"
#define MAX_SURFACES 4
#define MAX_PLANES 6
@@ -463,6 +463,7 @@ struct dc_config {
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
+ bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
bool allow_0_dtb_clk;
bool use_assr_psp_message;
@@ -471,6 +472,7 @@ struct dc_config {
bool disable_hbr_audio_dp2;
bool consolidated_dpia_dp_lt;
bool set_pipe_unlock_order;
+ bool enable_dpia_pre_training;
};
enum visual_confirm {
@@ -487,6 +489,7 @@ enum visual_confirm {
VISUAL_CONFIRM_MCLK_SWITCH = 16,
VISUAL_CONFIRM_FAMS2 = 19,
VISUAL_CONFIRM_HW_CURSOR = 20,
+ VISUAL_CONFIRM_VABC = 21,
};
enum dc_psr_power_opts {
@@ -628,6 +631,8 @@ struct dc_clocks {
int bw_dispclk_khz;
int idle_dramclk_khz;
int idle_fclk_khz;
+ int subvp_prefetch_dramclk_khz;
+ int subvp_prefetch_fclk_khz;
};
struct dc_bw_validation_profile {
@@ -772,7 +777,8 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
- uint32_t reserved:25;
+ uint32_t enable_dpia_pre_training:1; /* bit 7 */
+ uint32_t reserved:24;
} bits;
uint32_t raw;
};
@@ -1055,8 +1061,8 @@ struct dc_debug_options {
bool dml21_force_pstate_method;
uint32_t dml21_force_pstate_method_values[MAX_PIPES];
uint32_t dml21_disable_pstate_method_mask;
+ union fw_assisted_mclk_switch_version fams_version;
union dmub_fams2_global_feature_config fams2_config;
- bool enable_legacy_clock_update;
unsigned int force_cositing;
unsigned int disable_spl;
unsigned int force_easf;
@@ -1070,6 +1076,7 @@ struct dc_debug_options {
bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
+ bool force_subvp_df_throttle;
};
@@ -1300,7 +1307,7 @@ struct dc_plane_state {
struct rect clip_rect;
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
@@ -1371,7 +1378,7 @@ struct dc_plane_state {
struct dc_plane_info {
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@@ -1526,6 +1533,7 @@ struct dc_surface_update {
const struct dc_cm2_parameters *cm2_params;
const struct dc_csc_transform *cursor_csc_color_matrix;
unsigned int sdr_white_level_nits;
+ struct dc_bias_and_scale bias_and_scale;
};
/*
@@ -2019,6 +2027,24 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
+struct dp_audio_bandwidth_params {
+ const struct dc_crtc_timing *crtc_timing;
+ enum dp_link_encoding link_encoding;
+ uint32_t channel_count;
+ uint32_t sample_rate_hz;
+};
+
+/* The function calculates the minimum size of hblank (in bytes) needed to
+ * support the specified channel count and sample rate combination, given the
+ * link encoding and timing to be used. This calculation is not supported
+ * for 8b/10b SST.
+ *
+ * return - min hblank size in bytes, 0 if 8b/10b SST.
+ */
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
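A sketch of a caller-side audio-fit check, assuming an existing link and timing; DP_128b_132b_ENCODING is taken from enum dp_link_encoding, the channel count and sample rate are illustrative, and the treat-zero-as-fits policy is an assumption.

static bool example_hblank_fits_audio(const struct dc_link *link,
		const struct dc_crtc_timing *timing,
		uint32_t available_hblank_bytes)
{
	struct dp_audio_bandwidth_params params = {
		.crtc_timing = timing,
		.link_encoding = DP_128b_132b_ENCODING,
		.channel_count = 8,
		.sample_rate_hz = 192000,
	};
	uint32_t required = dc_link_required_hblank_size_bytes(link, &params);

	/* 0 means the calculation is unsupported (8b/10b SST) */
	return required == 0 || required <= available_hblank_bytes;
}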
/* The function takes a snapshot of current link resource allocation state
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
@@ -2378,6 +2404,13 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_hblank_expansion_caps {
+ // 'true' if these are virtual DPCD's HBlank expansion caps (immediately upstream of sink in MST topology),
+ // 'false' if they are sink's HBlank expansion caps
+ bool is_virtual_dpcd_hblank_expansion;
+ struct hblank_expansion_dpcd_caps dpcd_caps;
+};
+
struct dc_sink_fec_caps {
bool is_rx_fec_supported;
bool is_topology_fec_supported;
@@ -2404,6 +2437,7 @@ struct dc_sink {
struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
+ struct dc_sink_hblank_expansion_caps hblank_expansion_caps;
bool is_vsc_sdp_colorimetry_supported;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index f90fc154549a..44ff9abe2880 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1245,7 +1245,7 @@ static int count_active_streams(const struct dc *dc)
for (i = 0; i < dc->current_state->stream_count; ++i) {
struct dc_stream_state *stream = dc->current_state->streams[i];
- if (stream && !stream->dpms_off)
+ if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
count += 1;
}
@@ -1694,10 +1694,10 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
{
uint8_t num_cmds = 1;
uint32_t i;
- union dmub_rb_cmd cmd[MAX_STREAMS + 1];
+ union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
- memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1));
+ memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
@@ -1714,17 +1714,26 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* construct per-stream configs */
for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
- struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
+ struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
+ struct dmub_rb_cmd_fams2 *stream_sub_state_cmd = &cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;
/* configure command header */
- stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
- stream_cmd->header.multi_cmd_pending = 1;
- /* copy stream static state */
- memcpy(&stream_cmd->config.stream,
- &context->bw_ctx.bw.dcn.fams2_stream_params[i],
- sizeof(struct dmub_fams2_stream_static_state));
+ stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.multi_cmd_pending = 1;
+ stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.multi_cmd_pending = 1;
+ /* copy stream static base state */
+ memcpy(&stream_base_cmd->config,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(union dmub_cmd_fams2_config));
+ /* copy stream static sub state */
+ memcpy(&stream_sub_state_cmd->config,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
+ sizeof(union dmub_cmd_fams2_config));
}
}
@@ -1735,8 +1744,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
/* set multi pending for global, and unset for last stream cmd */
global_cmd->header.multi_cmd_pending = 1;
- cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
- num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
+ cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
+ num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
}
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
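With per-stream state split into base and sub-state commands, the array now holds 2*N+1 entries for N streams. The index arithmetic used by the memcpy targets above, restated as standalone helpers (not driver code):

/* cmd[0] = global config, cmd[1..N] = per-stream base state,
 * cmd[N+1..2N] = per-stream sub state; cmd[2N] is sent last and has
 * multi_cmd_pending cleared. */
static inline unsigned int fams2_base_idx(unsigned int stream_idx)
{
	return 1 + stream_idx;
}

static inline unsigned int fams2_sub_idx(unsigned int stream_idx,
		unsigned int num_streams)
{
	return 1 + num_streams + stream_idx;
}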
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 8dd6eb044829..94ce8fe74481 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -969,6 +969,21 @@ union dp_sink_video_fallback_formats {
uint8_t raw;
};
+union dp_receive_port0_cap {
+ struct {
+ uint8_t RESERVED :1;
+ uint8_t LOCAL_EDID_PRESENT :1;
+ uint8_t ASSOCIATED_TO_PRECEDING_PORT:1;
+ uint8_t HBLANK_EXPANSION_CAPABLE :1;
+ uint8_t BUFFER_SIZE_UNIT :1;
+ uint8_t BUFFER_SIZE_PER_PORT :1;
+ uint8_t HBLANK_REDUCTION_CAPABLE :1;
+ uint8_t RESERVED2:1;
+ uint8_t BUFFER_SIZE:8;
+ } bits;
+ uint8_t raw[2];
+};
+
union dpcd_max_uncompressed_pixel_rate_cap {
struct {
uint16_t max_uncompressed_pixel_rate_cap :15;
@@ -1193,6 +1208,7 @@ struct dpcd_caps {
struct replay_info pr_info;
uint16_t edp_oled_emission_rate;
+ union dp_receive_port0_cap receive_port0_cap;
};
union dpcd_sink_ext_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 9014c2409817..9d18f1c08079 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -94,6 +94,11 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const int num_slices_h,
const bool is_dp);
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps);
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing);
+
/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM,
* and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
* Hardware/specs limitation should not be writable by DM.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index c10567ec1c81..5ac55601a6da 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -341,89 +341,101 @@ enum swizzle_mode_addr3_values {
DC_ADDR3_SW_UNKNOWN = DC_ADDR3_SW_MAX
};
-union dc_tiling_info {
-
- struct {
- /* Specifies the number of memory banks for tiling
- * purposes.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 2,4,8,16
- */
- unsigned int num_banks;
- /* Specifies the number of tiles in the x direction
- * to be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_width;
- unsigned int bank_width_c;
- /* Specifies the number of tiles in the y direction to
- * be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_height;
- unsigned int bank_height_c;
- /* Specifies the macro tile aspect ratio. Only applies
- * to 2D and 3D tiling modes.
- */
- unsigned int tile_aspect;
- unsigned int tile_aspect_c;
- /* Specifies the number of bytes that will be stored
- * contiguously for each tile.
- * If the tile data requires more storage than this
- * amount, it is split into multiple slices.
- * This field must not be larger than
- * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
- * Only applies to 2D and 3D tiling modes.
- * For color render targets, TILE_SPLIT >= 256B.
- */
- enum tile_split_values tile_split;
- enum tile_split_values tile_split_c;
- /* Specifies the addressing within a tile.
- * 0x0 - DISPLAY_MICRO_TILING
- * 0x1 - THIN_MICRO_TILING
- * 0x2 - DEPTH_MICRO_TILING
- * 0x3 - ROTATED_MICRO_TILING
- */
- enum tile_mode_values tile_mode;
- enum tile_mode_values tile_mode_c;
- /* Specifies the number of pipes and how they are
- * interleaved in the surface.
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- unsigned int pipe_config;
- /* Specifies the tiling mode of the surface.
- * THIN tiles use an 8x8x1 tile size.
- * THICK tiles use an 8x8x4 tile size.
- * 2D tiling modes rotate banks for successive Z slices
- * 3D tiling modes rotate pipes and banks for Z slices
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- enum array_mode_values array_mode;
- } gfx8;
+enum dc_gfxversion {
+ DcGfxVersion7 = 0,
+ DcGfxVersion8,
+ DcGfxVersion9,
+ DcGfxVersion10,
+ DcGfxVersion11,
+ DcGfxAddr3,
+ DcGfxVersionUnknown
+};
+
+ struct dc_tiling_info {
+ unsigned int gfxversion; // Specifies which part of the union to use; must be a value from enum dc_gfxversion above
+ union {
+ struct {
+ /* Specifies the number of memory banks for tiling
+ * purposes.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 2,4,8,16
+ */
+ unsigned int num_banks;
+ /* Specifies the number of tiles in the x direction
+ * to be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_width;
+ unsigned int bank_width_c;
+ /* Specifies the number of tiles in the y direction to
+ * be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_height;
+ unsigned int bank_height_c;
+ /* Specifies the macro tile aspect ratio. Only applies
+ * to 2D and 3D tiling modes.
+ */
+ unsigned int tile_aspect;
+ unsigned int tile_aspect_c;
+ /* Specifies the number of bytes that will be stored
+ * contiguously for each tile.
+ * If the tile data requires more storage than this
+ * amount, it is split into multiple slices.
+ * This field must not be larger than
+ * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+ * Only applies to 2D and 3D tiling modes.
+ * For color render targets, TILE_SPLIT >= 256B.
+ */
+ enum tile_split_values tile_split;
+ enum tile_split_values tile_split_c;
+ /* Specifies the addressing within a tile.
+ * 0x0 - DISPLAY_MICRO_TILING
+ * 0x1 - THIN_MICRO_TILING
+ * 0x2 - DEPTH_MICRO_TILING
+ * 0x3 - ROTATED_MICRO_TILING
+ */
+ enum tile_mode_values tile_mode;
+ enum tile_mode_values tile_mode_c;
+ /* Specifies the number of pipes and how they are
+ * interleaved in the surface.
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ unsigned int pipe_config;
+ /* Specifies the tiling mode of the surface.
+ * THIN tiles use an 8x8x1 tile size.
+ * THICK tiles use an 8x8x4 tile size.
+ * 2D tiling modes rotate banks for successive Z slices
+ * 3D tiling modes rotate pipes and banks for Z slices
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ enum array_mode_values array_mode;
+ } gfx8;
- struct {
- enum swizzle_mode_values swizzle;
- unsigned int num_pipes;
- unsigned int max_compressed_frags;
- unsigned int pipe_interleave;
-
- unsigned int num_banks;
- unsigned int num_shader_engines;
- unsigned int num_rb_per_se;
- bool shaderEnable;
-
- bool meta_linear;
- bool rb_aligned;
- bool pipe_aligned;
- unsigned int num_pkrs;
- } gfx9;/*gfx9, gfx10 and above*/
- struct {
- enum swizzle_mode_addr3_values swizzle;
- } gfx_addr3;/*gfx with addr3 and above*/
+ struct {
+ enum swizzle_mode_values swizzle;
+ unsigned int num_pipes;
+ unsigned int max_compressed_frags;
+ unsigned int pipe_interleave;
+
+ unsigned int num_banks;
+ unsigned int num_shader_engines;
+ unsigned int num_rb_per_se;
+ bool shaderEnable;
+
+ bool meta_linear;
+ bool rb_aligned;
+ bool pipe_aligned;
+ unsigned int num_pkrs;
+ } gfx9;/*gfx9, gfx10 and above*/
+ struct {
+ enum swizzle_mode_addr3_values swizzle;
+ } gfx_addr3;/*gfx with addr3 and above*/
+ };
};
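With the union now wrapped in a struct, gfxversion is the discriminant that selects which arm is valid. A hedged initialization sketch; the swizzle and pipe count are illustrative values only:

struct dc_tiling_info tiling = {
	.gfxversion = DcGfxVersion9, /* selects the .gfx9 arm below */
	.gfx9 = {
		.swizzle = DC_SW_64KB_S, /* illustrative swizzle mode */
		.num_pipes = 4,
	},
};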
/* Rotation angle */
@@ -975,6 +987,9 @@ struct dc_crtc_timing {
struct dc_crtc_timing_flags flags;
uint32_t dsc_fixed_bits_per_pixel_x16; /* DSC target bitrate in 1/16 of bpp (e.g. 128 -> 8bpp) */
struct dc_dsc_config dsc_cfg;
+
+ /* The number of pixels that HBlank has been expanded by from the original EDID timing. */
+ uint32_t expanded_hblank;
};
enum trigger_delay {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index bd37ec82b42d..fabcefeda288 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -34,4 +34,7 @@ const struct dc_plane_status *dc_plane_get_status(
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
+void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 0e310fd48b5c..3518eb1b8cd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -64,6 +64,13 @@ static void populate_inits_from_splinits(struct scl_inits *inits,
inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
+static void populate_splformat_from_format(enum spl_pixel_format *spl_pixel_format, const enum pixel_format pixel_format)
+{
+ if (pixel_format < PIXEL_FORMAT_INVALID)
+ *spl_pixel_format = (enum spl_pixel_format)pixel_format;
+ else
+ *spl_pixel_format = SPL_PIXEL_FORMAT_INVALID;
+}
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx
/// @param spl_in
@@ -89,7 +96,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->callbacks = dcn2_spl_callbacks;
}
// Make format field from spl_in point to plane_res scl_data format
- spl_in->basic_in.format = (enum spl_pixel_format)pipe_ctx->plane_res.scl_data.format;
+ populate_splformat_from_format(&spl_in->basic_in.format, pipe_ctx->plane_res.scl_data.format);
// Make view_format from basic_out point to view_format from stream
spl_in->basic_out.view_format = (enum spl_view_3d)stream->view_format;
// Populate spl input basic input clip rect from plane state clip rect
@@ -108,12 +115,14 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->basic_in.horizontal_mirror = plane_state->horizontal_mirror;
// Calculate horizontal splits and split index
- spl_in->basic_in.mpc_combine_h = resource_get_mpc_slice_count(pipe_ctx);
+ spl_in->basic_in.num_h_slices_recout_width_align.use_recout_width_aligned = false;
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_num_h_slices =
+ resource_get_mpc_slice_count(pipe_ctx);
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
- spl_in->basic_in.mpc_combine_v = 0;
+ spl_in->basic_in.mpc_h_slice_index = 0;
else
- spl_in->basic_in.mpc_combine_v = resource_get_mpc_slice_index(pipe_ctx);
+ spl_in->basic_in.mpc_h_slice_index = resource_get_mpc_slice_index(pipe_ctx);
populate_splrect_from_rect(&spl_in->basic_out.odm_slice_rect, &odm_slice_src);
spl_in->basic_out.odm_combine_factor = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 860506c6bda4..3e303c7808fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -447,10 +447,6 @@ enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
-bool dc_stream_warmup_writeback(struct dc *dc,
- int num_dwb,
- struct dc_writeback_info *wb_info);
-
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
bool dc_stream_set_dynamic_metadata(struct dc *dc,
@@ -541,17 +537,26 @@ bool dc_stream_get_crtc_position(struct dc *dc,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
+ uint8_t phy_id,
bool is_stop);
+
+bool dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window,
+ uint8_t phy_id,
+ bool stop);
#endif
bool dc_stream_configure_crc(struct dc *dc,
struct dc_stream_state *stream,
struct crc_params *crc_window,
bool enable,
- bool continuous);
+ bool continuous,
+ uint8_t idx,
+ bool reset);
bool dc_stream_get_crc(struct dc *dc,
struct dc_stream_state *stream,
+ uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9466b63644d5..0c2aa91f0a11 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -874,6 +874,14 @@ struct dsc_dec_dpcd_caps {
bool is_dp; /* Decoded format */
};
+struct hblank_expansion_dpcd_caps {
+ bool expansion_supported;
+ bool reduction_supported;
+ bool buffer_unit_bytes; /* True: buffer size in bytes. False: buffer size in pixels */
+ bool buffer_per_port; /* True: buffer size per port. False: buffer size per lane */
+ uint32_t buffer_size; /* Size is (raw DPCD value + 1) * 32, in the unit selected by buffer_unit_bytes */
+};
+
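A one-line decode of the buffer-size rule above, with the raw field taken from the dp_receive_port0_cap union added earlier in this patch:

/* (raw + 1) * 32, in bytes or pixels depending on BUFFER_SIZE_UNIT */
static uint32_t example_hblank_buffer_size(union dp_receive_port0_cap cap)
{
	return ((uint32_t)cap.bits.BUFFER_SIZE + 1) * 32;
}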
struct dc_golden_table {
uint16_t dc_golden_table_ver;
uint32_t aux_dphy_rx_control0_val;
@@ -931,10 +939,17 @@ enum backlight_control_type {
};
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+#define MAX_CRC_WINDOW_NUM 2
+
struct otg_phy_mux {
uint8_t phy_output_num;
uint8_t otg_output_num;
};
+
+struct crc_window {
+ struct rect rect;
+ bool enable;
+};
#endif
enum dc_detect_reason {
@@ -1051,10 +1066,13 @@ enum replay_FW_Message_type {
union replay_error_status {
struct {
- unsigned char STATE_TRANSITION_ERROR :1;
- unsigned char LINK_CRC_ERROR :1;
- unsigned char DESYNC_ERROR :1;
- unsigned char RESERVED :5;
+ unsigned int STATE_TRANSITION_ERROR :1;
+ unsigned int LINK_CRC_ERROR :1;
+ unsigned int DESYNC_ERROR :1;
+ unsigned int RESERVED_3 :1;
+ unsigned int LOW_RR_INCORRECT_VTOTAL :1;
+ unsigned int NO_DOUBLED_RR :1;
+ unsigned int RESERVED_6_7 :2;
} bits;
unsigned char raw;
};
@@ -1101,6 +1119,8 @@ struct replay_config {
union replay_error_status replay_error_status;
/* Replay Low Hz enable Options */
union replay_low_refresh_rate_enable_options low_rr_enable_options;
+ /* Replay coasting vtotal is within low refresh rate range. */
+ bool low_rr_activated;
};
/* Replay feature flags*/
@@ -1125,10 +1145,12 @@ struct replay_settings {
uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
- /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
- uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay pseudo vtotal for low refresh rate*/
+ uint16_t low_rr_full_screen_video_pseudo_vtotal;
/* Replay last pseudo vtotal set to DMUB */
uint16_t last_pseudo_vtotal;
+ /* Replay desync error */
+ uint32_t replay_desync_error_fail_count;
};
/* To split out "global" and "per-panel" config settings.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index b700608e4240..077337698e0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1105,6 +1105,9 @@ static bool dcn401_program_pix_clk(
&dto_params);
} else {
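+ /* actual_pix_clk_100hz is in units of 100 Hz, so this rejects TMDS pixel clocks above 600 MHz */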
+ if (pll_settings->actual_pix_clk_100hz > 6000000UL)
+ return false;
+
/* disables DP DTO when provided with TMDS signal type */
clock_source->ctx->dc->res_pool->dccg->funcs->set_dp_dto(
clock_source->ctx->dc->res_pool->dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index f5e1d9caee4c..1c2009e38aa1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -98,7 +98,7 @@ static enum mi_bits_per_pixel get_mi_bpp(
}
static enum mi_tiling_format get_mi_tiling(
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
switch (tiling_info->gfx8.array_mode) {
case DC_ARRAY_1D_TILED_THIN1:
@@ -133,7 +133,7 @@ static bool is_vert_scan(enum dc_rotation_angle rotation)
static void dce_mi_program_pte_vm(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
@@ -430,7 +430,7 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
}
static void program_tiling(
- struct dce_mem_input *dce_mi, const union dc_tiling_info *info)
+ struct dce_mem_input *dce_mi, const struct dc_tiling_info *info)
{
if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
REG_UPDATE_6(GRPH_CONTROL,
@@ -481,7 +481,6 @@ static void program_tiling(
}
}
-
static void program_size_and_rotation(
struct dce_mem_input *dce_mi,
enum dc_rotation_angle rotation,
@@ -627,10 +626,31 @@ static void program_grph_pixel_format(
GRPH_PRESCALE_B_SIGN, sign);
}
+static void dce_mi_clear_tiling(
+ struct mem_input *mi)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+
+ if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_SW_MODE, DC_SW_LINEAR);
+ }
+
+ if (dce_mi->masks->GRPH_MICRO_TILE_MODE) { /* GFX8 */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_ARRAY_MODE, DC_SW_LINEAR);
+ }
+
+ if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX6 but reuses gfx8 struct */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_ARRAY_MODE, DC_SW_LINEAR);
+ }
+}
+
static void dce_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -650,7 +670,7 @@ static void dce_mi_program_surface_config(
static void dce60_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation, /* not used in DCE6 */
struct dc_plane_dcc_param *dcc,
@@ -884,7 +904,8 @@ static const struct mem_input_funcs dce_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -897,7 +918,8 @@ static const struct mem_input_funcs dce60_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce60_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
#endif
@@ -910,7 +932,8 @@ static const struct mem_input_funcs dce112_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
static const struct mem_input_funcs dce120_mi_funcs = {
@@ -922,7 +945,8 @@ static const struct mem_input_funcs dce120_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
void dce_mem_input_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..5bb8b78bf250 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,7 +63,8 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_1)
return true;
if (link->replay_settings.replay_feature_enabled)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index cae18f8c1c9a..88c75c243bf8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -390,8 +390,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- else
- link->psr_settings.force_ffu_mode = 0;
+
copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 8a3fbf95c48f..2c43c2422638 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -162,7 +162,7 @@ static void enable(struct dce_mem_input *mem_input110)
static void program_tiling(
struct dce_mem_input *mem_input110,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
uint32_t value = 0;
@@ -523,7 +523,7 @@ static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
/* Helper to get table entry from surface info */
static const unsigned int *get_dvmm_hw_setting(
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum surface_pixel_format format,
bool chroma)
{
@@ -563,7 +563,7 @@ static const unsigned int *get_dvmm_hw_setting(
static void dce_mem_input_v_program_pte_vm(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
@@ -636,7 +636,7 @@ static void dce_mem_input_v_program_pte_vm(
static void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index fa422a8cbced..61b0807693fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -2127,70 +2127,131 @@ bool dce110_configure_crc(struct timing_generator *tg,
cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL);
- /* First, disable CRC before we configure it. */
- dm_write_reg(tg->ctx, cntl_addr, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg(tg->ctx, cntl_addr, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
- set_reg_field_value(value, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START);
- set_reg_field_value(value, params->windowa_x_end,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window A y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
- set_reg_field_value(value, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START);
- set_reg_field_value(value, params->windowa_y_end,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
- set_reg_field_value(value, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START);
- set_reg_field_value(value, params->windowb_x_end,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
- set_reg_field_value(value, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START);
- set_reg_field_value(value, params->windowb_y_end,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- value = 0;
- set_reg_field_value(value, params->continuous_mode ? 1 : 0,
- CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
- set_reg_field_value(value, params->selection,
- CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
- set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
- dm_write_reg(tg->ctx, cntl_addr, value);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable. */
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable. */
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC1_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ default:
+ return false;
+ }
return true;
}
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t addr = 0;
@@ -2206,14 +2267,30 @@ bool dce110_get_crc(struct timing_generator *tg,
if (!field)
return false;
- addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
- value = dm_read_reg(tg->ctx, addr);
- *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
- addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
- value = dm_read_reg(tg->ctx, addr);
- *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
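
The two switch arms above differ only in which CRC engine's register block they touch. A table-driven shape avoids the duplication; the standalone sketch below uses made-up offsets and names, and only the bounds check and per-engine indirection mirror the hunk:

#include <stdio.h>

/* Hypothetical per-engine register map; the driver uses the
 * mmCRTC_CRC{0,1}_* tokens seen in the hunk. Offsets are dummies. */
struct crc_eng_regs {
	unsigned int windowa_x, windowa_y, windowb_x, windowb_y;
	unsigned int select_field;	/* CRTC_CRC0_SELECT vs CRTC_CRC1_SELECT */
};

static const struct crc_eng_regs crc_engines[] = {
	{ 0x100, 0x104, 0x108, 0x10c, 0 },	/* engine 0 */
	{ 0x110, 0x114, 0x118, 0x11c, 1 },	/* engine 1 */
};

static int configure_crc(unsigned int crc_eng_inst)
{
	const struct crc_eng_regs *r;

	if (crc_eng_inst >= sizeof(crc_engines) / sizeof(crc_engines[0]))
		return 0;	/* mirrors the default: return false */

	r = &crc_engines[crc_eng_inst];
	printf("program windows at %#x/%#x/%#x/%#x, select field %u\n",
	       r->windowa_x, r->windowa_y, r->windowb_x, r->windowb_y,
	       r->select_field);
	return 1;
}

int main(void)
{
	configure_crc(1);	/* accepted */
	configure_crc(2);	/* rejected, like the default: case */
	return 0;
}

The driver keeps the explicit switch instead, which fits the register-token macros it has to name at compile time.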
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index ee4de740aceb..e4f5cad64f32 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -286,7 +286,7 @@ bool dce110_arm_vert_intr(
bool dce110_configure_crc(struct timing_generator *tg,
const struct crc_params *params);
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
bool dce110_is_two_pixels_per_container(const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index fcf59348eb62..31c4f44ceaac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -1100,45 +1100,79 @@ static bool dce120_configure_crc(struct timing_generator *tg,
if (!dce120_is_tg_enabled(tg))
return false;
- /* First, disable CRC before we configure it. */
- dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
- tg110->offsets.crtc, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
+ tg110->offsets.crtc, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
- CRTC_CRC_EN, params->continuous_mode ? 1 : 0,
- CRTC_CRC0_SELECT, params->selection,
- CRTC_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable. */
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC0_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable. */
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC1_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
-static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
- uint32_t *g_y, uint32_t *b_cb)
+static bool dce120_get_crc(struct timing_generator *tg, uint8_t idx,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value, field;
@@ -1151,14 +1185,30 @@ static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
if (!field)
return false;
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
- tg110->offsets.crtc);
- *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
- tg110->offsets.crtc);
- *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
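
Read together with the dce110 hunks, both timing generators now expose the same two-engine contract: configure with crc_params.crc_eng_inst selecting the engine, then read the signature back with the matching idx. A standalone sketch with stub types (only the field names and the shared engine index follow the hunks; everything else is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver types; only the call order matters here. */
struct crc_params { bool enable, reset, continuous_mode; uint8_t crc_eng_inst; };
struct timing_generator { int unused; };

static bool configure_crc(struct timing_generator *tg, const struct crc_params *p)
{
	(void)tg;
	return p->crc_eng_inst <= 1;	/* two engines, as in the hunks */
}

static bool get_crc(struct timing_generator *tg, uint8_t idx,
		    unsigned int *r_cr, unsigned int *g_y, unsigned int *b_cb)
{
	(void)tg;
	if (idx > 1)
		return false;
	*r_cr = *g_y = *b_cb = 0;	/* a real read returns the CRCn data regs */
	return true;
}

int main(void)
{
	struct timing_generator tg = { 0 };
	struct crc_params p = { .enable = true, .continuous_mode = true,
				.crc_eng_inst = 1 };
	unsigned int r, g, b;

	/* The engine chosen at configure time must match the idx passed
	 * when reading the signature back. */
	if (configure_crc(&tg, &p) && get_crc(&tg, p.crc_eng_inst, &r, &g, &b))
		printf("CRC1: %u %u %u\n", r, g, b);
	return 0;
}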
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 573898984726..f9961a6446f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -168,31 +168,33 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
- uint8_t pwrseq_inst = 0xF;
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
- switch (init_data->eng_id) {
- case ENGINE_ID_DIGA:
- pwrseq_inst = 0;
- break;
- case ENGINE_ID_DIGB:
- pwrseq_inst = 1;
- break;
- default:
- DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
- ASSERT(false);
- break;
- }
-
- if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1)
+ if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1) {
//If supported, power sequencer mapping shall follow the DIG instance
+ uint8_t pwrseq_inst = 0xF;
+
+ switch (init_data->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+ ASSERT(false);
+ break;
+ }
+
dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
- else
+ } else {
/* If not supported, pwrseq will be assigned in order,
* so first pwrseq will be assigned to first panel instance (legacy behavior)
*/
dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst;
+ }
}
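
The rework scopes the DIG-to-power-sequencer mapping to the support_edp0_on_dp1 case and keeps the legacy in-order assignment otherwise. The decision, condensed into a standalone sketch (the 0xF sentinel and the DIGA/DIGB mapping follow the hunk; the rest is illustrative):

#include <stdio.h>

/* Engine ids are illustrative; the driver uses ENGINE_ID_DIGA/DIGB. */
enum eng_id { DIGA, DIGB, DIGC };

static unsigned int pick_pwrseq_inst(int support_edp0_on_dp1,
				     enum eng_id eng, unsigned int panel_inst)
{
	if (!support_edp0_on_dp1)
		return panel_inst;	/* legacy: assigned in panel order */

	switch (eng) {			/* mapping follows the DIG instance */
	case DIGA: return 0;
	case DIGB: return 1;
	default:   return 0xF;		/* unsupported; the driver warns */
	}
}

int main(void)
{
	printf("%u %u %u\n",
	       pick_pwrseq_inst(0, DIGC, 2),	/* 2: legacy path */
	       pick_pwrseq_inst(1, DIGB, 2),	/* 1: follows DIG */
	       pick_pwrseq_inst(1, DIGC, 2));	/* 15: sentinel */
	return 0;
}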
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index b2cea59ba5d4..9a92f73d5b7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -653,8 +653,9 @@ void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_lin
if (!query_dp_alt_from_dmub(enc, &cmd))
return;
- if (cmd.query_dp_alt.data.is_usb &&
- cmd.query_dp_alt.data.is_dp4 == 0)
+ if (cmd.query_dp_alt.data.is_dp_alt_disable == 0 &&
+ cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
index d4a3e811aa39..ea0c9a9d0bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
@@ -28,6 +28,7 @@
#include "link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn35_dio_link_encoder.h"
+#include "dc_dmub_srv.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
@@ -159,6 +160,8 @@ static const struct link_encoder_funcs dcn35_link_enc_funcs = {
.is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
+ .enable_dpia_output = dcn35_link_encoder_enable_dpia_output,
+ .disable_dpia_output = dcn35_link_encoder_disable_dpia_output,
};
void dcn35_link_encoder_construct(
@@ -265,3 +268,80 @@ void dcn35_link_encoder_construct(
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
+
+/* DPIA equivalent of link_transmitter_control. */
+static bool link_dpia_control(struct dc_context *dc_ctx,
+ struct dmub_cmd_dig_dpia_control_data *dpia_control)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA;
+ cmd.dig1_dpia_control.header.sub_type =
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL;
+ cmd.dig1_dpia_control.header.payload_bytes =
+ sizeof(cmd.dig1_dpia_control) -
+ sizeof(cmd.dig1_dpia_control.header);
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
+ dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+}
+
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin unused by DPIA. */
+ dpia_control.hpdsel = 6;
+ dpia_control.dpia_id = dpia_id;
+ dpia_control.fec_rdy = fec_rdy;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+}
+
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
+ return;
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.dpia_id = dpia_id;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+
+ link_encoder_disable(enc10);
+}
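
Both new paths funnel through link_dpia_control(), which frames a DMUB command: the payload size excludes the header, and symclk is derived from the link rate. A standalone sketch of just that framing arithmetic (the struct layout, sizes, and the 27000 kHz reference are assumptions for the sketch, not the real DMUB ABI):

#include <stdio.h>
#include <string.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000u	/* assumed value for the sketch */

/* Stand-ins for the DMUB command layout; field names follow the hunk,
 * types here are arbitrary. */
struct dmub_cmd_header { unsigned char type, sub_type; unsigned short payload_bytes; };
struct dig_dpia_control {
	struct dmub_cmd_header header;
	unsigned char action, enc_id, dpia_id, hpdsel;
	unsigned int symclk_10khz;
};

int main(void)
{
	struct dig_dpia_control cmd;
	unsigned int link_rate = 10;	/* illustrative link-rate multiplier */

	memset(&cmd, 0, sizeof(cmd));
	/* payload_bytes excludes the header, as in link_dpia_control() */
	cmd.header.payload_bytes = sizeof(cmd) - sizeof(cmd.header);
	cmd.symclk_10khz = link_rate * LINK_RATE_REF_FREQ_IN_KHZ / 10;
	/* 6 means "HPD pin unused by DPIA": the register takes hpdsel - 1 = 5 */
	cmd.hpdsel = 6;

	printf("payload=%u symclk_10khz=%u\n",
	       (unsigned int)cmd.header.payload_bytes, cmd.symclk_10khz);
	return 0;
}

In the driver, dc_wake_and_execute_dmub_cmd() then ships the framed command; the sketch stops at the framing.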
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
index d546a3676304..f9d4221f4b43 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
@@ -144,4 +144,22 @@ bool dcn35_is_dig_enabled(struct link_encoder *enc);
enum signal_type dcn35_get_dig_mode(struct link_encoder *enc);
void dcn35_link_encoder_setup(struct link_encoder *enc, enum signal_type signal);
+/*
+ * Enable DP transmitter and its encoder for dpia port.
+ */
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+
+/*
+ * Disable transmitter and its encoder for dpia port.
+ */
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
+
#endif /* __DC_LINK_ENCODER__DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 2e4a46f1b499..5efddd48d5c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -158,6 +158,11 @@ bool dm_helpers_dp_write_dsc_enable(
const struct dc_stream_state *stream,
bool enable
);
+
+bool dm_helpers_dp_write_hblank_reduction(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
+
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index 39525721c976..f1235bf9a596 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1312,138 +1312,6 @@ bool dcn_validate_bandwidth(
return false;
}
-static unsigned int dcn_find_normalized_clock_vdd_Level(
- const struct dc *dc,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz)
-{
- int vdd_level = dcn_bw_v_min0p65;
-
- if (clocks_in_khz == 0)/*todo some clock not in the considerations*/
- return vdd_level;
-
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- case DM_PP_CLOCK_TYPE_DPPCLK:
- if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- {
- unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
-
- if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- }
- break;
-
- case DM_PP_CLOCK_TYPE_DCFCLK:
- if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- default:
- break;
- }
- return vdd_level;
-}
-
-unsigned int dcn_find_dcfclk_suits_all(
- const struct dc *dc,
- struct dc_clocks *clocks)
-{
- unsigned vdd_level, vdd_level_temp;
- unsigned dcf_clk;
-
- /*find a common supported voltage level*/
- vdd_level = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
-
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
-
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
-
- /*find that level conresponding dcfclk*/
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- if (vdd_level == dcn_bw_v_max0p91) {
- BREAK_TO_DEBUGGER();
- dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
- } else if (vdd_level == dcn_bw_v_max0p9)
- dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
- else if (vdd_level == dcn_bw_v_nom0p8)
- dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000;
- else if (vdd_level == dcn_bw_v_mid0p72)
- dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000;
- else
- dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
-
- DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk);
- return dcf_clk;
-}
-
void dcn_bw_update_from_pplib_fclks(
struct dc *dc,
struct dm_pp_clock_levels_with_voltage *fclks)
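
The two helpers deleted above had no remaining callers. What they implemented was a threshold walk: map each requested clock to the lowest voltage bucket whose ceiling covers it, then take the maximum bucket across all clocks. The pattern, condensed into a standalone sketch with made-up ceilings:

#include <stdio.h>

/* Made-up per-voltage ceilings in kHz, lowest voltage first; the removed
 * helpers read these from dc->dcn_soc per clock type. */
static const int dispclk_ceiling[4] = { 400000, 600000, 900000, 1200000 };

enum vdd_level { V_MIN0P65, V_MID0P72, V_NOM0P8, V_MAX0P9, V_MAX0P91 };

static enum vdd_level vdd_for_clock(const int *ceiling, int khz)
{
	int lvl;

	for (lvl = 0; lvl < 4; lvl++)
		if (khz <= ceiling[lvl])
			return (enum vdd_level)lvl;
	return V_MAX0P91;	/* above every ceiling; the removed code
				 * hit BREAK_TO_DEBUGGER() here */
}

int main(void)
{
	/* dcn_find_dcfclk_suits_all() took the max level over dispclk,
	 * phyclk, dppclk, fclk and dcfclk, then returned that level's
	 * dcfclk ceiling. */
	printf("700 MHz dispclk -> level %d\n",
	       vdd_for_clock(dispclk_ceiling, 700000));
	return 0;
}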
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 76d3bb3c9155..8d4873f80df0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -1562,6 +1562,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int)(dst_y_per_row_vblank * (double)htotal
* ref_freq_to_pix_freq / (double)dpte_groups_per_row_ub_l);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 86ac7d59fd32..0748ef36a16a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1595,6 +1595,7 @@ double dml32_TruncToValidBPP(
unsigned int NonDSCBPP0;
unsigned int NonDSCBPP1;
unsigned int NonDSCBPP2;
+ unsigned int NonDSCBPP3 = BPP_INVALID;
if (Format == dm_420) {
NonDSCBPP0 = 12;
@@ -1603,6 +1604,7 @@ double dml32_TruncToValidBPP(
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
+ NonDSCBPP3 = 18;
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
@@ -1667,6 +1669,8 @@ double dml32_TruncToValidBPP(
return NonDSCBPP1;
else if (MaxLinkBPP >= NonDSCBPP0)
return 16.0;
+ else if ((Output == dm_dp2p0 || Output == dm_dp) && NonDSCBPP3 != BPP_INVALID && MaxLinkBPP >= NonDSCBPP3)
+ return NonDSCBPP3; // Special case to allow 6bpc RGB for DP connections.
else
return BPP_INVALID;
}
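
The new NonDSCBPP3 rung permits 6 bpc RGB (18 bpp) as a last resort, but only for DP outputs. A simplified standalone version of the fallback ladder (thresholds condensed; only the DP-gated 18 bpp rung mirrors the new code):

#include <stdio.h>

#define BPP_INVALID 0

static double trunc_to_valid_bpp(double max_link_bpp, int output_is_dp)
{
	if (max_link_bpp >= 36.0)
		return 36.0;
	if (max_link_bpp >= 30.0)
		return 30.0;
	if (max_link_bpp >= 24.0)
		return 24.0;
	/* new special case: allow 6 bpc RGB (18 bpp) on DP links only */
	if (output_is_dp && max_link_bpp >= 18.0)
		return 18.0;
	return BPP_INVALID;
}

int main(void)
{
	printf("%.0f %.0f\n",
	       trunc_to_valid_bpp(20.0, 1),	/* 18 on DP */
	       trunc_to_valid_bpp(20.0, 0));	/* 0 = invalid elsewhere */
	return 0;
}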
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index beed7adbbd43..47d785204f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index c4378e620cbf..91c4f3b4bd5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -29,7 +29,11 @@ dml2_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+frame_warn_flag := -Wframe-larger-than=4096
+else
frame_warn_flag := -Wframe-larger-than=3072
+endif
else
frame_warn_flag := -Wframe-larger-than=2048
endif
@@ -73,9 +77,8 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
@@ -94,9 +97,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
@@ -113,9 +115,8 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_r
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
-DML21 := src/dml2_top/dml_top.o
-DML21 += src/dml2_top/dml_top_mcache.o
-DML21 += src/dml2_top/dml2_top_optimization.o
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 8dabb1ac0b68..35bc917631ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -6301,9 +6301,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.meta_row_bandwidth_this_state,
mode_lib->ms.dpte_row_bandwidth_this_state,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor);
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j]);
s->VMDataOnlyReturnBWPerState = dml_get_return_bw_mbps_vm_only(
&mode_lib->ms.soc,
@@ -6434,7 +6434,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
/* Output */
&mode_lib->ms.UrgentBurstFactorCursorPre[k],
&mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
&mode_lib->ms.NotUrgentLatencyHidingPre[k]);
mode_lib->ms.cursor_bw_pre[k] = mode_lib->ms.cache_display_cfg.plane.NumberOfCursors[k] * mode_lib->ms.cache_display_cfg.plane.CursorWidth[k] *
@@ -6458,9 +6458,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.prefetch_vmrow_bw,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre,
@@ -6517,9 +6517,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw,
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre);
@@ -6586,9 +6586,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.prefetch_vmrow_bw,
mode_lib->ms.NoOfDPP[j], // VBA_ERROR DPPPerSurface is not assigned at this point, should use NoOfDpp here
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre,
@@ -7809,9 +7809,9 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
mode_lib->ms.DETBufferSizeYThisState[k],
mode_lib->ms.DETBufferSizeCThisState[k],
/* Output */
- &mode_lib->ms.UrgentBurstFactorCursor[k],
- &mode_lib->ms.UrgentBurstFactorLuma[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
+ &mode_lib->ms.UrgentBurstFactorCursor[j][k],
+ &mode_lib->ms.UrgentBurstFactorLuma[j][k],
+ &mode_lib->ms.UrgentBurstFactorChroma[j][k],
&mode_lib->ms.NotUrgentLatencyHiding[k]);
}
@@ -8318,7 +8318,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dcfclk_option != dml_use_override_freq)
locals->Dcfclk = mode_lib->ms.DCFCLK;
else
- locals->Dcfclk = clk_cfg->dcfclk_freq_mhz;
+ locals->Dcfclk = clk_cfg->dcfclk_mhz;
#ifdef __DML_VBA_DEBUG__
dml_print_dml_policy(&mode_lib->ms.policy);
@@ -8371,7 +8371,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dispclk_option == dml_use_required_freq)
locals->Dispclk = locals->Dispclk_calculated;
else if (clk_cfg->dispclk_option == dml_use_override_freq)
- locals->Dispclk = clk_cfg->dispclk_freq_mhz;
+ locals->Dispclk = clk_cfg->dispclk_mhz;
else
locals->Dispclk = mode_lib->ms.state.dispclk_mhz;
#ifdef __DML_VBA_DEBUG__
@@ -8412,7 +8412,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dppclk_option[k] == dml_use_required_freq)
locals->Dppclk[k] = locals->Dppclk_calculated[k];
else if (clk_cfg->dppclk_option[k] == dml_use_override_freq)
- locals->Dppclk[k] = clk_cfg->dppclk_freq_mhz[k];
+ locals->Dppclk[k] = clk_cfg->dppclk_mhz[k];
else
locals->Dppclk[k] = mode_lib->ms.state.dppclk_mhz;
#ifdef __DML_VBA_DEBUG__
@@ -9190,6 +9190,8 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
&locals->FractionOfUrgentBandwidth,
&s->dummy_boolean[0]); // dml_bool_t *PrefetchBandwidthSupport
+
+
if (s->VRatioPrefetchMoreThanMax != false || s->DestinationLineTimesForPrefetchLessThan2 != false) {
dml_print("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
dml_print("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
@@ -9204,6 +9206,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
}
}
+
if (locals->PrefetchModeSupported == true && mode_lib->ms.support.ImmediateFlipSupport == true) {
locals->BandwidthAvailableForImmediateFlip = CalculateBandwidthAvailableForImmediateFlip(
mode_lib->ms.num_active_planes,
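
The recurring fix in this file threads the voltage-state index j into every UrgentBurstFactor consumer; the structs hunk below widens those arrays to [2][planes] to match. A standalone sketch of the bug class (a per-state array consumed without the state index reads whichever state was computed last):

#include <stdio.h>

#define NUM_STATES 2	/* matches the [2][...] widening below */
#define NUM_PLANES 3

static double factor[NUM_STATES][NUM_PLANES];

int main(void)
{
	int j, k;

	for (j = 0; j < NUM_STATES; j++)
		for (k = 0; k < NUM_PLANES; k++)
			factor[j][k] = 1.0 + 0.1 * j + 0.01 * k;

	/* The bug pattern: a consumer reading factor[k] (no state index)
	 * silently used whichever state was filled in last. The fix is to
	 * index both dimensions at every consumer, as the hunks do with
	 * UrgentBurstFactor*[j][k] and ...[j]. */
	j = 0; k = 2;
	printf("state %d plane %d -> %.2f\n", j, k, factor[j][k]);
	return 0;
}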
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index f951936bb579..dd3f43181a6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -28,6 +28,7 @@
#define __DISPLAY_MODE_CORE_STRUCT_H__
#include "display_mode_lib_defines.h"
+#include "dml_top_display_cfg_types.h"
enum dml_project_id {
dml_project_invalid = 0,
@@ -49,7 +50,9 @@ enum dml_use_mall_for_pstate_change_mode {
dml_use_mall_pstate_change_disable = 0,
dml_use_mall_pstate_change_full_frame = 1,
dml_use_mall_pstate_change_sub_viewport = 2,
- dml_use_mall_pstate_change_phantom_pipe = 3
+ dml_use_mall_pstate_change_phantom_pipe = 3,
+ dml_use_mall_pstate_change_phantom_pipe_no_data_return = 4,
+ dml_use_mall_pstate_change_imall = 5
};
enum dml_use_mall_for_static_screen_mode {
dml_use_mall_static_screen_disable = 0,
@@ -171,7 +174,11 @@ enum dml_swizzle_mode {
dml_sw_256kb_z_x = 28,
dml_sw_256kb_s_x = 29,
dml_sw_256kb_d_x = 30,
- dml_sw_256kb_r_x = 31
+ dml_sw_256kb_r_x = 31,
+ dml_sw_256b_2d = 32,
+ dml_sw_4kb_2d = 33,
+ dml_sw_64kb_2d = 34,
+ dml_sw_256kb_2d = 35
};
enum dml_lb_depth {
dml_lb_6 = 0,
@@ -223,24 +230,28 @@ enum dml_mpc_use_policy {
dml_mpc_disabled = 0,
dml_mpc_as_possible = 1,
dml_mpc_as_needed_for_voltage = 2,
- dml_mpc_as_needed_for_pstate_and_voltage = 3
+ dml_mpc_as_needed_for_pstate_and_voltage = 3,
+ dml_mpc_as_needed = 4,
+ dml_mpc_2to1 = 5
};
enum dml_odm_use_policy {
dml_odm_use_policy_bypass = 0,
dml_odm_use_policy_combine_as_needed = 1,
dml_odm_use_policy_combine_2to1 = 2,
- dml_odm_use_policy_combine_4to1 = 3,
- dml_odm_use_policy_split_1to2 = 4,
- dml_odm_use_policy_mso_1to2 = 5,
- dml_odm_use_policy_mso_1to4 = 6
+ dml_odm_use_policy_combine_3to1 = 3,
+ dml_odm_use_policy_combine_4to1 = 4,
+ dml_odm_use_policy_split_1to2 = 5,
+ dml_odm_use_policy_mso_1to2 = 6,
+ dml_odm_use_policy_mso_1to4 = 7
};
enum dml_odm_mode {
dml_odm_mode_bypass = 0,
dml_odm_mode_combine_2to1 = 1,
- dml_odm_mode_combine_4to1 = 2,
- dml_odm_mode_split_1to2 = 3,
- dml_odm_mode_mso_1to2 = 4,
- dml_odm_mode_mso_1to4 = 5
+ dml_odm_mode_combine_3to1 = 2,
+ dml_odm_mode_combine_4to1 = 3,
+ dml_odm_mode_split_1to2 = 4,
+ dml_odm_mode_mso_1to2 = 5,
+ dml_odm_mode_mso_1to4 = 6
};
enum dml_writeback_configuration {
dml_whole_buffer_for_single_stream_no_interleave = 0,
@@ -289,6 +300,17 @@ struct soc_state_bounding_box_st {
dml_float_t fclk_change_latency_us;
dml_float_t usr_retraining_latency_us;
dml_bool_t use_ideal_dram_bw_strobe;
+ dml_float_t g6_temp_read_blackout_us;
+
+ struct {
+ dml_uint_t urgent_ramp_uclk_cycles;
+ dml_uint_t trip_to_memory_uclk_cycles;
+ dml_uint_t meta_trip_to_memory_uclk_cycles;
+ dml_uint_t maximum_latency_when_urgent_uclk_cycles;
+ dml_uint_t average_latency_when_urgent_uclk_cycles;
+ dml_uint_t maximum_latency_when_non_urgent_uclk_cycles;
+ dml_uint_t average_latency_when_non_urgent_uclk_cycles;
+ } dml_dcn401_uclk_dpm_dependent_soc_qos_params;
};
struct soc_bounding_box_st {
@@ -297,7 +319,7 @@ struct soc_bounding_box_st {
dml_float_t pcierefclk_mhz;
dml_float_t refclk_mhz;
dml_float_t amclk_mhz;
- dml_float_t max_outstanding_reqs;
+ dml_uint_t max_outstanding_reqs;
dml_float_t pct_ideal_sdp_bw_after_urgent;
dml_float_t pct_ideal_fabric_bw_after_urgent;
dml_float_t pct_ideal_dram_bw_after_urgent_pixel_only;
@@ -308,6 +330,16 @@ struct soc_bounding_box_st {
dml_float_t max_avg_fabric_bw_use_normal_percent;
dml_float_t max_avg_dram_bw_use_normal_percent;
dml_float_t max_avg_dram_bw_use_normal_strobe_percent;
+
+ dml_float_t svp_prefetch_pct_ideal_sdp_bw_after_urgent;
+ dml_float_t svp_prefetch_pct_ideal_fabric_bw_after_urgent;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_pixel_only;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_pixel_and_vm;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_vm_only;
+ dml_float_t svp_prefetch_max_avg_sdp_bw_use_normal_percent;
+ dml_float_t svp_prefetch_max_avg_fabric_bw_use_normal_percent;
+ dml_float_t svp_prefetch_max_avg_dram_bw_use_normal_percent;
+
dml_uint_t round_trip_ping_latency_dcfclk_cycles;
dml_uint_t urgent_out_of_order_return_per_channel_pixel_only_bytes;
dml_uint_t urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
@@ -324,6 +356,26 @@ struct soc_bounding_box_st {
dml_uint_t mall_allocated_for_dcn_mbytes;
dml_float_t dispclk_dppclk_vco_speed_mhz;
dml_bool_t do_urgent_latency_adjustment;
+
+ dml_uint_t mem_word_bytes;
+ dml_uint_t num_dcc_mcaches;
+ dml_uint_t mcache_size_bytes;
+ dml_uint_t mcache_line_size_bytes;
+
+ struct {
+ dml_bool_t UseNewDCN401SOCParameters;
+ dml_uint_t df_qos_response_time_fclk_cycles;
+ dml_uint_t max_round_trip_to_furthest_cs_fclk_cycles;
+ dml_uint_t mall_overhead_fclk_cycles;
+ dml_uint_t meta_trip_adder_fclk_cycles;
+ dml_uint_t average_transport_distance_fclk_cycles;
+ dml_float_t umc_urgent_ramp_latency_margin;
+ dml_float_t umc_max_latency_margin;
+ dml_float_t umc_average_latency_margin;
+ dml_float_t fabric_max_transport_latency_margin;
+ dml_float_t fabric_average_transport_latency_margin;
+ } dml_dcn401_soc_qos_params;
+
};
struct ip_params_st {
@@ -515,6 +567,10 @@ struct dml_plane_cfg_st {
dml_uint_t CursorWidth[__DML_NUM_PLANES__];
dml_uint_t CursorBPP[__DML_NUM_PLANES__];
+ dml_bool_t setup_for_tdlut[__DML_NUM_PLANES__];
+ enum dml2_tdlut_addressing_mode tdlut_addressing_mode[__DML_NUM_PLANES__];
+ enum dml2_tdlut_width_mode tdlut_width_mode[__DML_NUM_PLANES__];
+
enum dml_use_mall_for_static_screen_mode UseMALLForStaticScreen[__DML_NUM_PLANES__];
enum dml_use_mall_for_pstate_change_mode UseMALLForPStateChange[__DML_NUM_PLANES__];
@@ -604,6 +660,17 @@ struct dml_hw_resource_st {
dml_float_t DLGRefClkFreqMHz; /// <brief DLG Global Reference timer
};
+/// @brief To control the clk usage for model programming
+struct dml_clk_cfg_st {
+ enum dml_clk_cfg_policy dcfclk_option; ///< brief Use for mode_program; user can select between using the min required clk as calculated by DML or the test-specific freq
+ enum dml_clk_cfg_policy dispclk_option; ///< brief Use for mode_program; user can select between using the min required clk as calculated by DML or the test-specific freq
+ enum dml_clk_cfg_policy dppclk_option[__DML_NUM_PLANES__];
+
+ dml_float_t dcfclk_mhz;
+ dml_float_t dispclk_mhz;
+ dml_float_t dppclk_mhz[__DML_NUM_PLANES__];
+}; // dml_clk_cfg_st
+
/// @brief DML display configuration.
/// Describe how to display a surface in multi-plane setup and output to different outputs and writeback using the specified timing
struct dml_display_cfg_st {
@@ -616,19 +683,9 @@ struct dml_display_cfg_st {
unsigned int num_timings;
struct dml_hw_resource_st hw; //< brief for mode programming
+ struct dml_clk_cfg_st clk_overrides; //< brief for mode programming clk override
}; // dml_display_cfg_st
-/// @brief To control the clk usage for model programming
-struct dml_clk_cfg_st {
- enum dml_clk_cfg_policy dcfclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
- enum dml_clk_cfg_policy dispclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
- enum dml_clk_cfg_policy dppclk_option[__DML_NUM_PLANES__];
-
- dml_float_t dcfclk_freq_mhz;
- dml_float_t dispclk_freq_mhz;
- dml_float_t dppclk_freq_mhz[__DML_NUM_PLANES__];
-}; // dml_clk_cfg_st
-
/// @brief DML mode evaluation and programming policy
/// Those knobs that affect mode support and mode programming
struct dml_mode_eval_policy_st {
@@ -884,11 +941,11 @@ struct mode_support_st {
dml_uint_t meta_row_height[__DML_NUM_PLANES__];
dml_uint_t meta_row_height_chroma[__DML_NUM_PLANES__];
dml_float_t UrgLatency;
- dml_float_t UrgentBurstFactorCursor[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorCursor[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorCursorPre[__DML_NUM_PLANES__];
- dml_float_t UrgentBurstFactorLuma[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorLuma[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorLumaPre[__DML_NUM_PLANES__];
- dml_float_t UrgentBurstFactorChroma[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorChroma[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorChromaPre[__DML_NUM_PLANES__];
dml_float_t MaximumSwathWidthInLineBufferLuma;
dml_float_t MaximumSwathWidthInLineBufferChroma;
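
With dml_clk_cfg_st moved ahead of dml_display_cfg_st and embedded as clk_overrides, and the _freq infix dropped from its members, override usage looks roughly like this standalone sketch (stub mirrors of the structs; names follow the hunks above):

#include <stdio.h>

enum dml_clk_cfg_policy { dml_use_required_freq, dml_use_override_freq };

/* Stub mirror of the post-patch layout: the clk config lives inside the
 * display config, and members are dcfclk_mhz rather than dcfclk_freq_mhz. */
struct dml_clk_cfg_st {
	enum dml_clk_cfg_policy dcfclk_option;
	double dcfclk_mhz;
};

struct dml_display_cfg_st {
	struct dml_clk_cfg_st clk_overrides;
};

int main(void)
{
	struct dml_display_cfg_st cfg = { 0 };

	cfg.clk_overrides.dcfclk_option = dml_use_override_freq;
	cfg.clk_overrides.dcfclk_mhz = 600.0;	/* was dcfclk_freq_mhz */
	printf("dcfclk override: %.1f MHz\n", cfg.clk_overrides.dcfclk_mhz);
	return 0;
}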
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
index c247aee89caf..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
@@ -690,12 +690,12 @@ __DML_DLL_EXPORT__ void dml_print_clk_cfg(const struct dml_clk_cfg_st *clk_cfg)
dml_print("DML: clk_cfg: dcfclk_option = %d\n", clk_cfg->dcfclk_option);
dml_print("DML: clk_cfg: dispclk_option = %d\n", clk_cfg->dispclk_option);
- dml_print("DML: clk_cfg: dcfclk_freq_mhz = %f\n", clk_cfg->dcfclk_freq_mhz);
- dml_print("DML: clk_cfg: dispclk_freq_mhz = %f\n", clk_cfg->dispclk_freq_mhz);
+ dml_print("DML: clk_cfg: dcfclk_mhz = %f\n", clk_cfg->dcfclk_mhz);
+ dml_print("DML: clk_cfg: dispclk_mhz = %f\n", clk_cfg->dispclk_mhz);
for (dml_uint_t i = 0; i < DCN_DML__NUM_PLANE; i++) {
dml_print("DML: clk_cfg: i=%d, dppclk_option = %d\n", i, clk_cfg->dppclk_option[i]);
- dml_print("DML: clk_cfg: i=%d, dppclk_freq_mhz = %f\n", i, clk_cfg->dppclk_freq_mhz[i]);
+ dml_print("DML: clk_cfg: i=%d, dppclk_mhz = %f\n", i, clk_cfg->dppclk_mhz[i]);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index c6a5a8614679..b9c6b45f6872 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -10,7 +10,6 @@
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
#include "bounding_boxes/dcn4_soc_bb.h"
-#include "bounding_boxes/dcn3_soc_bb.h"
static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
@@ -20,10 +19,6 @@ static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_
const struct dml2_soc_qos_parameters *qos_params;
switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- soc_bb = &dml2_socbb_dcn31;
- qos_params = &dml_dcn31_soc_qos_params;
- break;
case DCN_VERSION_4_01:
default:
if (config->bb_from_dmub)
@@ -60,9 +55,6 @@ static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_ini
const struct dml2_ip_capabilities *ip_caps;
switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- ip_caps = &dml2_dcn31_max_ip_caps;
- break;
case DCN_VERSION_4_01:
default:
ip_caps = &dml2_dcn401_max_ip_caps;
@@ -302,12 +294,17 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
(in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
- if (in_dc->ctx->dc_bios->vram_info.num_chans) {
+ if (dc_bw_params->num_channels) {
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
+ } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
}
- if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
+ if (dc_bw_params->dram_channel_width_bytes) {
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+ } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
}
@@ -721,11 +718,21 @@ static void populate_dml21_surface_config_from_plane_state(
surface->dcc.informative.fraction_of_zero_size_request_plane1 = plane_state->dcc.independent_64b_blks_c;
surface->dcc.plane0.pitch = plane_state->dcc.meta_pitch;
surface->dcc.plane1.pitch = plane_state->dcc.meta_pitch_c;
- if (in_dc->ctx->dce_version < DCN_VERSION_4_01) {
- /* needed for N-1 testing */
+
+ // Update swizzle / array mode based on the gfx_format
+ switch (plane_state->tiling_info.gfxversion) {
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ // Placeholder for programming the array_mode
+ break;
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
surface->tiling = gfx9_to_dml2_swizzle_mode(plane_state->tiling_info.gfx9.swizzle);
- } else {
+ break;
+ case DcGfxAddr3:
surface->tiling = gfx_addr3_to_dml2_swizzle_mode(plane_state->tiling_info.gfx_addr3.swizzle);
+ break;
}
}
@@ -1077,28 +1084,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0;
context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz;
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
-}
-
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx)
-{
- struct dml2_core_internal_display_mode_lib *mode_lib = &in_ctx->v21.dml_init.dml2_instance->core_instance.clean_me_up.mode_lib;
- double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
-
- if (reg_set_idx >= DML2_DCHUB_WATERMARK_SET_NUM) {
- /* invalid register set index */
- return;
- }
-
- /* convert to legacy format (time in ns) */
- watermark->urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->pte_meta_urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_enter_plus_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_enter / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_exit / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].uclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->urgent_latency_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.fclk_pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].fclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->frac_urg_bw_flip = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_flip;
- watermark->frac_urg_bw_nom = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_nom;
+ context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
+ context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
@@ -1144,53 +1131,6 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
}
}
-
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming)
-{
- unsigned int hactive, vactive, hblank_start, vblank_start, hblank_end, vblank_end;
- struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
-
- hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right + pipe_ctx->hblank_borrow;
- vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top;
- hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch;
- vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch;
-
- hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right - pipe_ctx->hblank_borrow;
- vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
-
- if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
- /* phantom has its own global sync */
- global_sync = &stream_programming->phantom_stream.global_sync;
- }
-
- pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4x.vstartup_lines;
- pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4x.vupdate_offset_pixels;
- pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4x.vupdate_vupdate_width_pixels;
- pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4x.vready_offset_pixels;
- pipe_ctx->pipe_dlg_param.pstate_keepout = global_sync->dcn4x.pstate_keepout_start_lines;
-
- pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
-
- pipe_ctx->pipe_dlg_param.hactive = hactive;
- pipe_ctx->pipe_dlg_param.vactive = vactive;
- pipe_ctx->pipe_dlg_param.htotal = pipe_ctx->stream->timing.h_total;
- pipe_ctx->pipe_dlg_param.vtotal = pipe_ctx->stream->timing.v_total;
- pipe_ctx->pipe_dlg_param.hblank_end = hblank_end;
- pipe_ctx->pipe_dlg_param.vblank_end = vblank_end;
- pipe_ctx->pipe_dlg_param.hblank_start = hblank_start;
- pipe_ctx->pipe_dlg_param.vblank_start = vblank_start;
- pipe_ctx->pipe_dlg_param.vfront_porch = pipe_ctx->stream->timing.v_front_porch;
- pipe_ctx->pipe_dlg_param.pixel_rate_mhz = pipe_ctx->stream->timing.pix_clk_100hz / 10000.00;
- pipe_ctx->pipe_dlg_param.refresh_rate = ((timing->pix_clk_100hz * 100) / timing->h_total) / timing->v_total;
- pipe_ctx->pipe_dlg_param.vtotal_max = pipe_ctx->stream->adjust.v_total_max;
- pipe_ctx->pipe_dlg_param.vtotal_min = pipe_ctx->stream->adjust.v_total_min;
- pipe_ctx->pipe_dlg_param.recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.recout_width = pipe_ctx->plane_res.scl_data.recout.width;
- pipe_ctx->pipe_dlg_param.full_recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.full_recout_width = pipe_ctx->plane_res.scl_data.recout.width;
-}
-
void dml21_map_hw_resources(struct dml2_context *dml_ctx)
{
unsigned int i = 0;
@@ -1226,22 +1166,22 @@ void dml21_set_dc_p_state_type(
bool sub_vp_enabled)
{
switch (stream_programming->uclk_pstate_method) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
break;
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
if (sub_vp_enabled)
pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
else
pipe_ctx->p_state_type = P_STATE_V_BLANK;
break;
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
pipe_ctx->p_state_type = P_STATE_SUB_VP;
break;
- case dml2_uclk_pstate_support_method_fw_drr:
+ case dml2_pstate_method_fw_drr:
if (sub_vp_enabled)
pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
else
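
Among the deletions here, dml21_extract_legacy_watermark_set() converted watermark registers, which count dchub refclk cycles, into nanoseconds as reg / refclk_mhz * 1000. A standalone sketch of that conversion with made-up values:

#include <stdio.h>

/* Cycles divided by refclk in MHz gives microseconds; times 1000 gives
 * ns. This is the formula the removed helper applied per register. */
static double wm_reg_to_ns(unsigned int reg_cycles, double refclk_mhz)
{
	return (double)reg_cycles / refclk_mhz * 1000.0;
}

int main(void)
{
	/* e.g. 192 cycles at a 48 MHz DLG refclk -> 4000 ns */
	printf("%.0f ns\n", wm_reg_to_ns(192, 48.0));
	return 0;
}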
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 476a7f6e4875..069b939c672a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -21,8 +21,6 @@ void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_
void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming);
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 51d491bffa32..1e56d995cd0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -142,108 +142,21 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
return num_pipes;
}
-
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out)
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming)
{
- memset(&out->rq_regs, 0, sizeof(out->rq_regs));
- out->rq_regs.rq_regs_l.chunk_size = rq_regs->rq_regs_l.chunk_size;
- out->rq_regs.rq_regs_l.min_chunk_size = rq_regs->rq_regs_l.min_chunk_size;
- //out->rq_regs.rq_regs_l.meta_chunk_size = rq_regs->rq_regs_l.meta_chunk_size;
- //out->rq_regs.rq_regs_l.min_meta_chunk_size = rq_regs->rq_regs_l.min_meta_chunk_size;
- out->rq_regs.rq_regs_l.dpte_group_size = rq_regs->rq_regs_l.dpte_group_size;
- out->rq_regs.rq_regs_l.mpte_group_size = rq_regs->rq_regs_l.mpte_group_size;
- out->rq_regs.rq_regs_l.swath_height = rq_regs->rq_regs_l.swath_height;
- out->rq_regs.rq_regs_l.pte_row_height_linear = rq_regs->rq_regs_l.pte_row_height_linear;
-
- out->rq_regs.rq_regs_c.chunk_size = rq_regs->rq_regs_c.chunk_size;
- out->rq_regs.rq_regs_c.min_chunk_size = rq_regs->rq_regs_c.min_chunk_size;
- //out->rq_regs.rq_regs_c.meta_chunk_size = rq_regs->rq_regs_c.meta_chunk_size;
- //out->rq_regs.rq_regs_c.min_meta_chunk_size = rq_regs->rq_regs_c.min_meta_chunk_size;
- out->rq_regs.rq_regs_c.dpte_group_size = rq_regs->rq_regs_c.dpte_group_size;
- out->rq_regs.rq_regs_c.mpte_group_size = rq_regs->rq_regs_c.mpte_group_size;
- out->rq_regs.rq_regs_c.swath_height = rq_regs->rq_regs_c.swath_height;
- out->rq_regs.rq_regs_c.pte_row_height_linear = rq_regs->rq_regs_c.pte_row_height_linear;
-
- out->rq_regs.drq_expansion_mode = rq_regs->drq_expansion_mode;
- out->rq_regs.prq_expansion_mode = rq_regs->prq_expansion_mode;
- //out->rq_regs.mrq_expansion_mode = rq_regs->mrq_expansion_mode;
- out->rq_regs.crq_expansion_mode = rq_regs->crq_expansion_mode;
- out->rq_regs.plane1_base_address = rq_regs->plane1_base_address;
- out->unbounded_req = rq_regs->unbounded_request_enabled;
-
- memset(&out->dlg_regs, 0, sizeof(out->dlg_regs));
- out->dlg_regs.refcyc_h_blank_end = disp_dlg_regs->refcyc_h_blank_end;
- out->dlg_regs.dlg_vblank_end = disp_dlg_regs->dlg_vblank_end;
- out->dlg_regs.min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
- out->dlg_regs.refcyc_per_htotal = disp_dlg_regs->refcyc_per_htotal;
- out->dlg_regs.refcyc_x_after_scaler = disp_dlg_regs->refcyc_x_after_scaler;
- out->dlg_regs.dst_y_after_scaler = disp_dlg_regs->dst_y_after_scaler;
- out->dlg_regs.dst_y_prefetch = disp_dlg_regs->dst_y_prefetch;
- out->dlg_regs.dst_y_per_vm_vblank = disp_dlg_regs->dst_y_per_vm_vblank;
- out->dlg_regs.dst_y_per_row_vblank = disp_dlg_regs->dst_y_per_row_vblank;
- out->dlg_regs.dst_y_per_vm_flip = disp_dlg_regs->dst_y_per_vm_flip;
- out->dlg_regs.dst_y_per_row_flip = disp_dlg_regs->dst_y_per_row_flip;
- out->dlg_regs.ref_freq_to_pix_freq = disp_dlg_regs->ref_freq_to_pix_freq;
- out->dlg_regs.vratio_prefetch = disp_dlg_regs->vratio_prefetch;
- out->dlg_regs.vratio_prefetch_c = disp_dlg_regs->vratio_prefetch_c;
- out->dlg_regs.refcyc_per_tdlut_group = disp_dlg_regs->refcyc_per_tdlut_group;
- out->dlg_regs.refcyc_per_pte_group_vblank_l = disp_dlg_regs->refcyc_per_pte_group_vblank_l;
- out->dlg_regs.refcyc_per_pte_group_vblank_c = disp_dlg_regs->refcyc_per_pte_group_vblank_c;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_l = disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_c = disp_dlg_regs->refcyc_per_meta_chunk_vblank_c;
- out->dlg_regs.refcyc_per_pte_group_flip_l = disp_dlg_regs->refcyc_per_pte_group_flip_l;
- out->dlg_regs.refcyc_per_pte_group_flip_c = disp_dlg_regs->refcyc_per_pte_group_flip_c;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_l = disp_dlg_regs->refcyc_per_meta_chunk_flip_l;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_c = disp_dlg_regs->refcyc_per_meta_chunk_flip_c;
- out->dlg_regs.dst_y_per_pte_row_nom_l = disp_dlg_regs->dst_y_per_pte_row_nom_l;
- out->dlg_regs.dst_y_per_pte_row_nom_c = disp_dlg_regs->dst_y_per_pte_row_nom_c;
- out->dlg_regs.refcyc_per_pte_group_nom_l = disp_dlg_regs->refcyc_per_pte_group_nom_l;
- out->dlg_regs.refcyc_per_pte_group_nom_c = disp_dlg_regs->refcyc_per_pte_group_nom_c;
- //out->dlg_regs.dst_y_per_meta_row_nom_l = disp_dlg_regs->dst_y_per_meta_row_nom_l;
- //out->dlg_regs.dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_c;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_l = disp_dlg_regs->refcyc_per_meta_chunk_nom_l;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_c = disp_dlg_regs->refcyc_per_meta_chunk_nom_c;
- out->dlg_regs.refcyc_per_line_delivery_pre_l = disp_dlg_regs->refcyc_per_line_delivery_pre_l;
- out->dlg_regs.refcyc_per_line_delivery_pre_c = disp_dlg_regs->refcyc_per_line_delivery_pre_c;
- out->dlg_regs.refcyc_per_line_delivery_l = disp_dlg_regs->refcyc_per_line_delivery_l;
- out->dlg_regs.refcyc_per_line_delivery_c = disp_dlg_regs->refcyc_per_line_delivery_c;
- out->dlg_regs.refcyc_per_vm_group_vblank = disp_dlg_regs->refcyc_per_vm_group_vblank;
- out->dlg_regs.refcyc_per_vm_group_flip = disp_dlg_regs->refcyc_per_vm_group_flip;
- out->dlg_regs.refcyc_per_vm_req_vblank = disp_dlg_regs->refcyc_per_vm_req_vblank;
- out->dlg_regs.refcyc_per_vm_req_flip = disp_dlg_regs->refcyc_per_vm_req_flip;
- out->dlg_regs.dst_y_offset_cur0 = disp_dlg_regs->dst_y_offset_cur0;
- out->dlg_regs.chunk_hdl_adjust_cur0 = disp_dlg_regs->chunk_hdl_adjust_cur0;
- //out->dlg_regs.dst_y_offset_cur1 = disp_dlg_regs->dst_y_offset_cur1;
- //out->dlg_regs.chunk_hdl_adjust_cur1 = disp_dlg_regs->chunk_hdl_adjust_cur1;
- out->dlg_regs.vready_after_vcount0 = disp_dlg_regs->vready_after_vcount0;
- out->dlg_regs.dst_y_delta_drq_limit = disp_dlg_regs->dst_y_delta_drq_limit;
- out->dlg_regs.refcyc_per_vm_dmdata = disp_dlg_regs->refcyc_per_vm_dmdata;
- out->dlg_regs.dmdata_dl_delta = disp_dlg_regs->dmdata_dl_delta;
-
- memset(&out->ttu_regs, 0, sizeof(out->ttu_regs));
- out->ttu_regs.qos_level_low_wm = disp_ttu_regs->qos_level_low_wm;
- out->ttu_regs.qos_level_high_wm = disp_ttu_regs->qos_level_high_wm;
- out->ttu_regs.min_ttu_vblank = disp_ttu_regs->min_ttu_vblank;
- out->ttu_regs.qos_level_flip = disp_ttu_regs->qos_level_flip;
- out->ttu_regs.refcyc_per_req_delivery_l = disp_ttu_regs->refcyc_per_req_delivery_l;
- out->ttu_regs.refcyc_per_req_delivery_c = disp_ttu_regs->refcyc_per_req_delivery_c;
- out->ttu_regs.refcyc_per_req_delivery_cur0 = disp_ttu_regs->refcyc_per_req_delivery_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_cur1 = disp_ttu_regs->refcyc_per_req_delivery_cur1;
- out->ttu_regs.refcyc_per_req_delivery_pre_l = disp_ttu_regs->refcyc_per_req_delivery_pre_l;
- out->ttu_regs.refcyc_per_req_delivery_pre_c = disp_ttu_regs->refcyc_per_req_delivery_pre_c;
- out->ttu_regs.refcyc_per_req_delivery_pre_cur0 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_pre_cur1 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur1;
- out->ttu_regs.qos_level_fixed_l = disp_ttu_regs->qos_level_fixed_l;
- out->ttu_regs.qos_level_fixed_c = disp_ttu_regs->qos_level_fixed_c;
- out->ttu_regs.qos_level_fixed_cur0 = disp_ttu_regs->qos_level_fixed_cur0;
- //out->ttu_regs.qos_level_fixed_cur1 = disp_ttu_regs->qos_level_fixed_cur1;
- out->ttu_regs.qos_ramp_disable_l = disp_ttu_regs->qos_ramp_disable_l;
- out->ttu_regs.qos_ramp_disable_c = disp_ttu_regs->qos_ramp_disable_c;
- out->ttu_regs.qos_ramp_disable_cur0 = disp_ttu_regs->qos_ramp_disable_cur0;
- //out->ttu_regs.qos_ramp_disable_cur1 = disp_ttu_regs->qos_ramp_disable_cur1;
+ union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
+
+ if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
+ /* phantom has its own global sync */
+ global_sync = &stream_programming->phantom_stream.global_sync;
+ }
+
+ memcpy(&pipe_ctx->global_sync,
+ global_sync,
+ sizeof(union dml2_global_sync_programming));
}
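[Annotation] The replacement helper collapses the old field-by-field DLG parameter copies into a single memcpy of the global-sync union, selecting the phantom stream's instance when the pipe is a SubVP phantom. A reduced sketch of the same pattern with stand-in types (illustrative only):

#include <stdbool.h>
#include <string.h>

union global_sync { struct { int vstartup_lines; } dcn4x; };

struct stream_prog {
	union global_sync global_sync;
	struct { union global_sync global_sync; } phantom_stream;
};

static void populate_sync(union global_sync *dst, const struct stream_prog *sp,
			  bool is_phantom)
{
	/* phantom streams carry their own global sync */
	const union global_sync *src = is_phantom ? &sp->phantom_stream.global_sync
						  : &sp->global_sync;
	memcpy(dst, src, sizeof(*src)); /* one block copy replaces the old per-field path */
}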
void dml21_populate_mall_allocation_size(struct dc_state *context,
@@ -301,28 +214,16 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
{
unsigned int pipe_reg_index = 0;
- dml21_populate_pipe_ctx_dlg_params(dml_ctx, context, pipe_ctx, stream_prog);
+ dml21_pipe_populate_global_sync(dml_ctx, context, pipe_ctx, stream_prog);
find_pipe_regs_idx(dml_ctx, pipe_ctx, &pipe_reg_index);
if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
memcpy(&pipe_ctx->hubp_regs, pln_prog->phantom_plane.pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = false;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = 0;
} else {
memcpy(&pipe_ctx->hubp_regs, pln_prog->pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = pln_prog->pipe_regs[pipe_reg_index]->rq_regs.unbounded_request_enabled;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64;
}
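[Annotation] Note the DET size conversion kept on the non-phantom path: det_size is evidently counted in 64 KB segments (matching the compressed_buffer_segment_size_in_kbytes = 64 entries in the SoC tables later in this patch), hence the * 64 to reach kilobytes. Stated as a standalone helper — the unit is inferred from the hunk, not documented here:

/* assumed unit conversion, inferred from det_size * 64 above */
static unsigned int det_size_kb(unsigned int det_size_in_64kb_segments)
{
	return det_size_in_64kb_segments * 64;
}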
@@ -482,7 +383,8 @@ void dml21_build_fams2_programming(const struct dc *dc,
unsigned int num_fams2_streams = 0;
/* reset fams2 data */
- memset(&context->bw_ctx.bw.dcn.fams2_stream_params, 0, sizeof(struct dmub_fams2_stream_static_state) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -490,8 +392,10 @@ void dml21_build_fams2_programming(const struct dc *dc,
int dml_stream_idx;
struct dc_stream_state *phantom_stream;
struct dc_stream_status *phantom_status;
+ enum fams2_stream_type type = 0;
- struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[num_fams2_streams];
+ union dmub_cmd_fams2_config *static_base_state = &context->bw_ctx.bw.dcn.fams2_stream_base_params[num_fams2_streams];
+ union dmub_cmd_fams2_config *static_sub_state = &context->bw_ctx.bw.dcn.fams2_stream_sub_params[num_fams2_streams];
struct dc_stream_state *stream = context->streams[i];
@@ -508,28 +412,38 @@ void dml21_build_fams2_programming(const struct dc *dc,
}
/* copy static state from PMO */
- memcpy(static_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
- sizeof(struct dmub_fams2_stream_static_state));
-
- /* get information from context */
- static_state->num_planes = context->stream_status[i].plane_count;
- static_state->otg_inst = context->stream_status[i].primary_otg_inst;
-
- /* populate pipe masks for planes */
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
- static_state->pipe_mask |= (1 << k);
- static_state->plane_pipe_masks[j] |= (1 << k);
+ memcpy(static_base_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
+ sizeof(union dmub_cmd_fams2_config));
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ type = static_base_state->stream_v1.base.type;
+
+ /* get information from context */
+ static_base_state->stream_v1.base.num_planes = context->stream_status[i].plane_count;
+ static_base_state->stream_v1.base.otg_inst = context->stream_status[i].primary_otg_inst;
+
+ /* populate pipe masks for planes */
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
+ static_base_state->stream_v1.base.pipe_mask |= (1 << k);
+ static_base_state->stream_v1.base.plane_pipe_masks[j] |= (1 << k);
+ }
}
}
}
+
/* get per method programming */
- switch (static_state->type) {
+ switch (type) {
case FAMS2_STREAM_TYPE_VBLANK:
case FAMS2_STREAM_TYPE_VACTIVE:
case FAMS2_STREAM_TYPE_DRR:
@@ -543,16 +457,27 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* phantom status should always be present */
ASSERT(phantom_status);
- static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+ if (!phantom_status)
+ break;
- /* populate pipe masks for phantom planes */
- for (j = 0; j < phantom_status->plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
- static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
- static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ static_sub_state->stream_v1.sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+
+ /* populate pipe masks for phantom planes */
+ for (j = 0; j < phantom_status->plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ static_sub_state->stream_v1.sub_state.subvp.phantom_pipe_mask |= (1 << k);
+ static_sub_state->stream_v1.sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ }
+ }
}
}
}
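[Annotation] The pipe-mask loops above (for both real and phantom planes) match pipes to planes by stream_id and plane_state pointer, setting one bit per pipe in an aggregate mask and in a per-plane mask. A stand-alone sketch of the same bitmask construction, with a flattened stand-in data layout instead of the dc/context structures — illustrative only:

#include <stdint.h>

/* pipe_plane[k] = plane index driven by pipe k, or -1 when the pipe is idle */
static void build_pipe_masks(const int *pipe_plane, int pipe_count,
			     int plane_count, uint32_t *pipe_mask,
			     uint32_t *plane_pipe_masks)
{
	*pipe_mask = 0;
	for (int j = 0; j < plane_count; j++) {
		plane_pipe_masks[j] = 0;
		for (int k = 0; k < pipe_count; k++) {
			if (pipe_plane[k] == j) {
				*pipe_mask |= 1u << k;          /* any pipe of the stream */
				plane_pipe_masks[j] |= 1u << k; /* pipes of this plane only */
			}
		}
	}
}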
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
index d5153fbac921..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
@@ -18,10 +18,10 @@ struct dml2_display_ttu_regs;
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
int dml21_find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id);
bool dml21_get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id);
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out);
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming);
void dml21_populate_mall_allocation_size(struct dc_state *context,
struct dml2_context *in_ctx,
struct dml2_per_plane_programming *pln_prog,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index bbc28b9a15a3..fb80ba9287b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -75,7 +75,6 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
{
switch (in_dc->ctx->dce_version) {
case DCN_VERSION_4_01:
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
(*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
default:
@@ -233,13 +232,6 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count);
dml21_copy_clocks_to_dc_state(dml_ctx, context);
dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx);
- if (in_dc->ctx->dce_version == DCN_VERSION_3_2) {
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.a, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.b, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.c, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.d, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- }
-
dml21_build_fams2_programming(in_dc, context, dml_ctx);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
deleted file mode 100644
index d82c681a5402..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DML_DML_DCN3_SOC_BB__
-#define __DML_DML_DCN3_SOC_BB__
-
-#include "dml_top_soc_parameter_types.h"
-
-static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 12,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 257,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- .minimum_uclk_khz = 97,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 61,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 118,
- },
- {
- .minimum_uclk_khz = 435,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 90,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 135,
- },
- {
- .minimum_uclk_khz = 731,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 124,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 155,
- },
- {
- .minimum_uclk_khz = 1187,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 160,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 177,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
-};
-
-static const struct dml2_soc_bb dml2_socbb_dcn31 = {
- .clk_table = {
- .uclk = {
- .clk_values_khz = {97000, 435000, 731000, 1187000},
- .num_clk_values = 4,
- },
- .fclk = {
- .clk_values_khz = {300000, 2500000},
- .num_clk_values = 2,
- },
- .dcfclk = {
- .clk_values_khz = {200000, 1800000},
- .num_clk_values = 2,
- },
- .dispclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dppclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dtbclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .phyclk = {
- .clk_values_khz = {810000, 810000},
- .num_clk_values = 2,
- },
- .socclk = {
- .clk_values_khz = {300000, 1600000},
- .num_clk_values = 2,
- },
- .dscclk = {
- .clk_values_khz = {666667, 666667},
- .num_clk_values = 2,
- },
- .phyclk_d18 = {
- .clk_values_khz = {625000, 625000},
- .num_clk_values = 2,
- },
- .phyclk_d32 = {
- .clk_values_khz = {2000000, 2000000},
- .num_clk_values = 2,
- },
- .dram_config = {
- .channel_width_bytes = 2,
- .channel_count = 16,
- .transactions_per_clock = 16,
- },
- },
-
- .qos_parameters = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 0,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 260,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- // State 1
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 72,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 117,
- },
- {
- // State 2
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 146,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 133,
- },
- {
- // State 3
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 564,
- .trip_to_memory_uclk_cycles = 853,
- .meta_trip_to_memory_uclk_cycles = 853,
- .maximum_latency_when_urgent_uclk_cycles = 164,
- .average_latency_when_urgent_uclk_cycles = 164,
- .maximum_latency_when_non_urgent_uclk_cycles = 853,
- .average_latency_when_non_urgent_uclk_cycles = 136,
- },
- {
- // State 4
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 613,
- .trip_to_memory_uclk_cycles = 869,
- .meta_trip_to_memory_uclk_cycles = 869,
- .maximum_latency_when_urgent_uclk_cycles = 213,
- .average_latency_when_urgent_uclk_cycles = 213,
- .maximum_latency_when_non_urgent_uclk_cycles = 869,
- .average_latency_when_non_urgent_uclk_cycles = 149,
- },
- {
- // State 5
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 232,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 153,
- },
- {
- // State 6
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 665,
- .trip_to_memory_uclk_cycles = 885,
- .meta_trip_to_memory_uclk_cycles = 885,
- .maximum_latency_when_urgent_uclk_cycles = 265,
- .average_latency_when_urgent_uclk_cycles = 265,
- .maximum_latency_when_non_urgent_uclk_cycles = 885,
- .average_latency_when_non_urgent_uclk_cycles = 161,
- },
- {
- // State 7
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 689,
- .trip_to_memory_uclk_cycles = 895,
- .meta_trip_to_memory_uclk_cycles = 895,
- .maximum_latency_when_urgent_uclk_cycles = 289,
- .average_latency_when_urgent_uclk_cycles = 289,
- .maximum_latency_when_non_urgent_uclk_cycles = 895,
- .average_latency_when_non_urgent_uclk_cycles = 167,
- },
- {
- // State 8
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 316,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 174,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
- },
-
- .power_management_parameters = {
- .dram_clk_change_blackout_us = 400,
- .fclk_change_blackout_us = 0,
- .g7_ppt_blackout_us = 0,
- .stutter_enter_plus_exit_latency_us = 50,
- .stutter_exit_latency_us = 43,
- .z8_stutter_enter_plus_exit_latency_us = 0,
- .z8_stutter_exit_latency_us = 0,
- },
-
- .vmin_limit = {
- .dispclk_khz = 600 * 1000,
- },
-
- .dprefclk_mhz = 700,
- .xtalclk_mhz = 100,
- .pcie_refclk_mhz = 100,
- .dchub_refclk_mhz = 50,
- .mall_allocated_for_dcn_mbytes = 64,
- .max_outstanding_reqs = 512,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .return_bus_width_bytes = 64,
- .hostvm_min_page_size_kbytes = 0,
- .gpuvm_min_page_size_kbytes = 256,
- .phy_downspread_percent = 0,
- .dcn_downspread_percent = 0,
- .dispclk_dppclk_vco_speed_mhz = 4500,
- .do_urgent_latency_adjustment = 0,
- .mem_word_bytes = 32,
- .num_dcc_mcaches = 8,
- .mcache_size_bytes = 2048,
- .mcache_line_size_bytes = 32,
- .max_fclk_for_uclk_dpm_khz = 1250 * 1000,
-};
-
-static const struct dml2_ip_capabilities dml2_dcn31_max_ip_caps = {
- .pipe_count = 4,
- .otg_count = 4,
- .num_dsc = 4,
- .max_num_dp2p0_streams = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_dp2p0_outputs = 4,
- .rob_buffer_size_kbytes = 192,
- .config_return_buffer_size_in_kbytes = 1152,
- .meta_fifo_size_in_kentries = 22,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .subvp_drr_scheduling_margin_us = 100,
- .subvp_prefetch_end_to_mall_start_us = 15,
- .subvp_fw_processing_delay = 15,
-
- .fams2 = {
- .max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 50,
- .vertical_interrupt_ack_delay_us = 18,
- .allow_programming_delay_us = 18,
- .min_allow_width_us = 20,
- .subvp_df_throttle_delay_us = 100,
- .subvp_programming_delay_us = 18,
- .subvp_prefetch_to_mall_delay_us = 18,
- .drr_programming_delay_us = 18,
- },
-};
-
-#endif /* __DML_DML_DCN3_SOC_BB__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 8ef7977841de..793e1c038efd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -344,6 +344,7 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.config_return_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 22,
.compressed_buffer_segment_size_in_kbytes = 64,
+ .cursor_buffer_size = 24,
.max_flip_time_us = 80,
.max_flip_time_lines = 32,
.hostvm_mode = 0,
@@ -354,7 +355,7 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.fams2 = {
.max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 125,
+ .scheduling_delay_us = 550,
.vertical_interrupt_ack_delay_us = 40,
.allow_programming_delay_us = 18,
.min_allow_width_us = 20,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index b132f676a68d..5e1ab6d97640 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -10,9 +10,10 @@
#define DML2_MAX_PLANES 8
#define DML2_MAX_DCN_PIPES 8
#define DML2_MAX_MCACHES 8 // assume plane is going to be supported by a max of 8 mcaches
+#define DML2_MAX_WRITEBACK 3
enum dml2_swizzle_mode {
- dml2_sw_linear,
+ dml2_sw_linear, // SW_LINEAR accepts 256 byte aligned pitch and also 128 byte aligned pitch if DCC is not enabled
dml2_sw_256b_2d,
dml2_sw_4kb_2d,
dml2_sw_64kb_2d,
@@ -24,7 +25,8 @@ enum dml2_swizzle_mode {
dml2_gfx11_sw_64kb_d_x,
dml2_gfx11_sw_64kb_r_x,
dml2_gfx11_sw_256kb_d_x,
- dml2_gfx11_sw_256kb_r_x
+ dml2_gfx11_sw_256kb_r_x,
+
};
enum dml2_source_format_class {
@@ -38,7 +40,13 @@ enum dml2_source_format_class {
dml2_rgbe_alpha = 9,
dml2_rgbe = 10,
dml2_mono_8 = 11,
- dml2_mono_16 = 12
+ dml2_mono_16 = 12,
+ dml2_422_planar_8 = 13,
+ dml2_422_planar_10 = 14,
+ dml2_422_planar_12 = 15,
+ dml2_422_packed_8 = 16,
+ dml2_422_packed_10 = 17,
+ dml2_422_packed_12 = 18
};
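[Annotation] Six 4:2:2 source formats (planar and packed, at 8/10/12 bpc) are appended with explicit values 13-18. Per the dml_is_420() hunk later in this patch they are all non-4:2:0; since the values are contiguous, a range test is one way a caller could classify them — an illustrative helper assuming the enum above is in scope, not an API in the patch:

static int dml_is_422(enum dml2_source_format_class f)
{
	return f >= dml2_422_planar_8 && f <= dml2_422_packed_12; /* values 13..18 */
}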
enum dml2_rotation_angle {
@@ -121,15 +129,6 @@ enum dml2_dsc_enable_option {
dml2_dsc_enable_if_necessary = 2
};
-enum dml2_pstate_support_method {
- dml2_pstate_method_uninitialized,
- dml2_pstate_method_not_supported,
- dml2_pstate_method_vactive,
- dml2_pstate_method_vblank,
- dml2_pstate_method_svp,
- dml2_pstate_method_drr
-};
-
enum dml2_tdlut_addressing_mode {
dml2_tdlut_sw_linear = 0,
dml2_tdlut_simple_linear = 1
@@ -287,22 +286,23 @@ struct dml2_link_output_cfg {
bool validate_output; // Do not validate the link configuration for this display stream.
};
-struct dml2_writeback_cfg {
- bool enable;
+struct dml2_writeback_info {
enum dml2_source_format_class pixel_format;
- unsigned int active_writebacks_per_surface;
+ unsigned long input_width;
+ unsigned long input_height;
+ unsigned long output_width;
+ unsigned long output_height;
+ unsigned long v_taps;
+ unsigned long h_taps;
+ unsigned long v_taps_chroma;
+ unsigned long h_taps_chroma;
+ double h_ratio;
+ double v_ratio;
+};
- struct {
- bool enabled;
- unsigned long input_width;
- unsigned long input_height;
- unsigned long output_width;
- unsigned long output_height;
- unsigned long v_taps;
- unsigned long h_taps;
- double h_ratio;
- double v_ratio;
- } scaling_info;
+struct dml2_writeback_cfg {
+ unsigned int active_writebacks_per_stream;
+ struct dml2_writeback_info writeback_stream[DML2_MAX_WRITEBACK];
};
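[Annotation] The writeback config is restructured: the single enable/scaling_info pair becomes an array of up to DML2_MAX_WRITEBACK per-stream dml2_writeback_info entries, each gaining chroma tap counts, and the boolean flags give way to active_writebacks_per_stream. A hedged example of filling the new layout — placeholder numbers, and it assumes the struct definitions from the hunk above are in scope; the pixel format name is borrowed from elsewhere in this patch as a stand-in:

static void fill_example_wb(struct dml2_writeback_cfg *wb)
{
	wb->active_writebacks_per_stream = 1;
	wb->writeback_stream[0].pixel_format = dml2_444_64; /* placeholder choice */
	wb->writeback_stream[0].input_width = 1920;
	wb->writeback_stream[0].input_height = 1080;
	wb->writeback_stream[0].output_width = 960;
	wb->writeback_stream[0].output_height = 540;
	wb->writeback_stream[0].h_taps = 4;
	wb->writeback_stream[0].v_taps = 4;
	wb->writeback_stream[0].h_taps_chroma = 2;
	wb->writeback_stream[0].v_taps_chroma = 2;
	wb->writeback_stream[0].h_ratio = 1920.0 / 960.0; /* 2:1 downscale */
	wb->writeback_stream[0].v_ratio = 1080.0 / 540.0;
}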
struct dml2_plane_parameters {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index ebd8abe894a9..5f0bc42d1d2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -167,11 +167,13 @@ struct dml2_ip_capabilities {
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
unsigned int max_num_dp2p0_outputs;
+ unsigned int max_num_wb;
unsigned int rob_buffer_size_kbytes;
unsigned int config_return_buffer_size_in_kbytes;
unsigned int config_return_buffer_segment_size_in_kbytes;
unsigned int meta_fifo_size_in_kentries;
unsigned int compressed_buffer_segment_size_in_kbytes;
+ unsigned int cursor_buffer_size;
unsigned int max_flip_time_us;
unsigned int max_flip_time_lines;
unsigned int hostvm_mode;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index eeb96c455658..d2d053f2354d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -26,20 +26,14 @@ enum dml2_project_id {
dml2_project_dcn4x_stage2_auto_drr_svp = 3,
};
-enum dml2_dram_clock_change_support {
- dml2_dram_clock_change_vactive = 0,
- dml2_dram_clock_change_vblank = 1,
- dml2_dram_clock_change_vblank_and_vactive = 2,
- dml2_dram_clock_change_drr = 3,
- dml2_dram_clock_change_mall_svp = 4,
- dml2_dram_clock_change_mall_full_frame = 6,
- dml2_dram_clock_change_unsupported = 7
-};
-
-enum dml2_fclock_change_support {
- dml2_fclock_change_vactive = 0,
- dml2_fclock_change_vblank = 1,
- dml2_fclock_change_unsupported = 2
+enum dml2_pstate_change_support {
+ dml2_pstate_change_vactive = 0,
+ dml2_pstate_change_vblank = 1,
+ dml2_pstate_change_vblank_and_vactive = 2,
+ dml2_pstate_change_drr = 3,
+ dml2_pstate_change_mall_svp = 4,
+ dml2_pstate_change_mall_full_frame = 6,
+ dml2_pstate_change_unsupported = 7
};
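[Annotation] The separate dram_clock_change and fclock_change support enums collapse into one dml2_pstate_change_support enumeration, keeping the old DRAM numbering (note the gap at 5 and unsupported = 7). A small stringifier over the merged names, the kind of helper useful for logging — illustrative, not part of the patch:

static const char *pstate_change_str(enum dml2_pstate_change_support s)
{
	switch (s) {
	case dml2_pstate_change_vactive:            return "vactive";
	case dml2_pstate_change_vblank:             return "vblank";
	case dml2_pstate_change_vblank_and_vactive: return "vblank+vactive";
	case dml2_pstate_change_drr:                return "drr";
	case dml2_pstate_change_mall_svp:           return "mall_svp";
	case dml2_pstate_change_mall_full_frame:    return "mall_full_frame";
	default:                                    return "unsupported";
	}
}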
enum dml2_output_type_and_rate__type {
@@ -202,24 +196,23 @@ struct dml2_mcache_surface_allocation {
} informative;
};
-enum dml2_uclk_pstate_support_method {
- dml2_uclk_pstate_support_method_not_supported = 0,
- /* hw */
- dml2_uclk_pstate_support_method_vactive = 1,
- dml2_uclk_pstate_support_method_vblank = 2,
- dml2_uclk_pstate_support_method_reserved_hw = 5,
- /* fw */
- dml2_uclk_pstate_support_method_fw_subvp_phantom = 6,
- dml2_uclk_pstate_support_method_reserved_fw = 10,
- /* fw w/drr */
- dml2_uclk_pstate_support_method_fw_vactive_drr = 11,
- dml2_uclk_pstate_support_method_fw_vblank_drr = 12,
- dml2_uclk_pstate_support_method_fw_subvp_phantom_drr = 13,
- dml2_uclk_pstate_support_method_reserved_fw_drr_fixed = 20,
- dml2_uclk_pstate_support_method_fw_drr = 21,
- dml2_uclk_pstate_support_method_reserved_fw_drr_var = 22,
-
- dml2_uclk_pstate_support_method_count
+enum dml2_pstate_method {
+ dml2_pstate_method_na = 0,
+ /* hw exclusive modes */
+ dml2_pstate_method_vactive = 1,
+ dml2_pstate_method_vblank = 2,
+ dml2_pstate_method_reserved_hw = 5,
+ /* fw assisted exclusive modes */
+ dml2_pstate_method_fw_svp = 6,
+ dml2_pstate_method_reserved_fw = 10,
+ /* fw assisted modes requiring drr modulation */
+ dml2_pstate_method_fw_vactive_drr = 11,
+ dml2_pstate_method_fw_vblank_drr = 12,
+ dml2_pstate_method_fw_svp_drr = 13,
+ dml2_pstate_method_reserved_fw_drr_clamped = 20,
+ dml2_pstate_method_fw_drr = 21,
+ dml2_pstate_method_reserved_fw_drr_var = 22,
+ dml2_pstate_method_count
};
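[Annotation] As the comments in the new enum note, the value ranges partition the methods: 1-5 hardware-exclusive, 6-10 firmware-assisted exclusive, 11-22 firmware-assisted with DRR modulation, with the reserved slots holding each range open. One plausible classification helper built on that numbering — an assumption drawn from the layout, not an API in the patch:

static int pstate_method_uses_fw(enum dml2_pstate_method m)
{
	/* assumption: values 6..22 cover the fw-assisted ranges */
	return m >= dml2_pstate_method_fw_svp && m < dml2_pstate_method_count;
}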
struct dml2_per_plane_programming {
@@ -241,7 +234,7 @@ struct dml2_per_plane_programming {
// If a stream is using odm split, then this value is always 1
unsigned int num_dpps_required;
- enum dml2_uclk_pstate_support_method uclk_pstate_support_method;
+ enum dml2_pstate_method uclk_pstate_support_method;
// MALL size requirements for MALL SS and SubVP
unsigned int surface_size_mall_bytes;
@@ -281,7 +274,7 @@ struct dml2_per_stream_programming {
unsigned int num_odms_required;
- enum dml2_uclk_pstate_support_method uclk_pstate_method;
+ enum dml2_pstate_method uclk_pstate_method;
struct {
bool enabled;
@@ -289,7 +282,8 @@ struct dml2_per_stream_programming {
union dml2_global_sync_programming global_sync;
} phantom_stream;
- struct dmub_fams2_stream_static_state fams2_params;
+ union dmub_cmd_fams2_config fams2_base_params;
+ union dmub_cmd_fams2_config fams2_sub_params;
};
//-----------------
@@ -339,7 +333,7 @@ struct dml2_mode_support_info {
bool DCCMetaBufferSizeNotExceeded;
bool TotalVerticalActiveBandwidthSupport;
bool VActiveBandwidthSupport;
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
bool USRRetrainingSupport;
bool PrefetchSupported;
bool DynamicMetadataSupported;
@@ -361,6 +355,7 @@ struct dml2_mode_support_info {
unsigned int AlignedYPitch[DML2_MAX_PLANES];
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
+ bool temp_read_or_ppt_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
@@ -392,6 +387,11 @@ struct dml2_display_cfg_programming {
unsigned long fclk_khz;
unsigned long dcfclk_khz;
} svp_prefetch;
+ struct {
+ unsigned long uclk_khz;
+ unsigned long fclk_khz;
+ unsigned long dcfclk_khz;
+ } svp_prefetch_no_throttle;
unsigned long deepsleep_dcfclk_khz;
unsigned long dispclk_khz;
@@ -444,7 +444,7 @@ struct dml2_display_cfg_programming {
double pstate_change_us;
double fclk_pstate_change_us;
double usr_retraining_us;
- double g6_temp_read_watermark_us;
+ double temp_read_or_ppt_watermark_us;
} watermarks;
struct {
@@ -653,6 +653,7 @@ struct dml2_display_cfg_programming {
double DisplayPipeLineDeliveryTimeLumaPrefetch[DML2_MAX_PLANES];
double DisplayPipeLineDeliveryTimeChromaPrefetch[DML2_MAX_PLANES];
+ double WritebackRequiredBandwidth;
double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
double DSCCLK_calculated[DML2_MAX_PLANES];
@@ -662,6 +663,7 @@ struct dml2_display_cfg_programming {
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
bool ROBUrgencyAvoidance;
+ double LowestPrefetchMargin;
} misc;
struct dml2_mode_support_info mode_support_info;
@@ -675,6 +677,7 @@ struct dml2_display_cfg_programming {
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_map_watermarks;
} informative;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 3d41ffde91c1..d68b4567e218 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -9,7 +9,7 @@
#include "dml2_debug.h"
#include "lib_float_math.h"
-static const struct dml2_core_ip_params core_dcn4_ip_caps_base = {
+struct dml2_core_ip_params core_dcn4_ip_caps_base = {
// Hardcoded values for DCN3x
.vblank_nom_default_us = 668,
.remote_iommu_outstanding_translations = 256,
@@ -90,6 +90,7 @@ static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *i
ip_caps->config_return_buffer_segment_size_in_kbytes = ip_params->config_return_buffer_segment_size_in_kbytes;
ip_caps->meta_fifo_size_in_kentries = ip_params->meta_fifo_size_in_kentries;
ip_caps->compressed_buffer_segment_size_in_kbytes = ip_params->compressed_buffer_segment_size_in_kbytes;
+ ip_caps->cursor_buffer_size = ip_params->cursor_buffer_size;
ip_caps->max_flip_time_us = ip_params->max_flip_time_us;
ip_caps->max_flip_time_lines = ip_params->max_flip_time_lines;
ip_caps->hostvm_mode = ip_params->hostvm_mode;
@@ -114,6 +115,7 @@ static void patch_ip_params_with_ip_caps(struct dml2_core_ip_params *ip_params,
ip_params->config_return_buffer_segment_size_in_kbytes = ip_caps->config_return_buffer_segment_size_in_kbytes;
ip_params->meta_fifo_size_in_kentries = ip_caps->meta_fifo_size_in_kentries;
ip_params->compressed_buffer_segment_size_in_kbytes = ip_caps->compressed_buffer_segment_size_in_kbytes;
+ ip_params->cursor_buffer_size = ip_caps->cursor_buffer_size;
ip_params->max_flip_time_us = ip_caps->max_flip_time_us;
ip_params->max_flip_time_lines = ip_caps->max_flip_time_lines;
ip_params->hostvm_mode = ip_caps->hostvm_mode;
@@ -316,28 +318,9 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
// Setup the appropriate p-state strategy
if (display_cfg->stage3.performed && display_cfg->stage3.success) {
- switch (display_cfg->stage3.pstate_switch_modes[plane_index]) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_drr:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
- programming->plane_programming[plane_index].uclk_pstate_support_method = display_cfg->stage3.pstate_switch_modes[plane_index];
- break;
- case dml2_uclk_pstate_support_method_reserved_hw:
- case dml2_uclk_pstate_support_method_reserved_fw:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_fixed:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_var:
- case dml2_uclk_pstate_support_method_not_supported:
- case dml2_uclk_pstate_support_method_count:
- default:
- programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
- break;
- }
+ programming->plane_programming[plane_index].uclk_pstate_support_method = display_cfg->stage3.pstate_switch_modes[plane_index];
} else {
- programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_na;
}
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
@@ -360,7 +343,8 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
/* unconditionally populate fams2 params */
dml2_core_calcs_get_stream_fams2_programming(&core->clean_me_up.mode_lib,
display_cfg,
- &programming->stream_programming[main_plane->stream_index].fams2_params,
+ &programming->stream_programming[main_plane->stream_index].fams2_base_params,
+ &programming->stream_programming[main_plane->stream_index].fams2_sub_params,
programming->stream_programming[main_plane->stream_index].uclk_pstate_method,
plane_index);
@@ -572,18 +556,18 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
in_out->programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index];
if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else {
if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_vactive;
else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_vblank;
else
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_na;
}
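[Annotation] The fallback chain above selects the legacy uclk p-state method by comparing latency-hiding capability against the DRAM clock-change blackout window. Reduced to a stand-alone decision function — names are stand-ins, thresholds as in the hunk:

/* returns 1 = vactive, 2 = vblank, 0 = not supported (na) */
static int pick_uclk_pstate_method(double max_active_latency_hiding_us,
				   double twait_us, double dram_blackout_us)
{
	if (max_active_latency_hiding_us >= dram_blackout_us)
		return 1; /* the switch hides entirely within active display */
	if (twait_us >= dram_blackout_us)
		return 2; /* it fits within the vblank wait instead */
	return 0;
}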
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 601320b1be81..c4dbf27abaf8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -11,6 +11,9 @@
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
#define DML_MAX_NUM_OF_SLICES_PER_DSC 4
+#define DML_MAX_COMPRESSION_RATIO 4
+//#define DML_MODE_SUPPORT_USE_DPM_DRAM_BW
+//#define DML_GLOBAL_PREFETCH_CHECK
#define ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
@@ -132,9 +135,9 @@ static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_su
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
@@ -315,12 +318,11 @@ dml_get_var_func(meta_trip_memory_us, double, mode_lib->mp.MetaTripToMemory);
dml_get_var_func(wm_fclk_change, double, mode_lib->mp.Watermark.FCLKChangeWatermark);
dml_get_var_func(wm_usr_retraining, double, mode_lib->mp.Watermark.USRRetrainingWatermark);
-dml_get_var_func(wm_g6_temp_read, double, mode_lib->mp.Watermark.g6_temp_read_watermark_us);
+dml_get_var_func(wm_temp_read_or_ppt, double, mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us);
dml_get_var_func(wm_dram_clock_change, double, mode_lib->mp.Watermark.DRAMClockChangeWatermark);
dml_get_var_func(fraction_of_urgent_bandwidth, double, mode_lib->mp.FractionOfUrgentBandwidth);
dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, double, mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip);
dml_get_var_func(fraction_of_urgent_bandwidth_mall, double, mode_lib->mp.FractionOfUrgentBandwidthMALL);
-dml_get_var_func(urgent_latency, double, mode_lib->mp.UrgentLatency);
dml_get_var_func(wm_writeback_dram_clock_change, double, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
dml_get_var_func(wm_writeback_fclk_change, double, mode_lib->mp.Watermark.WritebackFCLKChangeWatermark);
dml_get_var_func(stutter_efficiency, double, mode_lib->mp.StutterEfficiency);
@@ -355,7 +357,9 @@ dml_get_var_func(svp_prefetch_urg_bw_available_sdp, double, mode_lib->mp.urg_ban
dml_get_var_func(svp_prefetch_urg_bw_available_dram, double, mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
dml_get_var_func(svp_prefetch_urg_bw_available_dram_vm_only, double, mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_svp_prefetch]);
+dml_get_var_func(urgent_latency, double, mode_lib->mp.UrgentLatency);
dml_get_var_func(max_urgent_latency_us, double, mode_lib->ms.support.max_urgent_latency_us);
+dml_get_var_func(max_non_urgent_latency_us, double, mode_lib->ms.support.max_non_urgent_latency_us);
dml_get_var_func(avg_non_urgent_latency_us, double, mode_lib->ms.support.avg_non_urgent_latency_us);
dml_get_var_func(avg_urgent_latency_us, double, mode_lib->ms.support.avg_urgent_latency_us);
@@ -466,6 +470,24 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
case dml2_420_12:
val = 1;
break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
case dml2_rgbe_alpha:
val = 0;
break;
@@ -487,32 +509,31 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256; break;
- case (dml2_sw_256b_2d):
- return 256; break;
- case (dml2_sw_4kb_2d):
- return 4096; break;
- case (dml2_sw_64kb_2d):
- return 65536; break;
- case (dml2_sw_256kb_2d):
- return 262144; break;
- case (dml2_gfx11_sw_linear):
- return 256; break;
- case (dml2_gfx11_sw_64kb_d):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536; break;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144; break;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144; break;
- default:
+ if (sw_mode == dml2_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_sw_256b_2d)
+ return 256;
+ else if (sw_mode == dml2_sw_4kb_2d)
+ return 4096;
+ else if (sw_mode == dml2_sw_64kb_2d)
+ return 65536;
+ else if (sw_mode == dml2_sw_256kb_2d)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_t)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_256kb_d_x)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
+ return 262144;
+ else {
DML2_ASSERT(0);
return 256;
}
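[Annotation] The switch-to-if/else rewrite above is behavior-preserving: every swizzle mode still maps to one of three tile block sizes. Grouped by result, the same mapping reads as below — an equivalent sketch assuming the dml2_swizzle_mode enumerators from the header hunk earlier, with the caveat that the original additionally asserts on unknown modes and falls back to 256:

static unsigned int tile_block_size_bytes(enum dml2_swizzle_mode m)
{
	if (m == dml2_sw_linear || m == dml2_sw_256b_2d || m == dml2_gfx11_sw_linear)
		return 256;
	if (m == dml2_sw_4kb_2d)
		return 4096;
	if (m == dml2_sw_64kb_2d || m == dml2_gfx11_sw_64kb_d ||
	    m == dml2_gfx11_sw_64kb_d_t || m == dml2_gfx11_sw_64kb_d_x ||
	    m == dml2_gfx11_sw_64kb_r_x)
		return 65536;
	return 262144; /* the three 256 KB modes */
}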
@@ -579,8 +600,8 @@ static void CalculateBytePerPixelAndBlockSizes(
{
*BytePerPixelDETY = 0;
*BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 1;
if (SourcePixelFormat == dml2_444_64) {
*BytePerPixelDETY = 8;
@@ -820,7 +841,7 @@ static void CalculateSwathWidth(
// Output
unsigned int req_per_swath_ub_l[],
unsigned int req_per_swath_ub_c[],
- unsigned int SwathWidthSingleDPPY[],
+ unsigned int SwathWidthSingleDPPY[], // post-rotated plane width
unsigned int SwathWidthSingleDPPC[],
unsigned int SwathWidthY[], // per-pipe
unsigned int SwathWidthC[], // per-pipe
@@ -1403,7 +1424,6 @@ static unsigned int dscceComputeDelay(
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, padding_pixels, ssm_group_priming_delay, ssm_pipeline_delay, obsm_pipeline_delay, slice_padded_pixels, ixd_plus_padding, ixd_plus_padding_groups, cycles_per_group, group_delay, pipeline_delay, pixels, additional_group_delay, lines_to_reach_ixd, groups_to_reach_ixd, slice_width_groups, initial_xmit_delay, number_of_lines_to_reach_ixd, slice_width_modified;
-
if (pixelFormat == dml2_420)
pixelsPerClock = 2;
// #all other modes operate at 1 pixel per clock
@@ -1428,7 +1448,6 @@ static unsigned int dscceComputeDelay(
}
}
-
//sub-stream multiplexer balance fifo priming delay in groups as per dsc standard
if (bpc == 8)
ssm_group_priming_delay = 83;
@@ -1447,9 +1466,6 @@ static unsigned int dscceComputeDelay(
//determine number of padded pixels in the last group of a slice line, computed as
slice_padded_pixels = 3 * slice_width_groups - slice_width_modified;
-
-
-
//determine integer number of complete slice lines required to reach initial transmit delay without ssm delay considered
number_of_lines_to_reach_ixd = initial_xmit_delay / slice_width_modified;
@@ -1463,7 +1479,6 @@ static unsigned int dscceComputeDelay(
//number of groups required for a slice to reach initial transmit delay is the sum of the padded initial transmit delay plus the ssm group priming delay
groups_to_reach_ixd = ixd_plus_padding_groups + ssm_group_priming_delay;
-
//number of lines required to reach padded initial transmit delay in groups in slices to the left of the last horizontal slice
//needs to be rounded up as a complete slice lines are buffered prior to initial transmit delay being reached in the last horizontal slice
lines_to_reach_ixd = (groups_to_reach_ixd + slice_width_groups - 1) / slice_width_groups; //round up lines to reach ixd to next
@@ -1506,7 +1521,6 @@ static unsigned int dscceComputeDelay(
return pixels;
}
-
//updated in dcn4
static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, enum dml2_output_encoder_class Output)
{
@@ -2090,7 +2104,6 @@ static void CalculateDCCConfiguration(
yuv420 = 1;
else
yuv420 = 0;
-
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
@@ -2561,8 +2574,7 @@ static void calculate_mcache_setting(
if (*p->num_mcaches_l) {
l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
}
-
- if (l->is_dual_plane && *p->num_mcaches_c) {
+ if (l->is_dual_plane) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
@@ -2682,12 +2694,12 @@ static double dml_get_return_bandwidth_available(
bool is_avg_bw,
bool is_hvm_en,
bool is_hvm_only,
- double dcflk_mhz,
+ double dcfclk_mhz,
double fclk_mhz,
double dram_bw_mbps)
{
double return_bw_mbps = 0.;
- double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcflk_mhz;
+ double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcfclk_mhz;
double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
@@ -2753,7 +2765,7 @@ static double dml_get_return_bandwidth_available(
dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcflk_mhz = %f\n", __func__, dcflk_mhz);
+ dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
@@ -3516,10 +3528,9 @@ static void CalculateUrgentBurstFactor(
dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
-
}
-static void CalculateDCFCLKDeepSleep(
+static void CalculateDCFCLKDeepSleepTdlut(
const struct dml2_display_cfg *display_cfg,
unsigned int NumberOfActiveSurfaces,
unsigned int BytePerPixelY[],
@@ -3534,6 +3545,10 @@ static void CalculateDCFCLKDeepSleep(
double ReadBandwidthChroma[],
unsigned int ReturnBusWidth,
+ double dispclk,
+ unsigned int tdlut_bytes_to_deliver[],
+ double prefetch_swath_time_us[],
+
// Output
double *DCFClkDeepSleep)
{
@@ -3568,6 +3583,22 @@ static void CalculateDCFCLKDeepSleep(
}
DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], pixel_rate_mhz / 16);
+ // adjust for 3dlut delivery time
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
+ double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
+
+ dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+
+ // increase the deepsleep dcfclk to match the original dispclk throughput rate
+ if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], tdlut_required_deepsleep_dcfclk);
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], dispclk / 4.0);
+ }
+ }
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
@@ -3590,9 +3621,56 @@ static void CalculateDCFCLKDeepSleep(
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
+
dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
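
A worked instance of the 3dlut adjustment above, with invented numbers: the deep-sleep DCFCLK must be able to drain tdlut_bytes_to_deliver at 64 bytes per cycle within the prefetch swath time, and when that requirement dominates the result is also floored at dispclk / 4 to match the original dispclk throughput rate.

#include <stdio.h>

int main(void)
{
	double tdlut_bytes_to_deliver = 16384.0;   /* invented; cursor_buffer_size * 1024 in the code above */
	double prefetch_swath_time_us = 10.0;      /* invented */
	double dispclk_mhz = 1200.0;               /* invented */

	/* bytes / (64 bytes per cycle) = cycles; cycles / us = MHz */
	double required_mhz = tdlut_bytes_to_deliver / 64.0 / prefetch_swath_time_us;

	/* the code above applies this floor when the tdlut requirement dominates */
	if (required_mhz < dispclk_mhz / 4.0)
		required_mhz = dispclk_mhz / 4.0;

	printf("tdlut-required deep-sleep DCFCLK: %.1f MHz\n", required_mhz);  /* 300.0 */
	return 0;
}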
+static void CalculateDCFCLKDeepSleep(
+ const struct dml2_display_cfg *display_cfg,
+ unsigned int NumberOfActiveSurfaces,
+ unsigned int BytePerPixelY[],
+ unsigned int BytePerPixelC[],
+ unsigned int SwathWidthY[],
+ unsigned int SwathWidthC[],
+ unsigned int DPPPerSurface[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double Dppclk[],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ unsigned int ReturnBusWidth,
+
+ // Output
+ double *DCFClkDeepSleep)
+{
+ double zero_double[DML2_MAX_PLANES];
+ unsigned int zero_integer[DML2_MAX_PLANES];
+
+ memset(zero_double, 0, DML2_MAX_PLANES * sizeof(double));
+ memset(zero_integer, 0, DML2_MAX_PLANES * sizeof(unsigned int));
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ NumberOfActiveSurfaces,
+ BytePerPixelY,
+ BytePerPixelC,
+ SwathWidthY,
+ SwathWidthC,
+ DPPPerSurface,
+ PSCL_THROUGHPUT,
+ PSCL_THROUGHPUT_CHROMA,
+ Dppclk,
+ ReadBandwidthLuma,
+ ReadBandwidthChroma,
+ ReturnBusWidth,
+ 0,
+ zero_integer, //tdlut_bytes_to_deliver,
+ zero_double, //prefetch_swath_time_us,
+
+ // Output
+ DCFClkDeepSleep);
+}
+
static double CalculateWriteBackDelay(
enum dml2_source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -3816,8 +3894,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
			p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
			p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -4592,6 +4670,7 @@ static void calculate_tdlut_setting(
*p->tdlut_groups_per_2row_ub = 0;
*p->tdlut_opt_time = 0;
*p->tdlut_drain_time = 0;
+ *p->tdlut_bytes_to_deliver = 0;
*p->tdlut_bytes_per_group = 0;
*p->tdlut_pte_bytes_per_frame = 0;
*p->tdlut_bytes_per_frame = 0;
@@ -4660,6 +4739,7 @@ static void calculate_tdlut_setting(
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
+ *p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
}
#ifdef __DML_VBA_DEBUG__
@@ -4680,6 +4760,7 @@ static void calculate_tdlut_setting(
dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
@@ -5069,20 +5150,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->trip_to_mem = 0.0;
*p->Tvm_trips = 0.0;
*p->Tr0_trips = 0.0;
- s->Tvm_no_trip_oto = 0.0;
- s->Tr0_no_trip_oto = 0.0;
s->Tvm_trips_rounded = 0.0;
s->Tr0_trips_rounded = 0.0;
s->max_Tsw = 0.0;
s->Lsw_oto = 0.0;
- s->Tpre_rounded = 0.0;
+ *p->Tpre_rounded = 0.0;
s->prefetch_bw_equ = 0.0;
s->Tvm_equ = 0.0;
s->Tr0_equ = 0.0;
s->Tdmbf = 0.0;
s->Tdmec = 0.0;
s->Tdmsks = 0.0;
- s->prefetch_sw_bytes = 0.0;
+ *p->prefetch_sw_bytes = 0.0;
s->prefetch_bw_pr = 0.0;
s->bytes_pp = 0.0;
s->dep_bytes = 0.0;
@@ -5207,6 +5286,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5277,23 +5357,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC;
}
- s->prefetch_bw_pr = s->bytes_pp * p->myPipe->PixelClock / (double)p->myPipe->DPPPerSurface;
- if (p->myPipe->VRatio < 1.0)
- s->prefetch_bw_pr = p->myPipe->VRatio * s->prefetch_bw_pr;
- s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
-
- s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
- s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
-
- s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
-
- s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
- s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
- s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+ *p->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
+ *p->prefetch_sw_bytes = *p->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes;
extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128);
@@ -5302,57 +5367,103 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0);
tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0);
+
+ s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
+ s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
+
+ // use vactive swath bw for prefetch oto and also cap prefetch_bw_oto to max_vratio_oto
+	// Note: in prefetch calculation, accounting is done mostly per-pipe.
+ // vactive swath bw represents the per-surface (aka per dml plane) bw to move vratio_l/c lines of bytes_l/c per line time
+ s->per_pipe_vactive_sw_bw = p->vactive_sw_bw_l / (double)p->myPipe->DPPPerSurface;
+
+ // one-to-one prefetch bw as one line of bytes per line time (as per vratio_pre_l/c = 1)
+ s->prefetch_bw_oto = (p->swath_width_luma_ub * p->myPipe->BytePerPixelY) / s->LineTime;
+
+ if (p->myPipe->BytePerPixelC > 0) {
+ s->per_pipe_vactive_sw_bw += p->vactive_sw_bw_c / (double)p->myPipe->DPPPerSurface;
+ s->prefetch_bw_oto += (p->swath_width_chroma_ub * p->myPipe->BytePerPixelC) / s->LineTime;
+ }
+
+ s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
+
+ s->prefetch_bw_oto = math_min2(s->prefetch_bw_oto, *p->prefetch_sw_bytes/(s->min_Lsw_oto*s->LineTime));
+
+ s->Lsw_oto = math_ceil2(4.0 * *p->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, 1.0) / 4.0;
+
s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto,
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
- s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+#endif
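
A compact sketch of the reworked one-to-one bandwidth selection above, with invented numbers; bandwidths are treated as bytes per microsecond so no unit conversions are needed:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double line_time_us = 8.0;                 /* invented */
	double swath_width_ub = 1920.0;            /* invented, luma only for brevity */
	double byte_per_pixel = 4.0;
	double per_pipe_vactive_sw_bw = 800.0;     /* invented */
	double overhead = 1.0;                     /* mall_prefetch_sdp_overhead_factor */
	double prefetch_sw_bytes = 614400.0;       /* invented */
	double min_lsw_oto = 2.0;

	/* one swath line per line time, floored by the vactive swath bw */
	double bw_oto = swath_width_ub * byte_per_pixel / line_time_us;
	bw_oto = fmax(per_pipe_vactive_sw_bw, bw_oto) * overhead;

	/* cap so the sw prefetch cannot finish faster than min_Lsw_oto lines */
	bw_oto = fmin(bw_oto, prefetch_sw_bytes / (min_lsw_oto * line_time_us));

	/* swath lines needed at that bandwidth, in quarter-line granularity */
	double lsw_oto = ceil(4.0 * prefetch_sw_bytes / bw_oto / line_time_us) / 4.0;

	printf("bw_oto = %f B/us, Lsw_oto = %f lines\n", bw_oto, lsw_oto);  /* 960, 80 */
	return 0;
}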
if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_no_trip_oto = math_max2(
+ s->Tvm_oto = math_max3(
+ *p->Tvm_trips,
*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
s->LineTime / 4.0);
- s->Tvm_oto = math_max2(
- *p->Tvm_trips,
- s->Tvm_no_trip_oto);
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
- s->Tvm_no_trip_oto = s->Tvm_trips_rounded;
s->Tvm_oto = s->Tvm_trips_rounded;
}
if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_no_trip_oto = math_max2(
+ s->Tr0_oto = math_max3(
+ *p->Tr0_trips,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
- s->Tr0_oto = math_max2(
- *p->Tr0_trips,
- s->Tr0_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
- } else {
- s->Tr0_no_trip_oto = (s->LineTime - s->Tvm_oto) / 4.0;
- s->Tr0_oto = s->Tr0_no_trip_oto;
- }
+ } else
+ s->Tr0_oto = s->LineTime / 4.0;
s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ if (p->impacted_dst_y_pre > 0) {
+ dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
+ dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ }
+#endif
+ *p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
+
//To (time for delay after scaler) in line time
Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal);
+ s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
//Tpre_equ in line time
if (p->DynamicMetadataVMEnabled && p->DynamicMetadataEnable)
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, *p->Tvm_trips) + s->TWait_p) / s->LineTime - Lo;
else
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, p->ExtraLatencyPrefetch) + s->TWait_p) / s->LineTime - Lo;
+
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ s->dst_y_prefetch_equ_impacted = math_max2(p->impacted_dst_y_pre, s->dst_y_prefetch_equ);
+
+ s->dst_y_prefetch_equ_impacted = math_min2(s->dst_y_prefetch_equ_impacted, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
+
+ if (s->dst_y_prefetch_equ_impacted > s->dst_y_prefetch_equ)
+ s->dst_y_prefetch_equ -= s->dst_y_prefetch_equ_impacted - s->dst_y_prefetch_equ;
+#endif
+
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
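
The 63.75 clamp above reflects DST_Y_PREFETCH being a U6.2 fixed-point register field: 6 integer bits plus 2 fractional bits can encode at most (2^8 - 1) / 4 = 63.75 lines. A minimal encode/decode sketch under that assumption:

#include <stdio.h>

static unsigned int u6_2_encode(double lines)
{
	if (lines > 63.75)
		lines = 63.75;                        /* register limit */
	return (unsigned int)(lines * 4.0 + 0.5);     /* round to nearest quarter */
}

static double u6_2_decode(unsigned int field)
{
	return field / 4.0;
}

int main(void)
{
	printf("%.2f\n", u6_2_decode(u6_2_encode(70.0)));  /* 63.75 */
	return 0;
}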
#ifdef __DML_VBA_DEBUG__
@@ -5370,7 +5481,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, s->prefetch_sw_bytes);
+ dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
@@ -5394,7 +5505,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
#endif
double Tpre = s->dst_y_prefetch_equ * s->LineTime;
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
+ *p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
@@ -5420,7 +5531,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
@@ -5434,78 +5545,85 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// Tpre_rounded is Tpre rounding to 2-bit fraction
// Tvm_trips_rounded is Tvm_trips ceiling to 1/4 line time
// Tr0_trips_rounded is Tr0_trips ceiling to 1/4 line time
- // So that means prefetch bw calculated can be higher since the total time availabe for prefetch is less
- bool min_Lsw_equ_ok = s->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+ // So that means prefetch bw calculated can be higher since the total time available for prefetch is less
+ bool min_Lsw_equ_ok = *p->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+ bool tpre_gt_req_latency = true;
+#if 0
+ // Check that Tpre_rounded is big enough if all of the stages of the prefetch are time constrained.
+ // The terms Tvm_trips_rounded and Tr0_trips_rounded represent the min time constraints for the VM and row stages.
+ // Normally, these terms cover the overall time constraint for Tpre >= (Tex + max{Ttrip, Turg}), but if these terms are at their minimum, an explicit check is necessary.
+ tpre_gt_req_latency = *p->Tpre_rounded > (math_max2(p->Turg, s->trip_to_mem) + p->ExtraLatencyPrefetch);
+#endif
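
Assuming math_floor2()/math_ceil2() act like floor()/ceil() at unit granularity, the rounding conventions described above all snap times to quarters of a line: Tpre rounds to the nearest quarter through a +0.125 bias, while the trip times round up, which is what keeps the prefetch bandwidth estimate conservative. A small sketch:

#include <math.h>
#include <stdio.h>

/* nearest quarter line, as used for Tpre_rounded / dst_y_prefetch_equ */
static double round_quarter(double lines)
{
	return floor(4.0 * (lines + 0.125)) / 4.0;
}

/* next quarter line up, as used for Tvm_trips_rounded / Tr0_trips_rounded */
static double ceil_quarter(double lines)
{
	return ceil(4.0 * lines) / 4.0;
}

int main(void)
{
	printf("%.2f %.2f\n", round_quarter(5.30), ceil_quarter(5.30));  /* 5.25 5.50 */
	return 0;
}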
- if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok) {
+ if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok && tpre_gt_req_latency) {
s->prefetch_bw1 = 0.;
s->prefetch_bw2 = 0.;
s->prefetch_bw3 = 0.;
s->prefetch_bw4 = 0.;
// prefetch_bw1: VM + 2*R0 + SW
- if (s->Tpre_rounded - *p->Tno_bw > 0) {
+ if (*p->Tpre_rounded - *p->Tno_bw > 0) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor
+ 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)
- + s->prefetch_sw_bytes)
- / (s->Tpre_rounded - *p->Tno_bw);
- s->Tsw_est1 = s->prefetch_sw_bytes / s->prefetch_bw1;
+ + *p->prefetch_sw_bytes)
+ / (*p->Tpre_rounded - *p->Tno_bw);
+ s->Tsw_est1 = *p->prefetch_sw_bytes / s->prefetch_bw1;
} else
s->prefetch_bw1 = 0;
dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
- if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
+ if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
- (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
+ (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, s->Tpre_rounded);
+ dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
// prefetch_bw2: VM + SW
- if (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
- s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
- s->Tsw_est2 = s->prefetch_sw_bytes / s->prefetch_bw2;
+ if (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
+ s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + *p->prefetch_sw_bytes) /
+ (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
+ s->Tsw_est2 = *p->prefetch_sw_bytes / s->prefetch_bw2;
} else
s->prefetch_bw2 = 0;
dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
- if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
- s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
+ if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
+ s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
- if (s->Tpre_rounded - s->Tvm_trips_rounded > 0) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - s->Tvm_trips_rounded);
- s->Tsw_est3 = s->prefetch_sw_bytes / s->prefetch_bw3;
+ if (*p->Tpre_rounded - s->Tvm_trips_rounded > 0) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + *p->prefetch_sw_bytes) /
+ (*p->Tpre_rounded - s->Tvm_trips_rounded);
+ s->Tsw_est3 = *p->prefetch_sw_bytes / s->prefetch_bw3;
} else
s->prefetch_bw3 = 0;
dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
- if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
+ if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
- if (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
- s->prefetch_bw4 = s->prefetch_sw_bytes / (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
+ if (*p->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
+ s->prefetch_bw4 = *p->prefetch_sw_bytes / (*p->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
else
s->prefetch_bw4 = 0;
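
All four candidates above share one shape: the bytes a given set of prefetch stages must move, divided by the slice of Tpre_rounded left after the fixed terms are subtracted (bw1: VM + 2*R0 + SW over Tpre - Tno_bw; bw2: VM + SW; bw3: 2*R0 + SW; bw4: SW alone). A generic helper with invented names and numbers:

#include <stdio.h>

/* bytes a stage set must move over the window remaining for it */
static double stage_bw(double bytes, double window_us)
{
	return window_us > 0.0 ? bytes / window_us : 0.0;
}

int main(void)
{
	double vm = 4096.0, row = 8192.0, sw = 614400.0;        /* invented byte counts */
	double tpre = 80.0, tno_bw = 2.0, tvm = 6.0, tr0 = 4.0; /* invented times, us */

	printf("bw1 = %f\n", stage_bw(vm + 2 * row + sw, tpre - tno_bw));
	printf("bw2 = %f\n", stage_bw(vm + sw, tpre - tno_bw - 2 * tr0));
	printf("bw3 = %f\n", stage_bw(2 * row + sw, tpre - tvm));
	printf("bw4 = %f\n", stage_bw(sw, tpre - tvm - 2 * tr0));
	return 0;
}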
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
@@ -5617,9 +5735,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->Lsw_equ = s->dst_y_prefetch_equ - math_ceil2(4.0 * (s->Tvm_equ + 2 * s->Tr0_equ) / s->LineTime, 1.0) / 4.0;
-
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
*p->dst_y_prefetch = s->dst_y_prefetch_oto;
@@ -5628,31 +5743,33 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_vm_no_trip_vblank = math_ceil2(4.0 * s->Tvm_no_trip_oto / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_row_no_trip_vblank = math_ceil2(4.0 * s->Tr0_no_trip_oto / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
+
} else {
*p->dst_y_prefetch = s->dst_y_prefetch_equ;
+
+ if (s->dst_y_prefetch_equ < s->dst_y_prefetch_equ_impacted)
+ *p->dst_y_prefetch = s->dst_y_prefetch_equ_impacted;
+
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_vm_no_trip_vblank = *p->dst_y_per_vm_vblank;
- s->dst_y_per_row_no_trip_vblank = *p->dst_y_per_row_vblank;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
- /* take worst case Lsw to calculate bandwidth requirement regardless of schedule */
- s->LinesToRequestPrefetchPixelData = math_min2(s->Lsw_equ, s->Lsw_oto); // Lsw
+ // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
+ s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
*p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
+ *p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
@@ -5663,6 +5780,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
@@ -5749,8 +5867,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
- __func__, min_Lsw_equ_ok, s->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ __func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
+ __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5769,13 +5889,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (vm_bytes == 0) {
prefetch_vm_bw = 0;
- } else if (s->dst_y_per_vm_no_trip_vblank > 0) {
+ } else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (s->dst_y_per_vm_no_trip_vblank * s->LineTime);
+ prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -5787,8 +5907,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
prefetch_row_bw = 0;
- } else if (s->dst_y_per_row_no_trip_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (s->dst_y_per_row_no_trip_vblank * s->LineTime);
+ } else if (*p->dst_y_per_row_vblank > 0) {
+ prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
@@ -5828,6 +5948,171 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
return s->NoTimeToPrefetch;
}
+static unsigned int get_num_lb_source_lines(unsigned int max_line_buffer_lines,
+ unsigned int line_buffer_size_bits,
+ unsigned int num_pipes,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ double h_ratio,
+ enum dml2_rotation_angle rotation_angle)
+{
+ unsigned int num_lb_source_lines = 0;
+ double lb_bit_per_pixel = 57.0;
+ unsigned recin_width = vp_width/num_pipes;
+
+ if (dml_is_vertical_rotation(rotation_angle))
+ recin_width = vp_height/num_pipes;
+
+ num_lb_source_lines = (unsigned int) math_min2((double) max_line_buffer_lines,
+ math_floor2(line_buffer_size_bits / lb_bit_per_pixel / (recin_width / math_max2(h_ratio, 1.0)), 1.0));
+
+ return num_lb_source_lines;
+}
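
get_num_lb_source_lines() above divides the line-buffer bit budget by the bits one source line occupies (a worst-case 57 bpp over the per-pipe input width, shrunk when downscaling), then caps at the hardware line count. With invented numbers:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double line_buffer_size_bits = 986880.0;   /* invented ip parameter */
	double lb_bit_per_pixel = 57.0;            /* worst case, as in the code above */
	double recin_width = 1920.0 / 2.0;         /* vp_width / num_pipes */
	double h_ratio = 1.5;                      /* 1.5x downscale */
	double max_line_buffer_lines = 32.0;       /* invented */

	double lines = floor(line_buffer_size_bits / lb_bit_per_pixel
			     / (recin_width / fmax(h_ratio, 1.0)));
	if (lines > max_line_buffer_lines)
		lines = max_line_buffer_lines;

	printf("lb source lines: %.0f\n", lines);  /* 27 */
	return 0;
}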
+
+static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned int num_planes, unsigned int Trpd_dcfclk_cycles[])
+{
+ int max_value = -1;
+ int max_idx = -1;
+ for (unsigned int i = 0; i < num_planes; i++) {
+ if (i != this_plane_idx && (int) Trpd_dcfclk_cycles[i] > max_value) {
+ max_value = Trpd_dcfclk_cycles[i];
+ max_idx = i;
+ }
+ }
+ if (max_idx <= 0) {
+ dml2_assert(max_idx >= 0);
+ max_idx = this_plane_idx;
+ }
+
+ return max_idx;
+}
+
+static double calculate_impacted_Tsw(unsigned int exclude_plane_idx, unsigned int num_planes, double *prefetch_swath_bytes, double bw_mbps)
+{
+ double sum = 0.;
+ for (unsigned int i = 0; i < num_planes; i++) {
+ if (i != exclude_plane_idx) {
+ sum += prefetch_swath_bytes[i];
+ }
+ }
+ return sum / bw_mbps;
+}
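
calculate_impacted_Tsw() above models the time plane i loses to everyone else's software prefetch: the swath bytes of every plane except the excluded one, drained at the estimated urgent bandwidth. With bandwidth in bytes per microsecond the sum comes out directly in microseconds; the numbers below are invented:

#include <stdio.h>

int main(void)
{
	double swath_bytes[3] = { 400000.0, 250000.0, 150000.0 };  /* invented */
	double bw_bytes_per_us = 20000.0;                          /* invented */
	unsigned int exclude = 0;
	double sum = 0.0;

	for (unsigned int i = 0; i < 3; i++)
		if (i != exclude)
			sum += swath_bytes[i];

	printf("impacted Tsw: %f us\n", sum / bw_bytes_per_us);  /* 20.0 */
	return 0;
}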
+
+// a global check against the aggregate effect of the per-plane prefetch schedules
+static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *scratch,
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *p)
+{
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals *s = &scratch->CheckGlobalPrefetchAdmissibility_locals;
+ unsigned int i, k;
+
+ memset(s, 0, sizeof(struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals));
+
+ *p->recalc_prefetch_schedule = 0;
+ s->prefetch_global_check_passed = 1;
+	// worst case if the rob and cdb are fully hogged
+ s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+#endif
+
+ // calculate the return impact from each plane, request is 256B per dcfclk
+ for (i = 0; i < p->num_active_planes; i++) {
+ s->src_detile_buf_size_bytes_l[i] = p->detile_buffer_size_bytes_l[i];
+ s->src_detile_buf_size_bytes_c[i] = p->detile_buffer_size_bytes_c[i];
+ s->src_swath_bytes_l[i] = p->full_swath_bytes_l[i];
+ s->src_swath_bytes_c[i] = p->full_swath_bytes_c[i];
+
+ if (p->pixel_format[i] == dml2_420_10) {
+ s->src_detile_buf_size_bytes_l[i] = (unsigned int) (s->src_detile_buf_size_bytes_l[i] * 1.5);
+ s->src_detile_buf_size_bytes_c[i] = (unsigned int) (s->src_detile_buf_size_bytes_c[i] * 1.5);
+ s->src_swath_bytes_l[i] = (unsigned int) (s->src_swath_bytes_l[i] * 1.5);
+ s->src_swath_bytes_c[i] = (unsigned int) (s->src_swath_bytes_c[i] * 1.5);
+ }
+
+ s->burst_bytes_to_fill_det = (unsigned int) (math_floor2(s->src_detile_buf_size_bytes_l[i] / p->chunk_bytes_l, 1) * p->chunk_bytes_l);
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+#endif
+
+ if (s->src_swath_bytes_c[i] > 0) { // dual_plane
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(s->src_detile_buf_size_bytes_c[i] / p->chunk_bytes_c, 1) * p->chunk_bytes_c);
+
+ if (p->pixel_format[i] == dml2_422_planar_8 || p->pixel_format[i] == dml2_422_planar_10 || p->pixel_format[i] == dml2_422_planar_12) {
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_c[i] / p->swath_height_c[i], 1) * s->src_swath_bytes_c[i]);
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+#endif
+ }
+
+		s->time_to_fill_det_us = (double) s->burst_bytes_to_fill_det / (256 * p->estimated_dcfclk_mhz); // fill time assumes a full burst at the request rate
+ s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+#endif
+		// clamp to the worst-case delay, i.e. one that occupies the full rob+cdb
+ if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
+ s->accumulated_return_path_dcfclk_cycles[i] = s->max_Trpd_dcfclk_cycles;
+ }
+
+ // Figure out the impacted prefetch time for each plane
+ // if impacted_Tre is > equ bw Tpre, we need to fail the prefetch schedule as we need a higher state to support the bw
+ for (i = 0; i < p->num_active_planes; i++) {
+ k = find_max_impact_plane(i, p->num_active_planes, s->accumulated_return_path_dcfclk_cycles); // plane k causes most impact to plane i
+		// the rest of the planes (except k) compete for bw
+ p->impacted_dst_y_pre[i] = s->accumulated_return_path_dcfclk_cycles[k]/p->estimated_dcfclk_mhz;
+ p->impacted_dst_y_pre[i] += calculate_impacted_Tsw(k, p->num_active_planes, p->prefetch_sw_bytes, p->estimated_urg_bandwidth_required_mbps);
+ p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+#endif
+ }
+
+ if (p->Tpre_rounded != NULL && p->Tpre_oto != NULL) {
+ for (i = 0; i < p->num_active_planes; i++) {
+ if (p->impacted_dst_y_pre[i] > p->dst_y_prefetch[i]) {
+ s->prefetch_global_check_passed = 0;
+ *p->recalc_prefetch_schedule = 1;
+ }
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+#endif
+ }
+ } else {
+		// likely a mode programming call; assume support and no recalc - not used anyway
+ s->prefetch_global_check_passed = 1;
+ *p->recalc_prefetch_schedule = 0;
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+#endif
+
+ return s->prefetch_global_check_passed;
+}
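
The clamp in CheckGlobalPrefetchAdmissibility() above assumes the worst return-path delay is a full ROB plus the compressed buffer expanded by DML_MAX_COMPRESSION_RATIO, drained at 64 bytes per DCFCLK cycle. A worked instance with invented sizes and a stand-in ratio:

#include <math.h>
#include <stdio.h>

#define MAX_COMPRESSION_RATIO 4.0   /* stand-in for DML_MAX_COMPRESSION_RATIO */

int main(void)
{
	double rob_kbytes = 128.0;      /* invented */
	double cdb_kbytes = 64.0;       /* invented */

	double cycles = ceil((rob_kbytes * 1024.0 +
			      cdb_kbytes * MAX_COMPRESSION_RATIO * 1024.0) / 64.0);

	printf("max Trpd: %.0f dcfclk cycles\n", cycles);  /* 6144 */
	return 0;
}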
+
static void calculate_peak_bandwidth_required(
struct dml2_core_internal_scratch *s,
struct dml2_core_calcs_calculate_peak_bandwidth_required_params *p)
@@ -6046,7 +6331,7 @@ static void check_urgent_bandwidth_support(
double *frac_urg_bandwidth_nom,
double *frac_urg_bandwidth_mall,
bool *vactive_bandwidth_support_ok, // vactive ok
		bool *bandwidth_support_ok, // max of vm, prefetch, vactive all ok
unsigned int mall_allocated_for_dcn_mbytes,
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
@@ -6116,7 +6401,6 @@ static void check_urgent_bandwidth_support(
}
}
#endif
-
}
static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal_soc_state_type eval_state,
@@ -6438,7 +6722,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->Z8StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
p->Watermark->Z8StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
}
- p->Watermark->g6_temp_read_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
+ p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
@@ -6454,12 +6738,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: g6_temp_read_watermark_us = %f\n", __func__, p->Watermark->g6_temp_read_watermark_us);
+ dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
s->TotalActiveWriteback = 0;
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
s->TotalActiveWriteback = s->TotalActiveWriteback + 1;
}
}
@@ -6522,7 +6806,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines= %u\n", __func__, k, p->MaxLineBufferLines);
+ dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
@@ -6563,7 +6847,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->ActiveDRAMClockChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->DRAMClockChangeWatermark;
s->ActiveFCLKChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->FCLKChangeWatermark;
s->USRRetrainingLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->USRRetrainingWatermark;
- s->g6_temp_read_latency_margin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->g6_temp_read_watermark_us;
+ s->g6_temp_read_latency_margin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->temp_read_or_ppt_watermark_us;
if (p->VActiveLatencyHidingMargin)
p->VActiveLatencyHidingMargin[k] = s->ActiveDRAMClockChangeLatencyMargin[k];
@@ -6571,9 +6855,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
if (p->VActiveLatencyHidingUs)
p->VActiveLatencyHidingUs[k] = s->ActiveClockChangeLatencyHiding;
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0 / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height * (double)h_total / pixel_clock_mhz) * 4.0);
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0
+ / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height * (double)h_total / pixel_clock_mhz) * 4.0);
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format == dml2_444_64) {
s->WritebackLatencyHiding = s->WritebackLatencyHiding / 2;
}
s->WritebackDRAMClockChangeLatencyMargin = s->WritebackLatencyHiding - p->Watermark->WritebackDRAMClockChangeWatermark;
@@ -6588,36 +6875,36 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
uclk_pstate_change_strategy = p->display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy;
reserved_vblank_time_us = (double)p->display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns / 1000;
- p->FCLKChangeSupport[k] = dml2_fclock_change_unsupported;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_unsupported;
if (s->ActiveFCLKChangeLatencyMargin[k] > 0)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vactive;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_vactive;
else if (reserved_vblank_time_us >= p->mmSOCParameters.FCLKChangeLatency)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vblank;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_vblank;
- if (p->FCLKChangeSupport[k] == dml2_fclock_change_unsupported)
+ if (p->FCLKChangeSupport[k] == dml2_pstate_change_unsupported)
*p->global_fclk_change_supported = false;
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_unsupported;
if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) {
if (p->display_cfg->overrides.all_streams_blanked ||
(s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency))
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank_and_vactive;
else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vactive;
else if (reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank;
} else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vactive && s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vactive;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vblank && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_drr)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_drr;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_drr;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_svp)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_svp;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_mall_svp;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_full_frame;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_mall_full_frame;
- if (p->DRAMClockChangeSupport[k] == dml2_dram_clock_change_unsupported)
+ if (p->DRAMClockChangeSupport[k] == dml2_pstate_change_unsupported)
*p->global_dram_clock_change_supported = false;
s->dst_y_pstate = (unsigned int)(math_ceil2((p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.UrgentLatency) / (h_total / pixel_clock_mhz), 1));
@@ -6915,8 +7202,7 @@ struct dml2_core_internal_g6_temp_read_blackouts_table {
} entries[DML_MAX_CLK_TABLE_SIZE];
};
-static const struct dml2_core_internal_g6_temp_read_blackouts_table
-	core_dcn4_g6_temp_read_blackout_table = {
+static const struct dml2_core_internal_g6_temp_read_blackouts_table core_dcn4_g6_temp_read_blackout_table = {
.entries = {
{
.uclk_khz = 96000,
@@ -7036,6 +7322,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
@@ -7083,12 +7372,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
-
- // dml2_printf_dml_policy(&mode_lib->ms.policy);
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, mode_lib->ms.num_active_planes);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7183,8 +7466,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- mode_lib->ms.SurfaceReadBandwidthLuma[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->ms.SurfaceReadBandwidthChroma[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ mode_lib->ms.vactive_sw_bw_l[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ mode_lib->ms.vactive_sw_bw_c[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
mode_lib->ms.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width *
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
@@ -7194,35 +7477,35 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
// Writeback bandwidth
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format == dml2_444_64) {
+ mode_lib->ms.WriteBandwidth[k][0] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height
* display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
/ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8.0;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
+ } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ mode_lib->ms.WriteBandwidth[k][0] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height
* display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
/ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4.0;
} else {
- mode_lib->ms.WriteBandwidth[k] = 0.0;
+ mode_lib->ms.WriteBandwidth[k][0] = 0.0;
}
}
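+ /* Editorial note: per the expressions above, writeback bandwidth in bytes/us is
+ * output_height * output_width * Bpp / (input_height * line_time_us), with
+ * Bpp = 8 for dml2_444_64 and 4 otherwise; only writeback_stream[0] is modeled. */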
/*Writeback Latency support check*/
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0 &&
+ (mode_lib->ms.WriteBandwidth[k][0] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
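+ /* Editorial note: the check above asks whether the writeback interface buffer
+ * (writeback_interface_buffer_size_kbytes * 1024 bytes) can absorb
+ * base_latency_us worth of traffic at WriteBandwidth[k][0] bytes/us. */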
@@ -7231,19 +7514,19 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
/* Writeback Scale Ratio and Taps Support Check */
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > mode_lib->ip.writeback_max_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > mode_lib->ip.writeback_max_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio < mode_lib->ip.writeback_min_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio < mode_lib->ip.writeback_min_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps
- || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps % 2) == 1))) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio > mode_lib->ip.writeback_max_hscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio > mode_lib->ip.writeback_max_vscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio < mode_lib->ip.writeback_min_hscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio < mode_lib->ip.writeback_min_vscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps
+ || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps % 2) == 1))) {
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
}
- if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
+ if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
}
}
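+ /* Editorial note: the final test above sizes the writeback line buffer as
+ * 2 * output_height * (v_taps - 1) * 57 bytes; the 57 appears to be a fixed
+ * per-line cost constant in this model. */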
@@ -7423,8 +7706,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration_params->nomDETInKByte = mode_lib->ms.NomDETInKByte;
CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.SurfaceReadBandwidthChroma;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.vactive_sw_bw_l;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.vactive_sw_bw_c;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = mode_lib->ms.MaximumSwathWidthLuma;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = mode_lib->ms.MaximumSwathWidthChroma;
CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->ms.Read256BlockHeightY;
@@ -7671,16 +7954,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
//DISPCLK/DPPCLK
mode_lib->ms.WritebackRequiredDISPCLK = 0;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->ms.WritebackRequiredDISPCLK = math_max2(mode_lib->ms.WritebackRequiredDISPCLK,
- CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
+ CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
mode_lib->ip.writeback_line_buffer_buffer_size));
}
@@ -7712,7 +7995,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!s->stream_visited[display_cfg->plane_descriptors[k].stream_index]) {
s->stream_visited[display_cfg->plane_descriptors[k].stream_index] = 1;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true)
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0)
s->TotalNumberOfActiveWriteback = s->TotalNumberOfActiveWriteback + 1;
s->TotalNumberOfActiveOTG = s->TotalNumberOfActiveOTG + 1;
@@ -8256,23 +8539,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.PSCL_FACTOR,
mode_lib->ms.PSCL_FACTOR_CHROMA,
mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->soc.return_bus_width_bytes,
/* Output */
&mode_lib->ms.dcfclk_deepsleep);
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->ms.WritebackDelayTime[k] = mode_lib->soc.qos_parameters.writeback.base_latency_us + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->ms.RequiredDISPCLK;
} else {
mode_lib->ms.WritebackDelayTime[k] = 0.0;
@@ -8349,7 +8632,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolerance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
#endif
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8367,6 +8650,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
* (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
+ mode_lib->ms.support.max_non_urgent_latency_us
+ = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
+ / mode_lib->ms.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
+
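+ /* Editorial sketch of the added formula: each latency term is converted from
+ * cycles to us via its native clock and padded by a margin percentage, i.e.
+ * max_non_urgent_latency_us =
+ * max_non_urgent_uclk_cycles / uclk_mhz * (1 + umc_max_margin/100)
+ * + mall_overhead_fclk_cycles / fclk
+ * + round_trip_to_furthest_cs_fclk_cycles / fclk * (1 + fabric_max_margin/100). */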
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
@@ -8408,7 +8698,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
memset(calculate_mcache_setting_params, 0, sizeof(struct dml2_core_calcs_calculate_mcache_setting_params));
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0 || mode_lib->ip.dcn_mrq_present) {
+ if (mode_lib->soc.mcache_size_bytes == 0 || mode_lib->ip.dcn_mrq_present) {
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.mall_prefetch_sdp_overhead_factor[k] = 1.0;
mode_lib->ms.mall_prefetch_dram_overhead_factor[k] = 1.0;
@@ -8515,8 +8805,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->hostvm_enable,
mode_lib->ms.MaxDCFCLK,
mode_lib->ms.MaxFabricClock,
+#ifdef DML_MODE_SUPPORT_USE_DPM_DRAM_BW
+ mode_lib->ms.dram_bw_mbps);
+#else
mode_lib->ms.max_dram_bw_mbps);
-
+#endif
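+ /* Editorial note: with DML_MODE_SUPPORT_USE_DPM_DRAM_BW defined, mode support is
+ * evaluated against the DRAM bandwidth of the DPM level under test
+ * (ms.dram_bw_mbps) rather than the platform maximum (ms.max_dram_bw_mbps);
+ * this reading is inferred from the field names. */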
// Average BW support check
calculate_avg_bandwidth_required(
@@ -8524,8 +8817,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
// input
display_cfg,
mode_lib->ms.num_active_planes,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->ms.cursor_bw,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
@@ -8595,6 +8888,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
@@ -8638,9 +8932,32 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&mode_lib->ms.ExtraLatency_sr,
&mode_lib->ms.ExtraLatencyPrefetch);
- {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
+ do {
mode_lib->ms.support.PrefetchSupported = true;
+
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
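+ /* Editorial note: get_num_lb_source_lines presumably reports how many source
+ * lines the line buffer can hold for this plane given line_buffer_size_bits,
+ * the viewport dimensions, DPP split, h_ratio and rotation; the per-plane
+ * results feed CheckGlobalPrefetchAdmissibility below. */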
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
mode_lib->ms.TWait[k] = CalculateTWait(
@@ -8730,6 +9047,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
// output
CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
@@ -8758,6 +9078,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
@@ -8766,6 +9090,27 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
} // for k num_planes
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
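+ /* Editorial note: dcfclk_deepsleep is recomputed after the prefetch loop so the
+ * tdlut bytes-to-deliver and per-plane prefetch swath times (new inputs above)
+ * can raise the deep-sleep DCFCLK floor computed earlier. */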
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->ms.dst_y_prefetch[k] < 2.0
|| mode_lib->ms.LinesForVM[k] >= 32.0
@@ -8789,7 +9134,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
mode_lib->ms.support.VRatioInPrefetchSupported = false;
@@ -8799,10 +9144,14 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
// Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
// Calculate Urgent burst factor for prefetch
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
@@ -8815,7 +9164,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.swath_width_chroma_ub[k],
mode_lib->ms.SwathHeightY[k],
mode_lib->ms.SwathHeightC[k],
- line_time_us,
+ s->line_times[k],
mode_lib->ms.UrgLatency,
mode_lib->ms.VRatioPreY[k],
mode_lib->ms.VRatioPreC[k],
@@ -8852,8 +9201,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
@@ -8899,127 +9248,164 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
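+ /* Editorial sketch: estimated_dcfclk_mhz back-solves the DCFCLK needed to carry
+ * the estimated urgent SDP bandwidth: mbps / return_bus_width_bytes gives MHz,
+ * then dividing by the active-urgent derate fraction adds headroom; the
+ * 10*1024 floor above keeps the estimate from collapsing at low bandwidths. */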
+ // If recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+ } // prefetch schedule ok; do urgent BW and flip schedule checks
+ } while (s->recalc_prefetch_schedule);
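+ /* Editorial note: the do/while above makes prefetch scheduling two-pass when
+ * DML_GLOBAL_PREFETCH_CHECK is set: pass 1 computes per-plane schedules, then
+ * CheckGlobalPrefetchAdmissibility may fill impacted_dst_y_pre[] and set
+ * recalc_prefetch_schedule; pass 2 reruns the schedules with those impacts and
+ * exits since recalc_prefetch_done is already set. */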
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true && mode_lib->ms.support.VRatioInPrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
- }
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+ mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
+ }
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.SurfaceReadBandwidthChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
- } else { // if prefetch not support, assume iflip is not supported too
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
+
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
mode_lib->ms.support.ImmediateFlipSupport = false;
- }
- } // prefetch schedule
+ }
+
+ } else { // if prefetch is not supported, assume iflip is not supported either
+ mode_lib->ms.support.ImmediateFlipSupport = false;
}
s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
@@ -9116,8 +9502,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->pstate_bytes_required_c,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->ms.surface_avg_vactive_required_bw,
mode_lib->ms.surface_peak_required_bw,
/* outputs */
@@ -9187,12 +9573,12 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
mode_lib->ms.support.DPPPerSurface[k] = mode_lib->ms.NoOfDPP[k];
}
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.ODMMode[k] = mode_lib->ms.ODMMode[k];
mode_lib->ms.support.DSCEnabled[k] = mode_lib->ms.RequiresDSC[k];
mode_lib->ms.support.FECEnabled[k] = mode_lib->ms.RequiresFEC[k];
@@ -9229,7 +9615,7 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
@@ -9882,7 +10268,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
if (!l->stream_visited[p->display_cfg->plane_descriptors[k].stream_index]) {
- if (p->display_cfg->stream_descriptors[k].writeback.enable)
+ if (p->display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0)
l->TotalActiveWriteback = l->TotalActiveWriteback + 1;
if (TotalNumberOfActiveOTG == 0) { // first otg
@@ -9984,6 +10370,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculateStutterEfficiency_params *CalculateStutterEfficiency_params = &mode_lib->scratch.CalculateStutterEfficiency_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_shared_CalculateMetaAndPTETimes_params *CalculateMetaAndPTETimes_params = &mode_lib->scratch.CalculateMetaAndPTETimes_params;
@@ -10075,12 +10462,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_assert(s->SOCCLK > 0);
#ifdef __DML_VBA_DEBUG__
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, s->num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, s->num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, s->num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, s->num_active_planes);
- // dml2_printf_dml_display_cfg_hw_resource(&display_cfg->hw, s->num_active_planes);
-
dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
@@ -10198,10 +10579,10 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width * display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
- mode_lib->mp.SurfaceReadBandwidthLuma[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->mp.SurfaceReadBandwidthChroma[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: ReadBandwidthSurfaceLuma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthSurfaceChroma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -10217,8 +10598,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateSwathAndDETConfiguration_params->nomDETInKByte = s->NomDETInKByte;
CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.vactive_sw_bw_l;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.vactive_sw_bw_c;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = s->dummy_single_array[0];
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = s->dummy_single_array[1];
CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->mp.Read256BlockHeightY;
@@ -10539,8 +10920,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
@@ -10583,17 +10964,17 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.TCalc = 24.0 / mode_lib->mp.DCFCLKDeepSleep;
for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->mp.WritebackDelay[k] =
mode_lib->soc.qos_parameters.writeback.base_latency_us
+ CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->mp.Dispclk;
} else
mode_lib->mp.WritebackDelay[k] = 0;
@@ -10679,10 +11060,25 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
bool cursor_not_enough_urgent_latency_hiding = 0;
- double line_time_us = 0.0;
-
- line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->mp.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->mp.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
display_cfg->plane_descriptors[k].cursor.cursor_width,
@@ -10699,7 +11095,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
display_cfg->plane_descriptors[k].cursor.cursor_width,
s->cursor_bytes_per_chunk[k],
s->cursor_lines_per_chunk[k],
- line_time_us,
+ s->line_times[k],
mode_lib->mp.UrgentLatency,
// output
@@ -10714,7 +11110,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.swath_width_chroma_ub[k],
mode_lib->mp.SwathHeightY[k],
mode_lib->mp.SwathHeightC[k],
- line_time_us,
+ s->line_times[k],
mode_lib->mp.UrgentLatency,
display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
@@ -10752,6 +11148,35 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
+ if (s->num_active_planes > 1) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = s->num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->mp.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->mp.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->mp.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->mp.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->mp.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = 0; // don't care
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = 0; // don't care
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = mode_lib->mp.Dcfclk;
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->mp.dst_y_prefetch;
+
+ // if recalc_prefetch_schedule is set, the prefetch schedule is recalculated with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->dummy_boolean[0];
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params); // don't care about the check output for mode programming
+ }
+
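For context: mode programming only keeps the impacted_dst_y_pre output here and ignores the recalc flag (it is routed into dummy_boolean), while mode support appears to use that flag to rerun the per-plane prefetch schedule. A caller-side sketch of that pattern; the types and helper below are illustrative stand-ins, not the driver's:

#include <stdbool.h>

#define MAX_PLANES 8

struct global_check_out {
	bool recalc_prefetch_schedule;          /* set when any plane's Tpre is impacted */
	double impacted_dst_y_pre[MAX_PLANES];  /* new per-plane prefetch start, in lines */
};

static void recalc_plane_prefetch(int k, double impacted_dst_y_pre)
{
	/* placeholder for rerunning the per-plane prefetch schedule */
	(void)k;
	(void)impacted_dst_y_pre;
}

static void apply_global_check(const struct global_check_out *out, int num_planes)
{
	if (!out->recalc_prefetch_schedule)
		return;
	for (int k = 0; k < num_planes; k++)
		recalc_plane_prefetch(k, out->impacted_dst_y_pre[k]);
}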
{
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
@@ -10763,11 +11188,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->mp.UrgentLatency,
+ mode_lib->mp.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->mp.Dppclk[k];
myPipe->Dispclk = mode_lib->mp.Dispclk;
@@ -10848,6 +11273,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->mp.meta_row_bytes[k];
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->mp.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->mp.vactive_sw_bw_c[k];
// output
CalculatePrefetchSchedule_params->DSTXAfterScaler = &mode_lib->mp.DSTXAfterScaler[k];
@@ -10876,9 +11304,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VUpdateWidthPix = &mode_lib->mp.VUpdateWidthPix[k];
CalculatePrefetchSchedule_params->VReadyOffsetPix = &mode_lib->mp.VReadyOffsetPix[k];
CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->mp.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->dummy_single[0];
mode_lib->mp.NoTimeToPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
+ if (s->impacted_dst_y_pre[k] > 0)
+ mode_lib->mp.impacted_prefetch_margin_us[k] = (mode_lib->mp.dst_y_prefetch[k] - s->impacted_dst_y_pre[k]) * s->line_times[k];
+ else
+ mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
+
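The margin is the slack between the achieved prefetch window and the globally impacted one, converted from lines to microseconds. A worked sketch with made-up numbers:

#include <stdio.h>

/* margin (us) = (dst_y_prefetch - impacted_dst_y_pre) lines * line time */
static double impacted_prefetch_margin_us(double dst_y_prefetch,
					  double impacted_dst_y_pre,
					  double line_time_us)
{
	if (impacted_dst_y_pre <= 0.0)
		return 0.0;
	return (dst_y_prefetch - impacted_dst_y_pre) * line_time_us;
}

int main(void)
{
	/* hypothetical: 20-line budget, 15 impacted lines, 14.8 us per line */
	printf("%.1f us\n", impacted_prefetch_margin_us(20.0, 15.0, 14.8)); /* 74.0 */
	return 0;
}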
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
@@ -10956,8 +11393,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceLuma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceChroma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
@@ -10988,8 +11425,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->mp.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
@@ -11120,8 +11557,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->mp.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
@@ -11238,8 +11675,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->mmSOCParameters.USRRetrainingLatency = 0;
s->mmSOCParameters.SMNLatency = 0;
s->mmSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->mp.uclk_freq_mhz, mode_lib->mp.FabricClock, in_out_params->min_clk_index);
+ s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->mp.FabricClock;
s->mmSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
CalculateWatermarks_params->display_cfg = display_cfg;
@@ -11289,7 +11726,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) - mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
@@ -11475,25 +11912,25 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
//Maximum Bandwidth Used
s->TotalWRBandwidth = 0;
- s->WRBandwidth = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_32) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8;
+ for (k = 0; k < display_cfg->num_streams; ++k) {
+ s->WRBandwidth = 0;
+ if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
+ s->WRBandwidth = display_cfg->stream_descriptors[k].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].output_width /
+ (display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
+ / ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
+ * (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
+ s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
}
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
}
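The reworked loop produces one figure per stream from writeback_stream[0]: output pixels divided by the time to scan the writeback input, times bytes per pixel (4.0 for dml2_444_32, otherwise 8.0). A worked example with illustrative values:

#include <stdio.h>

static double wr_bandwidth_mbps(unsigned int out_w, unsigned int out_h,
				unsigned int in_h, unsigned int h_total,
				unsigned int pixel_clock_khz, double bytes_pp)
{
	/* time to scan the writeback input, in us (clock in MHz == cycles/us) */
	double scan_time_us = (double)h_total * in_h / ((double)pixel_clock_khz / 1000.0);

	return (double)out_w * out_h / scan_time_us * bytes_pp; /* bytes/us == MB/s */
}

int main(void)
{
	/* hypothetical: 1920x1080 32bpp writeback, 2200 h_total, 148.5 MHz */
	printf("%.0f MB/s\n", wr_bandwidth_mbps(1920, 1080, 1080, 2200, 148500, 4.0)); /* ~518 */
	return 0;
}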
mode_lib->mp.TotalDataReadBandwidth = 0;
for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.SurfaceReadBandwidthLuma[k] + mode_lib->mp.SurfaceReadBandwidthChroma[k];
+ mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -11530,8 +11967,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateStutterEfficiency_params->BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC;
CalculateStutterEfficiency_params->DCCYMaxUncompressedBlock = mode_lib->mp.DCCYMaxUncompressedBlock;
CalculateStutterEfficiency_params->DCCCMaxUncompressedBlock = mode_lib->mp.DCCCMaxUncompressedBlock;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
+ CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.vactive_sw_bw_l;
+ CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.vactive_sw_bw_c;
CalculateStutterEfficiency_params->dpte_row_bw = mode_lib->mp.dpte_row_bw;
CalculateStutterEfficiency_params->meta_row_bw = mode_lib->mp.meta_row_bw;
CalculateStutterEfficiency_params->rob_alloc_compressed = mode_lib->ip.dcn_mrq_present;
@@ -11742,7 +12179,7 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
wm_regs->usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
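All of these register fields follow the same pattern: a watermark in microseconds multiplied by the reference clock in MHz yields a cycle count, truncated to an unsigned integer. For instance, with made-up values:

#include <stdio.h>

static unsigned int wm_to_refclk_cycles(double watermark_us, double refclk_mhz)
{
	return (unsigned int)(watermark_us * refclk_mhz); /* truncates, as above */
}

int main(void)
{
	/* hypothetical 10.25 us watermark against a 100 MHz refclk */
	printf("%u cycles\n", wm_to_refclk_cycles(10.25, 100.0)); /* 1025 */
	return 0;
}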
@@ -12321,14 +12758,18 @@ void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_interna
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib,
const struct display_configuation_with_meta *display_cfg,
- struct dmub_fams2_stream_static_state *fams2_programming,
- enum dml2_uclk_pstate_support_method pstate_method,
+ union dmub_cmd_fams2_config *fams2_base_programming,
+ union dmub_cmd_fams2_config *fams2_sub_programming,
+ enum dml2_pstate_method pstate_method,
int plane_index)
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
+ union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
+
unsigned int i;
if (display_cfg->display_config.overrides.all_streams_blanked) {
@@ -12337,110 +12778,110 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
}
/* from display configuration */
- fams2_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
- fams2_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- fams2_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
+ base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
+ base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- fams2_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
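vblank_start and vblank_end are computed from the FAMS2 meta's nom_vtotal rather than the raw timing.v_total. A sketch with illustrative values (nom_vtotal 1125, v_front_porch 4, v_active 1080):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical timing values */
	uint16_t nom_vtotal = 1125, v_front_porch = 4, v_active = 1080;

	uint16_t vblank_start = nom_vtotal - v_front_porch;          /* 1121 */
	uint16_t vblank_end = nom_vtotal - v_front_porch - v_active; /* 41 */

	printf("vblank spans lines %u..%u\n", (unsigned)vblank_end, (unsigned)vblank_start);
	return 0;
}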
- fams2_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
+ base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
- fams2_programming->otg_vline_time_ns =
+ base_programming->otg_vline_time_ns =
(unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- fams2_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- fams2_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- fams2_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- fams2_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- fams2_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- fams2_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
/* from core */
- fams2_programming->config.bits.min_ttu_vblank_usable = true;
+ base_programming->config.bits.min_ttu_vblank_usable = true;
for (i = 0; i < display_cfg->display_config.num_planes; i++) {
/* check if all planes support p-state in blank */
if (display_cfg->display_config.plane_descriptors[i].stream_index == plane_descriptor->stream_index &&
mode_lib->mp.MinTTUVBlank[i] <= mode_lib->mp.Watermark.DRAMClockChangeWatermark) {
- fams2_programming->config.bits.min_ttu_vblank_usable = false;
+ base_programming->config.bits.min_ttu_vblank_usable = false;
break;
}
}
switch (pstate_method) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
/* legacy vactive */
- fams2_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
- fams2_programming->sub_state.legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
+ sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
/* legacy vblank */
- fams2_programming->type = FAMS2_STREAM_TYPE_VBLANK;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
- case dml2_uclk_pstate_support_method_fw_drr:
+ case dml2_pstate_method_fw_drr:
/* drr */
- fams2_programming->type = FAMS2_STREAM_TYPE_DRR;
- fams2_programming->sub_state.drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
- fams2_programming->sub_state.drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ base_programming->type = FAMS2_STREAM_TYPE_DRR;
+ sub_programming->drr.programming_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ sub_programming->drr.nom_stretched_vtotal =
+ (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
- fams2_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
- fams2_programming->sub_state.drr.only_stretch_if_required = true;
+ base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
+ sub_programming->drr.only_stretch_if_required = true;
break;
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
/* subvp */
- fams2_programming->type = FAMS2_STREAM_TYPE_SUBVP;
- fams2_programming->sub_state.subvp.vratio_numerator =
- (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
- fams2_programming->sub_state.subvp.vratio_denominator = 1000;
- fams2_programming->sub_state.subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
- fams2_programming->sub_state.subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
- fams2_programming->sub_state.subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
- fams2_programming->sub_state.subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
- fams2_programming->sub_state.subvp.config.bits.is_multi_planar =
- plane_descriptor->surface.plane1.height > 0;
- fams2_programming->sub_state.subvp.config.bits.is_yuv420 =
- plane_descriptor->pixel_format == dml2_420_8 ||
- plane_descriptor->pixel_format == dml2_420_10 ||
- plane_descriptor->pixel_format == dml2_420_12;
-
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_SUBVP;
+ sub_programming->subvp.vratio_numerator =
+ (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
+ sub_programming->subvp.vratio_denominator = 1000;
+ sub_programming->subvp.programming_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ sub_programming->subvp.prefetch_to_mall_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ sub_programming->subvp.phantom_vtotal =
+ (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ sub_programming->subvp.phantom_vactive =
+ (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ sub_programming->subvp.config.bits.is_multi_planar =
+ plane_descriptor->surface.plane1.height > 0;
+ sub_programming->subvp.config.bits.is_yuv420 =
+ plane_descriptor->pixel_format == dml2_420_8 ||
+ plane_descriptor->pixel_format == dml2_420_10 ||
+ plane_descriptor->pixel_format == dml2_420_12;
+
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
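The subvp vratio is carried to firmware as a /1000 fixed-point pair, so e.g. a 1.25 vertical scale ratio becomes 1250/1000. A sketch of that conversion (the ratio value is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	double v_ratio = 1.25; /* hypothetical plane0 vertical scale ratio */

	uint16_t numerator = (uint16_t)(v_ratio * 1000.0); /* 1250 */
	uint16_t denominator = 1000;

	printf("vratio = %u/%u\n", (unsigned)numerator, (unsigned)denominator);
	return 0;
}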
- case dml2_uclk_pstate_support_method_reserved_hw:
- case dml2_uclk_pstate_support_method_reserved_fw:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_fixed:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_var:
- case dml2_uclk_pstate_support_method_not_supported:
- case dml2_uclk_pstate_support_method_count:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_na:
+ case dml2_pstate_method_count:
default:
/* this should never happen */
break;
@@ -12569,6 +13010,8 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
+ out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support;
+ out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support;
out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
out->informative.mode_support_info.NotEnoughDSCUnits = mode_lib->ms.support.NotEnoughDSCUnits;
@@ -12662,7 +13105,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.watermarks.pstate_change_us = dml_get_wm_dram_clock_change(mode_lib);
out->informative.watermarks.fclk_pstate_change_us = dml_get_wm_fclk_change(mode_lib);
out->informative.watermarks.usr_retraining_us = dml_get_wm_usr_retraining(mode_lib);
- out->informative.watermarks.g6_temp_read_watermark_us = dml_get_wm_g6_temp_read(mode_lib);
+ out->informative.watermarks.temp_read_or_ppt_watermark_us = dml_get_wm_temp_read_or_ppt(mode_lib);
out->informative.mall.total_surface_size_in_mall_bytes = 0;
for (k = 0; k < out->display_config.num_planes; ++k)
@@ -12745,6 +13188,8 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib);
+ out->informative.misc.LowestPrefetchMargin = 10 * 1000 * 1000;
+
for (k = 0; k < out->display_config.num_planes; k++) {
if ((out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us)
@@ -12824,6 +13269,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
@@ -12831,6 +13277,9 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.PTE_BUFFER_MODE[k] = mode_lib->mp.PTE_BUFFER_MODE[k];
out->informative.misc.DSCDelay[k] = mode_lib->mp.DSCDelay[k];
out->informative.misc.MaxActiveDRAMClockChangeLatencySupported[k] = mode_lib->mp.MaxActiveDRAMClockChangeLatencySupported[k];
+
+ if (mode_lib->mp.impacted_prefetch_margin_us[k] < out->informative.misc.LowestPrefetchMargin)
+ out->informative.misc.LowestPrefetchMargin = mode_lib->mp.impacted_prefetch_margin_us[k];
}
// For this DV informative layer, all pipes in the same planes will just use the same id
@@ -12853,16 +13302,11 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.non_optimized_mcache_allocation[k].global_mcache_ids_plane1[n] = k;
}
}
-
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
+ out->informative.qos.max_non_urgent_latency_us = dml_get_max_non_urgent_latency_us(mode_lib);
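The inline expression removed here is presumably what dml_get_max_non_urgent_latency_us() now centralizes. Restated from the deleted lines as a standalone sketch (parameter names shortened for readability; not the helper's actual signature):

/* non-urgent latency = UMC cycles at uclk with its margin, plus MALL overhead
 * and the furthest-CS round trip at fclk with the fabric margin applied */
static double max_non_urgent_latency_us(double non_urgent_uclk_cycles,
					double uclk_freq_mhz,
					double umc_margin_pct,
					double mall_overhead_fclk_cycles,
					double round_trip_fclk_cycles,
					double fabric_margin_pct,
					double fabric_clock_mhz)
{
	return non_urgent_uclk_cycles / uclk_freq_mhz * (1.0 + umc_margin_pct / 100.0)
		+ mall_overhead_fclk_cycles / fabric_clock_mhz
		+ round_trip_fclk_cycles / fabric_clock_mhz
			* (1.0 + fabric_margin_pct / 100.0);
}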
if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
+ / mode_lib->ms.support.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
out->informative.misc.ROBUrgencyAvoidance = true;
} else {
out->informative.misc.ROBUrgencyAvoidance = false;
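The ROB check compares how long the ROB (minus one pixel chunk) can absorb traffic at the required non-urgent SDP bandwidth against the maximum non-urgent latency; note the switch from the mp copy of non_urg_bandwidth_required to the mode-support copy. A worked sketch, all numbers hypothetical:

#include <stdbool.h>
#include <stdio.h>

static bool rob_urgency_avoidance(unsigned int rob_kbytes,
				  unsigned int chunk_kbytes,
				  double non_urg_sdp_bw_mbps, /* MB/s == bytes/us */
				  double max_non_urgent_latency_us)
{
	double drain_time_us = (rob_kbytes - chunk_kbytes) * 1024.0 / non_urg_sdp_bw_mbps;

	return drain_time_us >= max_non_urgent_latency_us;
}

int main(void)
{
	/* hypothetical: 1280 KB ROB, 8 KB chunks, 60 GB/s required, 18 us limit */
	printf("%d\n", rob_urgency_avoidance(1280, 8, 60000.0, 18.0)); /* 1: ~21.7 us >= 18 us */
	return 0;
}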
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index df2d1550a14b..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
@@ -28,7 +28,7 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ
void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out);
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
void dml2_core_calcs_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
-void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, enum dml2_uclk_pstate_support_method pstate_method, int plane_index);
+void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, union dmub_cmd_fams2_config *fams2_base_programming, union dmub_cmd_fams2_config *fams2_sub_programming, enum dml2_pstate_method pstate_method, int plane_index);
void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_cmd_fams2_global_config *fams2_global_config);
void dml2_core_calcs_get_dpte_row_height(unsigned int *dpte_row_height, struct dml2_core_internal_display_mode_lib *mode_lib, bool is_plane1, enum dml2_source_format_class SourcePixelFormat, enum dml2_swizzle_mode SurfaceTiling, enum dml2_rotation_angle ScanDirection, unsigned int pitch, unsigned int GPUVMMinPageSizeKBytes);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index cbdfbd5a0bde..23c0fca5515f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -201,7 +201,7 @@ struct dml2_core_internal_watermarks {
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
- double g6_temp_read_watermark_us;
+ double temp_read_or_ppt_watermark_us;
};
struct dml2_core_internal_mode_support_info {
@@ -252,8 +252,8 @@ struct dml2_core_internal_mode_support_info {
bool PTEBufferSizeNotExceeded;
bool DCCMetaBufferSizeNotExceeded;
- enum dml2_dram_clock_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
bool USRRetrainingSupport;
@@ -318,12 +318,15 @@ struct dml2_core_internal_mode_support_info {
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double max_urgent_latency_us;
+ double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
double avg_urgent_latency_us;
+ double df_response_time_us;
bool incorrect_imall_usage;
bool g6_temp_read_support;
+ bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
};
@@ -378,8 +381,8 @@ struct dml2_core_internal_mode_support {
unsigned int DETBufferSizeC[DML2_MAX_PLANES];
unsigned int SwathHeightY[DML2_MAX_PLANES];
unsigned int SwathHeightC[DML2_MAX_PLANES];
- unsigned int SwathWidthY[DML2_MAX_PLANES];
- unsigned int SwathWidthC[DML2_MAX_PLANES];
+ unsigned int SwathWidthY[DML2_MAX_PLANES]; // per-pipe
+ unsigned int SwathWidthC[DML2_MAX_PLANES]; // per-pipe
// ----------------------------------
// Intermediates/Informational
@@ -476,9 +479,9 @@ struct dml2_core_internal_mode_support {
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
- double SurfaceReadBandwidthLuma[DML2_MAX_PLANES]; // no dcc overhead, for the plane
- double SurfaceReadBandwidthChroma[DML2_MAX_PLANES];
- double WriteBandwidth[DML2_MAX_PLANES];
+ double vactive_sw_bw_l[DML2_MAX_PLANES]; // no dcc overhead, for the plane
+ double vactive_sw_bw_c[DML2_MAX_PLANES];
+ double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
@@ -539,7 +542,7 @@ struct dml2_core_internal_mode_program {
unsigned int qos_param_index; // to access the uclk dependent dpm table
unsigned int active_min_uclk_dpm_index; // to access the min_clk table
double FabricClock; ///< Basically just the clock freq at the min (or given) state
- double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
+ //double DCFCLK; ///< Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
@@ -562,14 +565,14 @@ struct dml2_core_internal_mode_program {
double BytePerPixelInDETC[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
- unsigned int SwathWidthY[DML2_MAX_PLANES];
- unsigned int SwathWidthC[DML2_MAX_PLANES];
+ unsigned int SwathWidthY[DML2_MAX_PLANES]; // per-pipe
+ unsigned int SwathWidthC[DML2_MAX_PLANES]; // per-pipe
unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int SwathWidthSingleDPPY[DML2_MAX_PLANES];
unsigned int SwathWidthSingleDPPC[DML2_MAX_PLANES];
- double SurfaceReadBandwidthLuma[DML2_MAX_PLANES];
- double SurfaceReadBandwidthChroma[DML2_MAX_PLANES];
+ double vactive_sw_bw_l[DML2_MAX_PLANES];
+ double vactive_sw_bw_c[DML2_MAX_PLANES];
double excess_vactive_fill_bw_l[DML2_MAX_PLANES];
double excess_vactive_fill_bw_c[DML2_MAX_PLANES];
@@ -797,8 +800,9 @@ struct dml2_core_internal_mode_program {
double MaxActiveFCLKChangeLatencySupported;
bool USRRetrainingSupport;
bool g6_temp_read_support;
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
- enum dml2_dram_clock_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ bool temp_read_or_ppt_support;
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
@@ -846,6 +850,8 @@ struct dml2_core_internal_mode_program {
bool mall_comb_mcache_l[DML2_MAX_PLANES];
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+
+ double impacted_prefetch_margin_us[DML2_MAX_PLANES];
};
struct dml2_core_internal_SOCParametersList {
@@ -862,6 +868,7 @@ struct dml2_core_internal_SOCParametersList {
double USRRetrainingLatency;
double SMNLatency;
double g6_temp_read_blackout_us;
+ double temp_read_or_ppt_blackout_us;
double max_urgent_latency_us;
double df_response_time_us;
enum dml2_qos_param_type qos_type;
@@ -951,6 +958,7 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -961,6 +969,18 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+
+ double prefetch_sw_bytes[DML2_MAX_PLANES];
+ double Tpre_rounded[DML2_MAX_PLANES];
+ double Tpre_oto[DML2_MAX_PLANES];
+ bool recalc_prefetch_schedule;
+ bool recalc_prefetch_done;
+ double impacted_dst_y_pre[DML2_MAX_PLANES];
+ double line_times[DML2_MAX_PLANES];
+ enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_l[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ double prefetch_swath_time_us[DML2_MAX_PLANES];
};
struct dml2_core_calcs_mode_programming_locals {
@@ -1024,6 +1044,7 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -1041,6 +1062,16 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+
+ double prefetch_sw_bytes[DML2_MAX_PLANES];
+ double Tpre_rounded[DML2_MAX_PLANES];
+ double Tpre_oto[DML2_MAX_PLANES];
+ bool recalc_prefetch_schedule;
+ double impacted_dst_y_pre[DML2_MAX_PLANES];
+ double line_times[DML2_MAX_PLANES];
+ enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_l[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_c[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
@@ -1048,6 +1079,7 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_local
double ActiveFCLKChangeLatencyMargin[DML2_MAX_PLANES];
double USRRetrainingLatencyMargin[DML2_MAX_PLANES];
double g6_temp_read_latency_margin[DML2_MAX_PLANES];
+ double temp_read_or_ppt_latency_margin[DML2_MAX_PLANES];
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
@@ -1185,17 +1217,14 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double LineTime;
double dst_y_prefetch_equ;
double prefetch_bw_oto;
+ double per_pipe_vactive_sw_bw;
double Tvm_oto;
double Tr0_oto;
- double Tvm_no_trip_oto;
- double Tr0_no_trip_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingVM;
double TimeForFetchingRowInVBlank;
- double dst_y_per_vm_no_trip_vblank;
- double dst_y_per_row_no_trip_vblank;
double LinesToRequestPrefetchPixelData;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
@@ -1203,15 +1232,12 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tr0_trips_rounded;
double max_Tsw;
double Lsw_oto;
- double Lsw_equ;
- double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
double Tr0_equ;
double Tdmbf;
double Tdmec;
double Tdmsks;
- double prefetch_sw_bytes;
double total_row_bytes;
double prefetch_bw_pr;
double bytes_pp;
@@ -1225,6 +1251,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double prefetch_bw2;
double prefetch_bw3;
double prefetch_bw4;
+ double dst_y_prefetch_equ_impacted;
double TWait_p;
unsigned int cursor_prefetch_bytes;
@@ -1545,17 +1572,18 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
// Output
struct dml2_core_internal_watermarks *Watermark;
- enum dml2_dram_clock_change_support *DRAMClockChangeSupport;
+ enum dml2_pstate_change_support *DRAMClockChangeSupport;
bool *global_dram_clock_change_supported;
double *MaxActiveDRAMClockChangeLatencySupported;
unsigned int *SubViewportLinesNeededInMALL;
- enum dml2_fclock_change_support *FCLKChangeSupport;
+ enum dml2_pstate_change_support *FCLKChangeSupport;
bool *global_fclk_change_supported;
double *MaxActiveFCLKChangeLatencySupported;
bool *USRRetrainingSupport;
double *VActiveLatencyHidingMargin;
double *VActiveLatencyHidingUs;
bool *g6_temp_read_support;
+ bool *temp_read_or_ppt_support;
};
@@ -1727,8 +1755,8 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double PrefetchSourceLinesC;
unsigned int VInitPreFillC;
unsigned int MaxNumSwathC;
- unsigned int swath_width_luma_ub;
- unsigned int swath_width_chroma_ub;
+ unsigned int swath_width_luma_ub; // per-pipe
+ unsigned int swath_width_chroma_ub; // per-pipe
unsigned int SwathHeightY;
unsigned int SwathHeightC;
double TWait;
@@ -1750,6 +1778,10 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
unsigned int meta_row_bytes;
double mall_prefetch_sdp_overhead_factor;
+ double impacted_dst_y_pre;
+ double vactive_sw_bw_l; // per surface bw
+ double vactive_sw_bw_c; // per surface bw
+
// output
unsigned int *DSTXAfterScaler;
unsigned int *DSTYAfterScaler;
@@ -1767,6 +1799,8 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *Tdmdl_vm;
double *Tdmdl;
double *TSetup;
+ double *Tpre_rounded;
+ double *Tpre_oto;
double *Tvm_trips;
double *Tr0_trips;
double *Tvm_trips_flip;
@@ -1777,6 +1811,48 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
unsigned int *VUpdateWidthPix;
unsigned int *VReadyOffsetPix;
double *prefetch_cursor_bw;
+ double *prefetch_sw_bytes;
+ double *prefetch_swath_time_us;
+};
+
+struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params {
+ unsigned int num_active_planes;
+ enum dml2_source_format_class *pixel_format;
+ unsigned int rob_buffer_size_kbytes;
+ unsigned int compressed_buffer_size_kbytes;
+ unsigned int chunk_bytes_l; // same for all planes
+ unsigned int chunk_bytes_c;
+ unsigned int *detile_buffer_size_bytes_l;
+ unsigned int *detile_buffer_size_bytes_c;
+ unsigned int *full_swath_bytes_l;
+ unsigned int *full_swath_bytes_c;
+ unsigned int *lb_source_lines_l;
+ unsigned int *lb_source_lines_c;
+ unsigned int *swath_height_l;
+ unsigned int *swath_height_c;
+ double *prefetch_sw_bytes;
+ double *Tpre_rounded;
+ double *Tpre_oto;
+ double estimated_dcfclk_mhz;
+ double estimated_urg_bandwidth_required_mbps;
+ double *line_time;
+ double *dst_y_prefetch;
+
+ // output
+ bool *recalc_prefetch_schedule;
+ double *impacted_dst_y_pre;
+};
+
+struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals {
+ unsigned int max_Trpd_dcfclk_cycles;
+ unsigned int burst_bytes_to_fill_det;
+ double time_to_fill_det_us;
+ unsigned int accumulated_return_path_dcfclk_cycles[DML2_MAX_PLANES];
+ bool prefetch_global_check_passed;
+ unsigned int src_swath_bytes_l[DML2_MAX_PLANES];
+ unsigned int src_swath_bytes_c[DML2_MAX_PLANES];
+ unsigned int src_detile_buf_size_bytes_l[DML2_MAX_PLANES];
+ unsigned int src_detile_buf_size_bytes_c[DML2_MAX_PLANES];
};
struct dml2_core_calcs_calculate_mcache_row_bytes_params {
@@ -1921,6 +1997,7 @@ struct dml2_core_calcs_calculate_tdlut_setting_params {
unsigned int *tdlut_groups_per_2row_ub;
double *tdlut_opt_time;
double *tdlut_drain_time;
+ unsigned int *tdlut_bytes_to_deliver;
unsigned int *tdlut_bytes_per_group;
};
@@ -2004,6 +2081,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals;
struct dml2_core_calcs_CalculateVMRowAndSwath_locals CalculateVMRowAndSwath_locals;
struct dml2_core_calcs_CalculatePrefetchSchedule_locals CalculatePrefetchSchedule_locals;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals CheckGlobalPrefetchAdmissibility_locals;
struct dml2_core_shared_CalculateSwathAndDETConfiguration_locals CalculateSwathAndDETConfiguration_locals;
struct dml2_core_shared_TruncToValidBPP_locals TruncToValidBPP_locals;
struct dml2_core_shared_CalculateDETBufferSize_locals CalculateDETBufferSize_locals;
@@ -2019,6 +2097,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculateStutterEfficiency_params CalculateStutterEfficiency_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params CheckGlobalPrefetchAdmissibility_params;
struct dml2_core_calcs_calculate_mcache_setting_params calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_tdlut_setting_params calculate_tdlut_setting_params;
struct dml2_core_shared_calculate_vm_and_row_bytes_params calculate_vm_and_row_bytes_params;
@@ -2038,7 +2117,6 @@ struct dml2_core_internal_display_mode_lib {
// Used to hold input; intermediate and output of the calculations
struct dml2_core_internal_mode_support ms; // struct for mode support
struct dml2_core_internal_mode_program mp; // struct for mode programming
-
// Available overridable calculators for core_shared.
// if null, core_shared will use default calculators.
struct dml2_core_shared_calculation_funcs funcs;
@@ -2051,7 +2129,6 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
-
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2064,9 +2141,7 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
-
struct dml2_display_cfg_programming *programming;
-
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 714b5c39b7e6..456b3f8a6d38 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -63,6 +63,150 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
case dml2_mono_16:
val = 0;
break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
+
+bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 0;
+ break;
+ case dml2_420_10:
+ val = 0;
+ break;
+ case dml2_420_12:
+ val = 0;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ case dml2_422_planar_8:
+ val = 1;
+ break;
+ case dml2_422_planar_10:
+ val = 1;
+ break;
+ case dml2_422_planar_12:
+ val = 1;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
+
+bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 0;
+ break;
+ case dml2_420_10:
+ val = 0;
+ break;
+ case dml2_420_12:
+ val = 0;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 1;
+ break;
+ case dml2_422_packed_10:
+ val = 1;
+ break;
+ case dml2_422_packed_12:
+ val = 1;
+ break;
default:
DML2_ASSERT(0);
break;
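Each of these predicates enumerates the full dml2_source_format_class just to flag three members; the exhaustive switch is likely kept for the DML2_ASSERT coverage on new enum values. A compact equivalent, sketch only, assuming the dml2 enum from the headers above is in scope:

#include <stdbool.h>

/* same result as dml2_core_utils_is_422_packed, without the exhaustive-switch validation */
static bool is_422_packed_compact(enum dml2_source_format_class f)
{
	return f == dml2_422_packed_8 ||
	       f == dml2_422_packed_10 ||
	       f == dml2_422_packed_12;
}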
@@ -154,9 +298,9 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
@@ -280,39 +424,49 @@ bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_c
return is_phantom;
}
-unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
-{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256; break;
- case (dml2_sw_256b_2d):
- return 256; break;
- case (dml2_sw_4kb_2d):
- return 4096; break;
- case (dml2_sw_64kb_2d):
- return 65536; break;
- case (dml2_sw_256kb_2d):
- return 262144; break;
- case (dml2_gfx11_sw_linear):
- return 256; break;
- case (dml2_gfx11_sw_64kb_d):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536; break;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144; break;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144; break;
- default:
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
+ if (sw_mode == dml2_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_sw_256b_2d)
+ return 256;
+ else if (sw_mode == dml2_sw_4kb_2d)
+ return 4096;
+ else if (sw_mode == dml2_sw_64kb_2d)
+ return 65536;
+ else if (sw_mode == dml2_sw_256kb_2d)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_t)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_256kb_d_x)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
+ return 262144;
+ else {
DML2_ASSERT(0);
return 256;
};
}
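The new byte_per_pixel parameter is unused by this helper itself (the returned sizes depend only on the swizzle mode) but mirrors the signature of dml2_core_utils_get_segment_horizontal_contiguous() below. A usage sketch, assuming the dml2 headers are in scope:

/* a 64 KB 2D-swizzled surface: 65536 bytes regardless of byte_per_pixel */
unsigned int block_bytes = dml2_core_utils_get_tile_block_size_bytes(dml2_sw_64kb_2d, 4);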
+bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
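+ /* in this model, only 2 byte-per-pixel formats are not horizontally contiguous */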
+ return (byte_per_pixel != 2);
+}
+
+bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
+{
+ return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+}
+
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
{
@@ -325,7 +479,6 @@ bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
return is_vert;
}
-
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
{
int unsigned version = 0;
@@ -334,17 +487,17 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_sw_256b_2d ||
sw_mode == dml2_sw_4kb_2d ||
sw_mode == dml2_sw_64kb_2d ||
- sw_mode == dml2_sw_256kb_2d) {
+ sw_mode == dml2_sw_256kb_2d)
version = 12;
- } else if (sw_mode == dml2_gfx11_sw_linear ||
+ else if (sw_mode == dml2_gfx11_sw_linear ||
sw_mode == dml2_gfx11_sw_64kb_d ||
sw_mode == dml2_gfx11_sw_64kb_d_t ||
sw_mode == dml2_gfx11_sw_64kb_d_x ||
sw_mode == dml2_gfx11_sw_64kb_r_x ||
sw_mode == dml2_gfx11_sw_256kb_d_x ||
- sw_mode == dml2_gfx11_sw_256kb_r_x) {
+ sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
- } else {
+ else {
dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
DML2_ASSERT(0);
}
@@ -403,7 +556,7 @@ bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
bool ret_val = 0;
- if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
+ if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
ret_val = 1;
return ret_val;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
index a5cc6a07167a..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
@@ -11,6 +11,8 @@
double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
bool dml2_core_utils_is_420(enum dml2_source_format_class source_format);
+bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format);
+bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format);
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
@@ -18,8 +20,10 @@ unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int mu
unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info);
void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
-unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
+bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
+bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode);
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 8869ea089312..fc77fb34a19a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -96,6 +96,7 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
double min_uclk_latency;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
+ /* assumes DF throttling is enabled */
min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100);
@@ -125,6 +126,37 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
+
+ /* assumes DF throttling is disabled */
+ min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
+ min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100);
+
+ min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
+ min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100);
+
+ min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
+
+ min_fclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
+ min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.fclk_derate_percent / 100);
+
+ min_fclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
+ min_fclk_urgent = (double)min_fclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / 100);
+
+ min_fclk_bw = min_fclk_urgent > min_fclk_avg ? min_fclk_urgent : min_fclk_avg;
+
+ min_dcfclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
+ min_dcfclk_avg = (double)min_dcfclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dcfclk_derate_percent / 100);
+
+ min_dcfclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
+ min_dcfclk_urgent = (double)min_dcfclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100);
+
+ min_dcfclk_bw = min_dcfclk_urgent > min_dcfclk_avg ? min_dcfclk_urgent : min_dcfclk_avg;
+
+ get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
+
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
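(The no-throttle block added above repeats the throttled computation with the system_active derate tables. As a rough standalone sketch of that arithmetic, with made-up figures and an illustrative helper name: each raw minimum clock is inflated by dividing by the derate percentage, then the programmed minimum is the max of the bandwidth- and latency-derived values.)

#include <math.h>
#include <stdio.h>

static double apply_derate(double clk_khz, double derate_percent)
{
	return clk_khz / (derate_percent / 100.0);
}

int main(void)
{
	double min_uclk_avg = apply_derate(800000.0, 80.0);    /* -> 1000000 kHz */
	double min_uclk_urgent = apply_derate(900000.0, 75.0); /* -> 1200000 kHz */
	double min_uclk_latency = 1100000.0;                   /* from latency model */

	double min_uclk_bw = fmax(min_uclk_avg, min_uclk_urgent);

	printf("programmed uclk min: %.0f kHz\n",
	       ceil(fmax(min_uclk_bw, min_uclk_latency)));
	return 0;
}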
@@ -272,6 +304,17 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.uclk_khz, &state_table->uclk);
+ /* these clocks are optional, so they can fail to map, in which case map all to 0 */
+ if (result) {
+ if (!round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz, &state_table->dcfclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz, &state_table->fclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz, &state_table->uclk)) {
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = 0;
+ }
+ }
+
return result;
}
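(A compact sketch of the all-or-nothing fallback used above for the optional svp_prefetch_no_throttle clocks. The rounding helper here is a stand-in that only checks whether a satisfying DPM level exists; the real round_up_to_next_dpm would also round the clock up to that level.)

#include <stdbool.h>
#include <stdio.h>

struct clk_group { unsigned long dcfclk_khz, fclk_khz, uclk_khz; };

/* Stand-in: succeeds iff some DPM level can satisfy the minimum. */
static bool sketch_map_to_dpm(unsigned long *clk_khz, unsigned long dpm_max)
{
	return *clk_khz <= dpm_max;
}

/* If any optional clock in the group fails to map, zero the whole
 * group so consumers see the set as absent. */
static void map_optional_group(struct clk_group *g, unsigned long dpm_max)
{
	if (!sketch_map_to_dpm(&g->dcfclk_khz, dpm_max) ||
	    !sketch_map_to_dpm(&g->fclk_khz, dpm_max) ||
	    !sketch_map_to_dpm(&g->uclk_khz, dpm_max))
		g->dcfclk_khz = g->fclk_khz = g->uclk_khz = 0;
}

int main(void)
{
	struct clk_group g = { 400000, 900000, 2000000 };

	map_optional_group(&g, 1500000); /* uclk fails -> all zeroed */
	printf("%lu %lu %lu\n", g.dcfclk_khz, g.fclk_khz, g.uclk_khz);
	return 0;
}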
@@ -374,11 +417,11 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
// Create a remap array to enable simple iteration through only masked stream indices
for (i = 0; i < display_config->num_streams; i++) {
@@ -413,10 +456,10 @@ static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *displa
static int find_smallest_idle_time_in_vblank_us(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int mask)
{
- unsigned char i;
+ unsigned int i;
int min_idle_us = 0;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
// Create a remap array to enable simple iteration through only masked stream indices
@@ -711,7 +754,7 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
@@ -725,7 +768,7 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index a31db5742675..e763c8e45da8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -195,11 +195,11 @@ static int count_planes_with_stream_index(const struct dml2_display_cfg *display
static bool are_timings_trivially_synchronizable(struct display_configuation_with_meta *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
// Create a remap array to enable simple iteration through only masked stream indices
for (i = 0; i < display_config->display_config.num_streams; i++) {
@@ -347,8 +347,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
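(The change above boils down to a guarded division, since odms_used can legitimately be zero before ODM assignment. A one-function standalone sketch, names illustrative:)

#include <stdio.h>

/* Treat streams with no ODM segments as zero load instead of dividing
 * by zero. */
static int odm_load_khz(int pixel_clock_khz, int odms_used)
{
	return odms_used > 0 ? pixel_clock_khz / odms_used : 0;
}

int main(void)
{
	printf("%d %d\n", odm_load_khz(594000, 2), odm_load_khz(594000, 0));
	return 0;
}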
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 92269f0e50ed..a3324f7b9ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -13,32 +13,32 @@ static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
// VActive Preferred
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then SVP
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Finally VBlank, but allow base clocks for latency to increase
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -49,56 +49,56 @@ static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1
static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
// VActive only is preferred
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then VActive + VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then VBlank only
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then SVP + VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then SVP + DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then SVP + SVP
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then DRR + VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then DRR + DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Finally VBlank, but allow base clocks for latency to increase
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -109,32 +109,32 @@ static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2
static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
// All VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na },
.allow_state_increase = true,
},
// VActive + 1 VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = false,
},
// All VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = false,
},
// All DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na },
.allow_state_increase = true,
},
// All VBlank, with state increase allowed
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -145,32 +145,32 @@ static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3
static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
// All VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive },
.allow_state_increase = true,
},
// VActive + 1 VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
.allow_state_increase = false,
},
// All VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
.allow_state_increase = false,
},
// All DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr },
.allow_state_increase = true,
},
// All VBlank, with state increase allowed
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
.allow_state_increase = true,
},
*/
@@ -355,29 +355,30 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
return result;
}
-static enum dml2_pmo_pstate_method convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_method base_strategy)
+static enum dml2_pstate_method convert_strategy_to_drr_variant(const enum dml2_pstate_method base_strategy)
{
- enum dml2_pmo_pstate_method variant_strategy = 0;
+ enum dml2_pstate_method variant_strategy = 0;
switch (base_strategy) {
- case dml2_pmo_pstate_strategy_vactive:
- variant_strategy = dml2_pmo_pstate_strategy_fw_vactive_drr;
+ case dml2_pstate_method_vactive:
+ variant_strategy = dml2_pstate_method_fw_vactive_drr;
break;
- case dml2_pmo_pstate_strategy_vblank:
- variant_strategy = dml2_pmo_pstate_strategy_fw_vblank_drr;
+ case dml2_pstate_method_vblank:
+ variant_strategy = dml2_pstate_method_fw_vblank_drr;
break;
- case dml2_pmo_pstate_strategy_fw_svp:
- variant_strategy = dml2_pmo_pstate_strategy_fw_svp_drr;
+ case dml2_pstate_method_fw_svp:
+ variant_strategy = dml2_pstate_method_fw_svp_drr;
break;
- case dml2_pmo_pstate_strategy_fw_vactive_drr:
- case dml2_pmo_pstate_strategy_fw_vblank_drr:
- case dml2_pmo_pstate_strategy_fw_svp_drr:
- case dml2_pmo_pstate_strategy_fw_drr:
- case dml2_pmo_pstate_strategy_reserved_hw:
- case dml2_pmo_pstate_strategy_reserved_fw:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_clamped:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_var:
- case dml2_pmo_pstate_strategy_na:
+ case dml2_pstate_method_fw_vactive_drr:
+ case dml2_pstate_method_fw_vblank_drr:
+ case dml2_pstate_method_fw_svp_drr:
+ case dml2_pstate_method_fw_drr:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
default:
/* no variant for this mode */
variant_strategy = base_strategy;
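(Rename aside, the conversion logic above is unchanged: only the three non-DRR methods gain a firmware DRR variant; everything else maps to itself. An illustrative standalone mirror, with stand-in enum values rather than the dml2_pstate_method names:)

#include <assert.h>

enum sketch_method {
	M_NA, M_VACTIVE, M_VBLANK, M_FW_SVP,
	M_FW_VACTIVE_DRR, M_FW_VBLANK_DRR, M_FW_SVP_DRR, M_FW_DRR,
};

static enum sketch_method to_drr_variant(enum sketch_method base)
{
	switch (base) {
	case M_VACTIVE:	return M_FW_VACTIVE_DRR;
	case M_VBLANK:	return M_FW_VBLANK_DRR;
	case M_FW_SVP:	return M_FW_SVP_DRR;
	default:	return base; /* no variant for this mode */
	}
}

int main(void)
{
	assert(to_drr_variant(M_VACTIVE) == M_FW_VACTIVE_DRR);
	assert(to_drr_variant(M_FW_DRR) == M_FW_DRR);
	return 0;
}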
@@ -419,23 +420,22 @@ static unsigned int get_num_expanded_strategies(
static void insert_strategy_into_expanded_list(
const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
- int stream_count,
- struct dml2_pmo_init_data *init_data)
+ const int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
- struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
-
- expanded_strategy_list = get_expanded_strategy_list(init_data, stream_count);
+ if (expanded_strategy_list && num_expanded_strategies) {
+ memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
- if (expanded_strategy_list) {
- memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
-
- init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++;
+ (*num_expanded_strategies)++;
}
}
-static void expand_base_strategy(struct dml2_pmo_instance *pmo,
+static void expand_base_strategy(
const struct dml2_pmo_pstate_strategy *base_strategy,
- unsigned int stream_count)
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
bool skip_to_next_stream;
bool expanded_strategy_added;
@@ -473,7 +473,7 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
if (i >= stream_count - 1) {
/* insert into strategy list */
- insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, &pmo->init_data);
+ insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, expanded_strategy_list, num_expanded_strategies);
expanded_strategy_added = true;
} else {
/* skip to next stream */
@@ -512,9 +512,9 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
const struct dml2_pmo_pstate_strategy *variant_strategy,
- unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
- unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
- unsigned int stream_count)
+ const unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
+ const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
+ const unsigned int stream_count)
{
bool valid = true;
unsigned int i;
@@ -522,7 +522,7 @@ static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_
/* check all restrictions are met */
for (i = 0; i < stream_count; i++) {
/* vblank + vblank_drr variants are invalid */
- if (base_strategy->per_stream_pstate_method[i] == dml2_pmo_pstate_strategy_vblank &&
+ if (base_strategy->per_stream_pstate_method[i] == dml2_pstate_method_vblank &&
((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
num_streams_per_variant_method[i] > 1)) {
valid = false;
@@ -533,9 +533,12 @@ static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_
return valid;
}
-static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
+static void expand_variant_strategy(
const struct dml2_pmo_pstate_strategy *base_strategy,
- unsigned int stream_count)
+ const unsigned int stream_count,
+ const bool should_permute,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
bool variant_found;
unsigned int i, j;
@@ -544,7 +547,7 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
- enum dml2_pmo_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
+ enum dml2_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
struct dml2_pmo_pstate_strategy variant_strategy = { 0 };
/* determine number of displays per method */
@@ -585,7 +588,13 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
}
if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
- expand_base_strategy(pmo, &variant_strategy, stream_count);
+ if (should_permute) {
+ /* permutations are permitted, proceed to expand */
+ expand_base_strategy(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
+ } else {
+ /* no permutations allowed, so add to list now */
+ insert_strategy_into_expanded_list(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
+ }
}
/* rollback to earliest method with bases remaining */
@@ -612,18 +621,19 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
}
}
-static void expand_base_strategies(
- struct dml2_pmo_instance *pmo,
- const struct dml2_pmo_pstate_strategy *base_strategies_list,
- const unsigned int num_base_strategies,
- unsigned int stream_count)
+void pmo_dcn4_fams2_expand_base_pstate_strategies(
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
+ const unsigned int num_base_strategies,
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
unsigned int i;
/* expand every explicit base strategy (except all DRR) */
for (i = 0; i < num_base_strategies; i++) {
- expand_base_strategy(pmo, &base_strategies_list[i], stream_count);
- expand_variant_strategy(pmo, &base_strategies_list[i], stream_count);
+ expand_base_strategy(&base_strategies_list[i], stream_count, expanded_strategy_list, num_expanded_strategies);
+ expand_variant_strategy(&base_strategies_list[i], stream_count, true, expanded_strategy_list, num_expanded_strategies);
}
}
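(Net effect of the refactor above: expansion no longer reaches into pmo->init_data; callers hand in the output buffer and a counter. A minimal sketch of that append pattern under simplified, illustrative types:)

#include <stdio.h>
#include <string.h>

#define MAX_STRATEGIES 8

struct sketch_strategy { int methods[4]; };

/* Append into a caller-provided buffer and bump the caller's counter. */
static void append_strategy(const struct sketch_strategy *s,
			    struct sketch_strategy *out,
			    unsigned int *num_out)
{
	if (out && num_out && *num_out < MAX_STRATEGIES)
		memcpy(&out[(*num_out)++], s, sizeof(*s));
}

int main(void)
{
	struct sketch_strategy list[MAX_STRATEGIES];
	unsigned int count = 0;
	struct sketch_strategy s = { { 1, 2, 0, 0 } };

	append_strategy(&s, list, &count); /* caller owns list + count */
	printf("expanded strategies: %u\n", count);
	return 0;
}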
@@ -652,25 +662,45 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_1_display, base_strategy_list_1_display_size, 1);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_1_display,
+ base_strategy_list_1_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 2:
DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_2_display, base_strategy_list_2_display_size, 2);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_2_display,
+ base_strategy_list_2_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 3:
DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_3_display, base_strategy_list_3_display_size, 3);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_3_display,
+ base_strategy_list_3_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 4:
DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_4_display, base_strategy_list_4_display_size, 4);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_4_display,
+ base_strategy_list_4_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
}
}
@@ -783,8 +813,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
@@ -941,11 +975,8 @@ static void build_synchronized_timing_groups(
/* find synchronizable timing groups */
for (j = i + 1; j < display_config->display_config.num_streams; j++) {
if (memcmp(master_timing,
- &display_config->display_config.stream_descriptors[j].timing,
- sizeof(struct dml2_timing_cfg)) == 0 &&
- display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder &&
- (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || //hdmi requires formats match
- display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) {
+ &display_config->display_config.stream_descriptors[j].timing,
+ sizeof(struct dml2_timing_cfg)) == 0) {
set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
set_bit_in_bitfield(&stream_mapped_mask, j);
}
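(With the encoder and HDMI format checks dropped, group membership above reduces to bitwise equality of the timing struct. A standalone sketch of memcmp-based grouping into bitmasks; the timing struct and values are illustrative, and the padding-free layout makes the memcmp comparison safe here:)

#include <stdio.h>
#include <string.h>

struct sketch_timing { int h_total, v_total, pixel_clock_khz; };

int main(void)
{
	struct sketch_timing t[3] = {
		{ 2200, 1125, 148500 },
		{ 2200, 1125, 148500 }, /* identical to stream 0 */
		{ 4400, 2250, 594000 },
	};
	unsigned int mapped = 0, group_mask[3] = { 0 };
	unsigned int i, j, groups = 0;

	for (i = 0; i < 3; i++) {
		if (mapped & (1u << i))
			continue; /* already claimed by an earlier group */
		group_mask[groups] |= 1u << i;
		mapped |= 1u << i;
		for (j = i + 1; j < 3; j++) {
			if (!memcmp(&t[i], &t[j], sizeof(t[i]))) {
				group_mask[groups] |= 1u << j;
				mapped |= 1u << j;
			}
		}
		groups++;
	}
	printf("groups=%u first mask=0x%x\n", groups, group_mask[0]);
	return 0;
}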
@@ -959,7 +990,7 @@ static bool all_timings_support_vactive(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
bool valid = true;
// Create a remap array to enable simple iteration through only masked stream indices
@@ -1008,7 +1039,7 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_fams2_meta *stream_fams2_meta;
@@ -1050,7 +1081,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
const struct dml2_plane_parameters *plane_descriptor;
const struct dml2_fams2_meta *stream_fams2_meta;
unsigned int microschedule_vlines;
- unsigned char i;
+ unsigned int i;
unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };
@@ -1106,24 +1137,73 @@ static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *ps
scratch->pmo_dcn4.num_pstate_candidates++;
}
-static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_method method)
+static enum dml2_pstate_method uclk_pstate_strategy_override_to_pstate_method(const enum dml2_uclk_pstate_change_strategy override_strategy)
{
- unsigned char i;
- enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na;
+ enum dml2_pstate_method method = dml2_pstate_method_na;
- if (method == dml2_pmo_pstate_strategy_vactive || method == dml2_pmo_pstate_strategy_fw_vactive_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
- else if (method == dml2_pmo_pstate_strategy_vblank || method == dml2_pmo_pstate_strategy_fw_vblank_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
- else if (method == dml2_pmo_pstate_strategy_fw_svp)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
- else if (method == dml2_pmo_pstate_strategy_fw_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_drr;
+ switch (override_strategy) {
+ case dml2_uclk_pstate_change_strategy_force_vactive:
+ method = dml2_pstate_method_vactive;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_vblank:
+ method = dml2_pstate_method_vblank;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_drr:
+ method = dml2_pstate_method_fw_drr;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_mall_svp:
+ method = dml2_pstate_method_fw_svp;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_mall_full_frame:
+ case dml2_uclk_pstate_change_strategy_auto:
+ default:
+ method = dml2_pstate_method_na;
+ }
+
+ return method;
+}
+
+static enum dml2_uclk_pstate_change_strategy pstate_method_to_uclk_pstate_strategy_override(const enum dml2_pstate_method method)
+{
+ enum dml2_uclk_pstate_change_strategy override_strategy = dml2_uclk_pstate_change_strategy_auto;
+
+ switch (method) {
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
+ break;
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
+ break;
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
+ break;
+ case dml2_pstate_method_fw_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_drr;
+ break;
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
+ default:
+ override_strategy = dml2_uclk_pstate_change_strategy_auto;
+ }
+
+ return override_strategy;
+}
+
+static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pstate_method method)
+{
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
- display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != matching_strategy)
+ display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != pstate_method_to_uclk_pstate_strategy_override(method))
return false;
}
}
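(Worth noting about the converter pair above: they are not strict inverses. Each forced override round-trips to its base method, but DRR-variant methods collapse onto the base override first, e.g. fw_svp_drr maps to force_mall_svp, which maps back to fw_svp. A small illustrative check with stand-in enums:)

#include <assert.h>

enum m { M_NA, M_VACTIVE, M_FW_SVP, M_FW_SVP_DRR };
enum o { O_AUTO, O_FORCE_VACTIVE, O_FORCE_MALL_SVP };

static enum m override_to_method(enum o ov)
{
	switch (ov) {
	case O_FORCE_VACTIVE:	return M_VACTIVE;
	case O_FORCE_MALL_SVP:	return M_FW_SVP;
	default:		return M_NA;
	}
}

static enum o method_to_override(enum m method)
{
	switch (method) {
	case M_VACTIVE:		return O_FORCE_VACTIVE;
	case M_FW_SVP:
	case M_FW_SVP_DRR:	return O_FORCE_MALL_SVP; /* collapses */
	default:		return O_AUTO;
	}
}

int main(void)
{
	assert(override_to_method(method_to_override(M_FW_SVP)) == M_FW_SVP);
	/* the DRR variant does not round-trip: */
	assert(override_to_method(method_to_override(M_FW_SVP_DRR)) == M_FW_SVP);
	return 0;
}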
@@ -1149,32 +1229,33 @@ static void build_method_scheduling_params(
static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
- enum dml2_pmo_pstate_method stream_pstate_method,
+ enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
switch (stream_pstate_method) {
- case dml2_pmo_pstate_strategy_vactive:
- case dml2_pmo_pstate_strategy_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
break;
- case dml2_pmo_pstate_strategy_vblank:
- case dml2_pmo_pstate_strategy_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
break;
- case dml2_pmo_pstate_strategy_fw_svp:
- case dml2_pmo_pstate_strategy_fw_svp_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
break;
- case dml2_pmo_pstate_strategy_fw_drr:
+ case dml2_pstate_method_fw_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
break;
- case dml2_pmo_pstate_strategy_reserved_hw:
- case dml2_pmo_pstate_strategy_reserved_fw:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_clamped:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_var:
- case dml2_pmo_pstate_strategy_na:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
default:
stream_method_fams2_meta = NULL;
}
@@ -1215,7 +1296,7 @@ static bool is_timing_group_schedulable(
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
if (!stream_method_fams2_meta)
- return false;
+ continue;
if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
@@ -1295,7 +1376,7 @@ static bool is_config_schedulable(
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
- s->pmo_dcn4.sorted_group_gtl_disallow_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]);
swapped = true;
}
}
@@ -1354,7 +1435,7 @@ static bool is_config_schedulable(
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
- s->pmo_dcn4.sorted_group_gtl_period_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]);
swapped = true;
}
}
@@ -1413,7 +1494,7 @@ static bool is_config_schedulable(
static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_method stream_pstate_method,
+ const enum dml2_pstate_method stream_pstate_method,
unsigned int stream_index)
{
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
@@ -1468,7 +1549,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
{
struct dml2_pmo_scratch *s = &pmo->scratch;
- unsigned char stream_index = 0;
+ unsigned int stream_index = 0;
unsigned int svp_count = 0;
unsigned int svp_stream_mask = 0;
@@ -1494,19 +1575,19 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
strategy_matches_drr_requirements &=
stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
- if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
svp_count++;
set_bit_in_bitfield(&svp_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
drr_count++;
set_bit_in_bitfield(&drr_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
vactive_count++;
set_bit_in_bitfield(&vactive_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
vblank_count++;
set_bit_in_bitfield(&vblank_stream_mask, stream_index);
}
@@ -1532,7 +1613,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
- unsigned char i;
+ unsigned int i;
int min_vactive_margin_us = 0xFFFFFFF;
for (i = 0; i < DML2_MAX_PLANES; i++) {
@@ -1625,7 +1706,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
/* for single stream, guarantee at least an instant of allow */
stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - stream_fams2_meta->min_allow_width_otg_vlines - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
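(The single-stream clamp above simply guarantees at least one OTG vline of allow width before the subtraction. Expressed as standalone arithmetic with illustrative values:)

#include <math.h>
#include <stdio.h>

int main(void)
{
	double v_active = 1080.0;
	double min_allow_width_vlines = 0.0; /* without the clamp, this erases the allow */
	double blackout_vlines = 40.0;

	/* min allow width is floored at one vline */
	double fill_delay = floor(fmax(0.0,
		v_active - fmax(1.0, min_allow_width_vlines) - blackout_vlines));

	printf("max vactive DET fill delay: %.0f vlines\n", fill_delay);
	return 0;
}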
@@ -1738,8 +1819,10 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
struct display_configuation_with_meta *display_config;
const struct dml2_plane_parameters *plane_descriptor;
const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
+ struct dml2_pmo_pstate_strategy override_base_strategy = { 0 };
unsigned int strategy_list_size = 0;
- unsigned char plane_index, stream_index, i;
+ unsigned int plane_index, stream_index, i;
+ bool build_override_strategy = true;
state->performed = true;
in_out->base_display_config->stage3.min_clk_index_for_latency = in_out->base_display_config->stage1.min_clk_index_for_latency;
@@ -1763,7 +1846,11 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);
- state->pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
+ state->pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
+
+ build_override_strategy &= plane_descriptor->overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto;
+ override_base_strategy.per_stream_pstate_method[plane_descriptor->stream_index] =
+ uclk_pstate_strategy_override_to_pstate_method(plane_descriptor->overrides.uclk_pstate_change_strategy);
}
// Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta
@@ -1781,13 +1868,30 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
/* get synchronized timing groups */
build_synchronized_timing_groups(pmo, display_config);
- strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
- if (!strategy_list)
- return false;
-
- strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
+ if (build_override_strategy) {
+ /* build expanded override strategy list (no permutations) */
+ override_base_strategy.allow_state_increase = true;
+ s->pmo_dcn4.num_expanded_override_strategies = 0;
+ insert_strategy_into_expanded_list(&override_base_strategy,
+ display_config->display_config.num_streams,
+ s->pmo_dcn4.expanded_override_strategy_list,
+ &s->pmo_dcn4.num_expanded_override_strategies);
+ expand_variant_strategy(&override_base_strategy,
+ display_config->display_config.num_streams,
+ false,
+ s->pmo_dcn4.expanded_override_strategy_list,
+ &s->pmo_dcn4.num_expanded_override_strategies);
+
+ /* use override strategy list */
+ strategy_list = s->pmo_dcn4.expanded_override_strategy_list;
+ strategy_list_size = s->pmo_dcn4.num_expanded_override_strategies;
+ } else {
+ /* use predefined strategy list */
+ strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
+ strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
+ }
- if (strategy_list_size == 0)
+ if (!strategy_list || strategy_list_size == 0)
return false;
s->pmo_dcn4.num_pstate_candidates = 0;
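(The control flow introduced above: only when every plane carries an explicit uclk_pstate_change_strategy override does the PMO build the non-permuted override list; otherwise it falls back to the predefined per-stream-count lists. A condensed sketch of that selection, with stand-in data rather than the PMO scratch structures:)

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool build_override_strategy = true;
	int plane_overrides[3] = { 1, 1, 0 }; /* 0 == strategy_auto */
	int i;

	/* all planes must opt in, mirroring the &= accumulation above */
	for (i = 0; i < 3; i++)
		build_override_strategy &= plane_overrides[i] != 0;

	printf("use %s strategy list\n",
	       build_override_strategy ? "override" : "predefined");
	return 0;
}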
@@ -1799,7 +1903,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
}
if (s->pmo_dcn4.num_pstate_candidates > 0) {
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates - 1].allow_state_increase = true;
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates-1].allow_state_increase = true;
s->pmo_dcn4.cur_pstate_candidate = -1;
return true;
} else {
@@ -1832,7 +1936,7 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
// Reset strategy to auto
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_not_supported;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_na;
}
}
@@ -1840,7 +1944,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1849,7 +1953,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
}
}
@@ -1861,13 +1965,13 @@ static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp;
}
}
@@ -1884,13 +1988,13 @@ static void setup_planes_for_svp_drr_by_mask(struct display_configuation_with_me
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp_drr;
}
}
@@ -1905,7 +2009,7 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1915,7 +2019,7 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
plane->overrides.reserved_vblank_time_ns);
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
}
}
@@ -1925,7 +2029,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1933,7 +2037,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
plane = &display_config->display_config.plane_descriptors[plane_index];
plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_vblank_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
}
}
}
@@ -1942,14 +2046,14 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
@@ -1963,14 +2067,14 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_vactive_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
@@ -1992,26 +2096,26 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
+ if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
success = false;
break;
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive) {
setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) {
setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp) {
fams2_required = true;
setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
fams2_required = true;
setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
fams2_required = true;
setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
fams2_required = true;
setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
fams2_required = true;
setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
}
@@ -2031,7 +2135,7 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
{
int min_time_us = 0xFFFFFF;
- unsigned char plane_index = 0;
+ unsigned int plane_index = 0;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
@@ -2066,34 +2170,34 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
- if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
REQUIRED_RESERVED_TIME ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) ||
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
+ if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
p_state_supported = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 0c25bd3e9ac0..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
@@ -23,4 +23,11 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
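+/*
+ * Expand a list of base p-state strategies into the full candidate list for
+ * the given stream count; results are written to expanded_strategy_list and
+ * the resulting count to num_expanded_strategies.
+ */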
+void pmo_dcn4_fams2_expand_base_pstate_strategies(
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
+ const unsigned int num_base_strategies,
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index add51d41a515..7ed0242a4b33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -72,7 +72,6 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
out->init_for_stutter = pmo_dcn4_fams2_init_for_stutter;
out->test_for_stutter = pmo_dcn4_fams2_test_for_stutter;
out->optimize_for_stutter = pmo_dcn4_fams2_optimize_for_stutter;
-
result = true;
break;
case dml2_project_invalid:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
new file mode 100644
index 000000000000..f88931ccbc5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml_top.h"
+#include "dml2_internal_shared_types.h"
+#include "dml2_top_soc15.h"
+
+unsigned int dml2_get_instance_size_bytes(void)
+{
+ return sizeof(struct dml2_instance);
+}
+
+bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
+{
+ switch (in_out->options.project_id) {
+ case dml2_project_dcn4x_stage1:
+ case dml2_project_dcn4x_stage2:
+ case dml2_project_dcn4x_stage2_auto_drr_svp:
+ return dml2_top_soc15_initialize_instance(in_out);
+ case dml2_project_invalid:
+ default:
+ return false;
+ }
+}
+
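+// The entry points below simply dispatch through the function pointers that
+// the project-specific initialization above installed on the instance.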
+bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.check_mode_supported)
+ return false;
+
+ return in_out->dml2_instance->funcs.check_mode_supported(in_out);
+}
+
+bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.build_mode_programming)
+ return false;
+
+ return in_out->dml2_instance->funcs.build_mode_programming(in_out);
+}
+
+bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.build_mcache_programming)
+ return false;
+
+ return in_out->dml2_instance->funcs.build_mcache_programming(in_out);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
new file mode 100644
index 000000000000..5e14d85821e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_top_legacy.h"
+#include "dml2_top_soc15.h"
+#include "dml2_core_factory.h"
+#include "dml2_pmo_factory.h"
+#include "display_mode_core_structs.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
new file mode 100644
index 000000000000..14d0ae03dce6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_TOP_LEGACY_H__
+#define __DML2_TOP_LEGACY_H__
+#include "dml2_internal_shared_types.h"
+bool dml2_top_legacy_initialize_instance(struct dml2_initialize_instance_in_out *in_out);
+#endif /* __DML2_TOP_LEGACY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
deleted file mode 100644
index d0e026d981b5..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_top_optimization.h"
-#include "dml2_internal_shared_types.h"
-#include "dml_top_mcache.h"
-
-static void copy_display_configuration_with_meta(struct display_configuation_with_meta *dst, const struct display_configuation_with_meta *src)
-{
- memcpy(dst, src, sizeof(struct display_configuation_with_meta));
-}
-
-bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
-
- state->performed = true;
-
- return true;
-}
-
-bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
-
- return state->min_clk_index_for_latency == 0;
-}
-
-bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params)
-{
- bool result = false;
-
- if (params->display_config->stage1.min_clk_index_for_latency > 0) {
- copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
- params->optimized_display_config->stage1.min_clk_index_for_latency--;
- result = true;
- }
-
- return result;
-}
-
-bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
- bool mcache_success = false;
- bool result = false;
-
- memset(l, 0, sizeof(struct dml2_optimization_test_function_locals));
-
- l->test_mcache.calc_mcache_count_params.dml2_instance = params->dml;
- l->test_mcache.calc_mcache_count_params.display_config = &params->display_config->display_config;
- l->test_mcache.calc_mcache_count_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
-
- result = dml2_top_mcache_calc_mcache_count_and_offsets(&l->test_mcache.calc_mcache_count_params); // use core to get the basic mcache_allocations
-
- if (result) {
- l->test_mcache.assign_global_mcache_ids_params.allocations = params->display_config->stage2.mcache_allocations;
- l->test_mcache.assign_global_mcache_ids_params.num_allocations = params->display_config->display_config.num_planes;
-
- dml2_top_mcache_assign_global_mcache_ids(&l->test_mcache.assign_global_mcache_ids_params);
-
- l->test_mcache.validate_admissibility_params.dml2_instance = params->dml;
- l->test_mcache.validate_admissibility_params.display_cfg = &params->display_config->display_config;
- l->test_mcache.validate_admissibility_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
- l->test_mcache.validate_admissibility_params.cfg_support_info = &params->display_config->mode_support_result.cfg_support_info;
-
- mcache_success = dml2_top_mcache_validate_admissability(&l->test_mcache.validate_admissibility_params); // also find the shift to make mcache allocation works
-
- memcpy(params->display_config->stage2.per_plane_mcache_support, l->test_mcache.validate_admissibility_params.per_plane_status, sizeof(bool) * DML2_MAX_PLANES);
- }
-
- return mcache_success;
-}
-
-bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
- bool optimize_success = false;
-
- if (params->last_candidate_supported == false)
- return false;
-
- copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
-
- l->optimize_mcache.optimize_mcache_params.instance = &params->dml->pmo_instance;
- l->optimize_mcache.optimize_mcache_params.dcc_mcache_supported = params->display_config->stage2.per_plane_mcache_support;
- l->optimize_mcache.optimize_mcache_params.display_config = &params->display_config->display_config;
- l->optimize_mcache.optimize_mcache_params.optimized_display_cfg = &params->optimized_display_config->display_config;
- l->optimize_mcache.optimize_mcache_params.cfg_support_info = &params->optimized_display_config->mode_support_result.cfg_support_info;
-
- optimize_success = params->dml->pmo_instance.optimize_dcc_mcache(&l->optimize_mcache.optimize_mcache_params);
-
- return optimize_success;
-}
-
-bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->vmin.init_params.instance = &params->dml->pmo_instance;
- l->vmin.init_params.base_display_config = params->display_config;
- return params->dml->pmo_instance.init_for_vmin(&l->vmin.init_params);
-}
-
-bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->test_vmin.pmo_test_vmin_params.instance = &params->dml->pmo_instance;
- l->test_vmin.pmo_test_vmin_params.display_config = params->display_config;
- l->test_vmin.pmo_test_vmin_params.vmin_limits = &params->dml->soc_bbox.vmin_limit;
- return params->dml->pmo_instance.test_for_vmin(&l->test_vmin.pmo_test_vmin_params);
-}
-
-bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- if (params->last_candidate_supported == false)
- return false;
-
- l->optimize_vmin.pmo_optimize_vmin_params.instance = &params->dml->pmo_instance;
- l->optimize_vmin.pmo_optimize_vmin_params.base_display_config = params->display_config;
- l->optimize_vmin.pmo_optimize_vmin_params.optimized_display_config = params->optimized_display_config;
- return params->dml->pmo_instance.optimize_for_vmin(&l->optimize_vmin.pmo_optimize_vmin_params);
-}
-
-bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
-{
- bool test_passed = false;
- bool optimize_succeeded = true;
- bool candidate_validation_passed = true;
- struct optimization_init_function_params init_params = { 0 };
- struct optimization_test_function_params test_params = { 0 };
- struct optimization_optimize_function_params optimize_params = { 0 };
-
- if (!params->dml ||
- !params->optimize_function ||
- !params->test_function ||
- !params->display_config ||
- !params->optimized_display_config)
- return false;
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
-
- init_params.locals = &l->init_function_locals;
- init_params.dml = params->dml;
- init_params.display_config = &l->cur_candidate_display_cfg;
-
- if (params->init_function && !params->init_function(&init_params))
- return false;
-
- test_params.locals = &l->test_function_locals;
- test_params.dml = params->dml;
- test_params.display_config = &l->cur_candidate_display_cfg;
-
- test_passed = params->test_function(&test_params);
-
- while (!test_passed && optimize_succeeded) {
- memset(&optimize_params, 0, sizeof(struct optimization_optimize_function_params));
-
- optimize_params.locals = &l->optimize_function_locals;
- optimize_params.dml = params->dml;
- optimize_params.display_config = &l->cur_candidate_display_cfg;
- optimize_params.optimized_display_config = &l->next_candidate_display_cfg;
- optimize_params.last_candidate_supported = candidate_validation_passed;
-
- optimize_succeeded = params->optimize_function(&optimize_params);
-
- if (optimize_succeeded) {
- l->mode_support_params.instance = &params->dml->core_instance;
- l->mode_support_params.display_cfg = &l->next_candidate_display_cfg;
- l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
-
- if (l->next_candidate_display_cfg.stage3.performed)
- l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage3.min_clk_index_for_latency;
- else
- l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage1.min_clk_index_for_latency;
-
- candidate_validation_passed = params->dml->core_instance.mode_support(&l->mode_support_params);
-
- l->next_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
- }
-
- if (optimize_succeeded && candidate_validation_passed) {
- memset(&test_params, 0, sizeof(struct optimization_test_function_params));
- test_params.locals = &l->test_function_locals;
- test_params.dml = params->dml;
- test_params.display_config = &l->next_candidate_display_cfg;
- test_passed = params->test_function(&test_params);
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, &l->next_candidate_display_cfg);
-
- // If optimization is not all or nothing, then store partial progress in output
- if (!params->all_or_nothing)
- copy_display_configuration_with_meta(params->optimized_display_config, &l->next_candidate_display_cfg);
- }
- }
-
- if (test_passed)
- copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
-
- return test_passed;
-}
-
-bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
-{
- int highest_state, lowest_state, cur_state;
- bool supported = false;
-
- if (!params->dml ||
- !params->optimize_function ||
- !params->test_function ||
- !params->display_config ||
- !params->optimized_display_config)
- return false;
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
- highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
- lowest_state = 0;
-
- while (highest_state > lowest_state) {
- cur_state = (highest_state + lowest_state) / 2;
-
- l->mode_support_params.instance = &params->dml->core_instance;
- l->mode_support_params.display_cfg = &l->cur_candidate_display_cfg;
- l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
- l->mode_support_params.min_clk_index = cur_state;
-
- supported = params->dml->core_instance.mode_support(&l->mode_support_params);
-
- if (supported) {
- l->cur_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
- highest_state = cur_state;
- } else {
- lowest_state = cur_state + 1;
- }
- }
- l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency = lowest_state;
-
- copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
-
- return true;
-}
-
-bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.init_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.init_for_uclk_pstate(&l->uclk_pstate.init_params);
-}
-
-bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->uclk_pstate.test_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.test_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.test_for_uclk_pstate(&l->uclk_pstate.test_params);
-}
-
-bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- l->uclk_pstate.optimize_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.optimize_params.base_display_config = params->display_config;
- l->uclk_pstate.optimize_params.optimized_display_config = params->optimized_display_config;
- l->uclk_pstate.optimize_params.last_candidate_failed = !params->last_candidate_supported;
-
- return params->dml->pmo_instance.optimize_for_uclk_pstate(&l->uclk_pstate.optimize_params);
-}
-
-bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.init_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.init_for_stutter(&l->stutter.stutter_params);
-}
-
-bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->stutter.stutter_params.instance = &params->dml->pmo_instance;
- l->stutter.stutter_params.base_display_config = params->display_config;
- return params->dml->pmo_instance.test_for_stutter(&l->stutter.stutter_params);
-}
-
-bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- l->stutter.stutter_params.instance = &params->dml->pmo_instance;
- l->stutter.stutter_params.base_display_config = params->display_config;
- l->stutter.stutter_params.optimized_display_config = params->optimized_display_config;
- l->stutter.stutter_params.last_candidate_failed = !params->last_candidate_supported;
- return params->dml->pmo_instance.optimize_for_stutter(&l->stutter.stutter_params);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
deleted file mode 100644
index 9f22ab33eab1..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DML2_TOP_OPTIMIZATION_H__
-#define __DML2_TOP_OPTIMIZATION_H__
-
-#include "dml2_external_lib_deps.h"
-#include "dml2_internal_shared_types.h"
-
-bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
-bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
-
-bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
new file mode 100644
index 000000000000..a8f58f8448e4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_top_soc15.h"
+#include "dml2_mcg_factory.h"
+#include "dml2_dpmm_factory.h"
+#include "dml2_core_factory.h"
+#include "dml2_pmo_factory.h"
+#include "lib_float_math.h"
+#include "dml2_debug.h"
+static void setup_unoptimized_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
+{
+ memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
+ out->stage1.min_clk_index_for_latency = dml->min_clk_table.dram_bw_table.num_entries - 1; //dml->min_clk_table.clean_me_up.soc_bb.num_states - 1;
+}
+
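+// Unlike the unoptimized setup above, which starts from the last entry of the
+// DRAM bandwidth table, the speculative setup starts at index 0 (the value the
+// min-clk-for-latency optimization otherwise works toward) and relies on the
+// caller to validate that choice afterwards.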
+static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
+{
+ memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
+ out->stage1.min_clk_index_for_latency = 0;
+}
+
+static void copy_display_configuration_with_meta(struct display_configuation_with_meta *dst, const struct display_configuation_with_meta *src)
+{
+ memcpy(dst, src, sizeof(struct display_configuation_with_meta));
+}
+
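+// Each optimization below is expressed as an init/test/optimize triplet: init
+// prepares per-stage state, test reports whether the current candidate already
+// satisfies the goal, and optimize produces the next candidate. The triplets
+// are driven by the generic phase runners further down.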
+static bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
+
+ state->performed = true;
+
+ return true;
+}
+
+static bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
+
+ return state->min_clk_index_for_latency == 0;
+}
+
+static bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params)
+{
+ bool result = false;
+
+ if (params->display_config->stage1.min_clk_index_for_latency > 0) {
+ copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
+ params->optimized_display_config->stage1.min_clk_index_for_latency--;
+ result = true;
+ }
+
+ return result;
+}
+
+static bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+ bool mcache_success = false;
+ bool result = false;
+
+ memset(l, 0, sizeof(struct dml2_optimization_test_function_locals));
+
+ l->test_mcache.calc_mcache_count_params.dml2_instance = params->dml;
+ l->test_mcache.calc_mcache_count_params.display_config = &params->display_config->display_config;
+ l->test_mcache.calc_mcache_count_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
+
+ result = dml2_top_mcache_calc_mcache_count_and_offsets(&l->test_mcache.calc_mcache_count_params); // use core to get the basic mcache_allocations
+
+ if (result) {
+ l->test_mcache.assign_global_mcache_ids_params.allocations = params->display_config->stage2.mcache_allocations;
+ l->test_mcache.assign_global_mcache_ids_params.num_allocations = params->display_config->display_config.num_planes;
+
+ dml2_top_mcache_assign_global_mcache_ids(&l->test_mcache.assign_global_mcache_ids_params);
+
+ l->test_mcache.validate_admissibility_params.dml2_instance = params->dml;
+ l->test_mcache.validate_admissibility_params.display_cfg = &params->display_config->display_config;
+ l->test_mcache.validate_admissibility_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
+ l->test_mcache.validate_admissibility_params.cfg_support_info = &params->display_config->mode_support_result.cfg_support_info;
+
+ mcache_success = dml2_top_mcache_validate_admissability(&l->test_mcache.validate_admissibility_params); // also find the shift to make the mcache allocation work
+
+ memcpy(params->display_config->stage2.per_plane_mcache_support, l->test_mcache.validate_admissibility_params.per_plane_status, sizeof(bool) * DML2_MAX_PLANES);
+ }
+
+ return mcache_success;
+}
+
+static bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+ bool optimize_success = false;
+
+ if (params->last_candidate_supported == false)
+ return false;
+
+ copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
+
+ l->optimize_mcache.optimize_mcache_params.instance = &params->dml->pmo_instance;
+ l->optimize_mcache.optimize_mcache_params.dcc_mcache_supported = params->display_config->stage2.per_plane_mcache_support;
+ l->optimize_mcache.optimize_mcache_params.display_config = &params->display_config->display_config;
+ l->optimize_mcache.optimize_mcache_params.optimized_display_cfg = &params->optimized_display_config->display_config;
+ l->optimize_mcache.optimize_mcache_params.cfg_support_info = &params->optimized_display_config->mode_support_result.cfg_support_info;
+
+ optimize_success = params->dml->pmo_instance.optimize_dcc_mcache(&l->optimize_mcache.optimize_mcache_params);
+
+ return optimize_success;
+}
+
+static bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->vmin.init_params.instance = &params->dml->pmo_instance;
+ l->vmin.init_params.base_display_config = params->display_config;
+ return params->dml->pmo_instance.init_for_vmin(&l->vmin.init_params);
+}
+
+static bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->test_vmin.pmo_test_vmin_params.instance = &params->dml->pmo_instance;
+ l->test_vmin.pmo_test_vmin_params.display_config = params->display_config;
+ l->test_vmin.pmo_test_vmin_params.vmin_limits = &params->dml->soc_bbox.vmin_limit;
+ return params->dml->pmo_instance.test_for_vmin(&l->test_vmin.pmo_test_vmin_params);
+}
+
+static bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ if (params->last_candidate_supported == false)
+ return false;
+
+ l->optimize_vmin.pmo_optimize_vmin_params.instance = &params->dml->pmo_instance;
+ l->optimize_vmin.pmo_optimize_vmin_params.base_display_config = params->display_config;
+ l->optimize_vmin.pmo_optimize_vmin_params.optimized_display_config = params->optimized_display_config;
+ return params->dml->pmo_instance.optimize_for_vmin(&l->optimize_vmin.pmo_optimize_vmin_params);
+}
+
+static bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.init_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.init_for_uclk_pstate(&l->uclk_pstate.init_params);
+}
+
+static bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->uclk_pstate.test_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.test_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.test_for_uclk_pstate(&l->uclk_pstate.test_params);
+}
+
+static bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ l->uclk_pstate.optimize_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.optimize_params.base_display_config = params->display_config;
+ l->uclk_pstate.optimize_params.optimized_display_config = params->optimized_display_config;
+ l->uclk_pstate.optimize_params.last_candidate_failed = !params->last_candidate_supported;
+
+ return params->dml->pmo_instance.optimize_for_uclk_pstate(&l->uclk_pstate.optimize_params);
+}
+
+static bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.init_for_stutter(&l->stutter.stutter_params);
+}
+
+static bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+ return params->dml->pmo_instance.test_for_stutter(&l->stutter.stutter_params);
+}
+
+static bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+ l->stutter.stutter_params.optimized_display_config = params->optimized_display_config;
+ l->stutter.stutter_params.last_candidate_failed = !params->last_candidate_supported;
+ return params->dml->pmo_instance.optimize_for_stutter(&l->stutter.stutter_params);
+}
+
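+// Generic phase runner: starting from the given display config, alternately
+// optimize and re-validate candidates until the test function passes or the
+// optimize function can no longer produce a new candidate.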
+static bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
+{
+ bool test_passed = false;
+ bool optimize_succeeded = true;
+ bool candidate_validation_passed = true;
+ struct optimization_init_function_params init_params = { 0 };
+ struct optimization_test_function_params test_params = { 0 };
+ struct optimization_optimize_function_params optimize_params = { 0 };
+
+ if (!params->dml ||
+ !params->optimize_function ||
+ !params->test_function ||
+ !params->display_config ||
+ !params->optimized_display_config)
+ return false;
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
+
+ init_params.locals = &l->init_function_locals;
+ init_params.dml = params->dml;
+ init_params.display_config = &l->cur_candidate_display_cfg;
+
+ if (params->init_function && !params->init_function(&init_params))
+ return false;
+
+ test_params.locals = &l->test_function_locals;
+ test_params.dml = params->dml;
+ test_params.display_config = &l->cur_candidate_display_cfg;
+
+ test_passed = params->test_function(&test_params);
+
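+ // Loop: request a new candidate from the optimize function, re-run mode
+ // support on it, and re-test; stop once a candidate passes the test or the
+ // optimize function cannot improve on the last one.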
+ while (!test_passed && optimize_succeeded) {
+ memset(&optimize_params, 0, sizeof(struct optimization_optimize_function_params));
+
+ optimize_params.locals = &l->optimize_function_locals;
+ optimize_params.dml = params->dml;
+ optimize_params.display_config = &l->cur_candidate_display_cfg;
+ optimize_params.optimized_display_config = &l->next_candidate_display_cfg;
+ optimize_params.last_candidate_supported = candidate_validation_passed;
+
+ optimize_succeeded = params->optimize_function(&optimize_params);
+
+ if (optimize_succeeded) {
+ l->mode_support_params.instance = &params->dml->core_instance;
+ l->mode_support_params.display_cfg = &l->next_candidate_display_cfg;
+ l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
+
+ if (l->next_candidate_display_cfg.stage3.performed)
+ l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage3.min_clk_index_for_latency;
+ else
+ l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage1.min_clk_index_for_latency;
+ candidate_validation_passed = params->dml->core_instance.mode_support(&l->mode_support_params);
+ l->next_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
+ }
+
+ if (optimize_succeeded && candidate_validation_passed) {
+ memset(&test_params, 0, sizeof(struct optimization_test_function_params));
+ test_params.locals = &l->test_function_locals;
+ test_params.dml = params->dml;
+ test_params.display_config = &l->next_candidate_display_cfg;
+ test_passed = params->test_function(&test_params);
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, &l->next_candidate_display_cfg);
+
+ // If optimization is not all or nothing, then store partial progress in output
+ if (!params->all_or_nothing)
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->next_candidate_display_cfg);
+ }
+ }
+
+ if (test_passed)
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
+
+ return test_passed;
+}
+
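+// Phase-1 variant: binary-search the min-clock indices between 0 and the
+// stage-1 starting value for the lowest index at which mode support still
+// passes, rather than stepping through candidates one at a time.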
+static bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
+{
+ int highest_state, lowest_state, cur_state;
+ bool supported = false;
+
+ if (!params->dml ||
+ !params->optimize_function ||
+ !params->test_function ||
+ !params->display_config ||
+ !params->optimized_display_config)
+ return false;
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
+ highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
+ lowest_state = 0;
+
+ while (highest_state > lowest_state) {
+ cur_state = (highest_state + lowest_state) / 2;
+
+ l->mode_support_params.instance = &params->dml->core_instance;
+ l->mode_support_params.display_cfg = &l->cur_candidate_display_cfg;
+ l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
+ l->mode_support_params.min_clk_index = cur_state;
+ supported = params->dml->core_instance.mode_support(&l->mode_support_params);
+
+ if (supported) {
+ l->cur_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
+ highest_state = cur_state;
+ } else {
+ lowest_state = cur_state + 1;
+ }
+ }
+ l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency = lowest_state;
+
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
+
+ return true;
+}
+
+/*
+* Takes an input set of mcache boundaries and finds the appropriate setting of cache programming.
+* Returns true if a valid set of programming can be made, and false otherwise. "Valid" means
+* that the horizontal viewport does not span more than 2 cache slices.
+*
+* It can also optionally apply a constant shift to all the cache boundaries.
+*/
+static const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
+static const uint32_t SPLIT_LOCATION_UNDEFINED = 0xFFFF;
+
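+// On success, *first_offset receives the mcache slice covering the start of
+// the pipe's viewport and *second_offset the slice covering its end, or -1
+// when a single slice spans the entire range.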
+static bool calculate_first_second_splitting(const int *mcache_boundaries, int num_boundaries, int shift,
+ int pipe_h_vp_start, int pipe_h_vp_end, int *first_offset, int *second_offset)
+{
+ const int MAX_VP = 0xFFFFFF;
+ int left_cache_id;
+ int right_cache_id;
+ int range_start;
+ int range_end;
+ bool success = false;
+
+ if (num_boundaries <= 1) {
+ if (first_offset && second_offset) {
+ *first_offset = 0;
+ *second_offset = -1;
+ }
+ success = true;
+ return success;
+ } else {
+ range_start = 0;
+ for (left_cache_id = 0; left_cache_id < num_boundaries; left_cache_id++) {
+ range_end = mcache_boundaries[left_cache_id] - shift - 1;
+
+ if (range_start <= pipe_h_vp_start && pipe_h_vp_start <= range_end)
+ break;
+
+ range_start = range_end + 1;
+ }
+
+ range_end = MAX_VP;
+ for (right_cache_id = num_boundaries - 1; right_cache_id >= -1; right_cache_id--) {
+ if (right_cache_id >= 0)
+ range_start = mcache_boundaries[right_cache_id] - shift;
+ else
+ range_start = 0;
+
+ if (range_start <= pipe_h_vp_end && pipe_h_vp_end <= range_end) {
+ break;
+ }
+ range_end = range_start - 1;
+ }
+ right_cache_id = (right_cache_id + 1) % num_boundaries;
+
+ if (right_cache_id == left_cache_id) {
+ if (first_offset && second_offset) {
+ *first_offset = left_cache_id;
+ *second_offset = -1;
+ }
+ success = true;
+ } else if (right_cache_id == (left_cache_id + 1) % num_boundaries) {
+ if (first_offset && second_offset) {
+ *first_offset = left_cache_id;
+ *second_offset = right_cache_id;
+ }
+ success = true;
+ }
+ }
+
+ return success;
+}
+
+/*
+* For a given set of pipe start/end x positions, checks whether it can support the input mcache splitting.
+* It also attempts to "optimize" by finding a shift if the default 0 shift does not work.
+*/
+static bool find_shift_for_valid_cache_id_assignment(int *mcache_boundaries, unsigned int num_boundaries,
+ int *pipe_vp_startx, int *pipe_vp_endx, unsigned int pipe_count, int shift_granularity, int *shift)
+{
+ int max_shift = 0xFFFF;
+ unsigned int pipe_index;
+ unsigned int i, slice_width;
+ bool success = false;
+
+ for (i = 0; i < num_boundaries; i++) {
+ if (i == 0)
+ slice_width = mcache_boundaries[i];
+ else
+ slice_width = mcache_boundaries[i] - mcache_boundaries[i - 1];
+
+ if (max_shift > (int)slice_width) {
+ max_shift = slice_width;
+ }
+ }
+
+ for (*shift = 0; *shift <= max_shift; *shift += shift_granularity) {
+ success = true;
+ for (pipe_index = 0; pipe_index < pipe_count; pipe_index++) {
+ if (!calculate_first_second_splitting(mcache_boundaries, num_boundaries, *shift,
+ pipe_vp_startx[pipe_index], pipe_vp_endx[pipe_index], 0, 0)) {
+ success = false;
+ break;
+ }
+ }
+ if (success)
+ break;
+ }
+
+ return success;
+}
+
+/*
+* Counts the number of elements of the input array that fall within the given span length.
+* Formally: the size of the largest subset of the array whose largest and smallest elements
+* differ by no more than the span.
+*/
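+// Example: for array = {0, 4, 9, 11} and span = 5, the subset {0, 4} fits
+// within the span (4 - 0 <= 5) and no three-element subset does, so this
+// returns 2.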
+static unsigned int count_elements_in_span(int *array, unsigned int array_size, unsigned int span)
+{
+ unsigned int i;
+ unsigned int span_start_value;
+ unsigned int span_start_index;
+ unsigned int greatest_element_count;
+
+ if (array_size == 0)
+ return 1;
+
+ if (span == 0)
+ return array_size > 0 ? 1 : 0;
+
+ span_start_value = 0;
+ span_start_index = 0;
+ greatest_element_count = 0;
+
+ while (span_start_index < array_size) {
+ for (i = span_start_index; i < array_size; i++) {
+ if (array[i] - span_start_value <= span) {
+ if (i - span_start_index + 1 > greatest_element_count) {
+ greatest_element_count = i - span_start_index + 1;
+ }
+ } else
+ break;
+ }
+
+ span_start_index++;
+
+ if (span_start_index < array_size) {
+ span_start_value = array[span_start_index - 1] + 1;
+ }
+ }
+
+ return greatest_element_count;
+}
+
+static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
+ enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
+{
+ int i, slice_width;
+ const char MAX_SCL_VP_OVERLAP = 3;
+ bool success = false;
+
+ switch (scaling_transform) {
+ case dml2_scaling_transform_centered:
+ case dml2_scaling_transform_aspect_ratio:
+ case dml2_scaling_transform_fullscreen:
+ slice_width = full_vp_width / num_pipes;
+ for (i = 0; i < num_pipes; i++) {
+ pipe_vp_x_start[i] = i * slice_width;
+ pipe_vp_x_end[i] = (i + 1) * slice_width - 1;
+
+ if (pipe_vp_x_start[i] < MAX_SCL_VP_OVERLAP)
+ pipe_vp_x_start[i] = 0;
+ else
+ pipe_vp_x_start[i] -= MAX_SCL_VP_OVERLAP;
+
+ if (pipe_vp_x_end[i] > full_vp_width - MAX_SCL_VP_OVERLAP - 1)
+ pipe_vp_x_end[i] = full_vp_width - 1;
+ else
+ pipe_vp_x_end[i] += MAX_SCL_VP_OVERLAP;
+ }
+ success = true;
+ break;
+ case dml2_scaling_transform_explicit:
+ default:
+ success = false;
+ break;
+ }
+
+ return success;
+}
+
+bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
+ struct dml2_top_mcache_validate_admissability_locals *l = &dml->scratch.mcache_validate_admissability_locals;
+
+ const int MAX_PIXEL_OVERLAP = 6;
+ int max_per_pipe_vp_p0 = 0;
+ int max_per_pipe_vp_p1 = 0;
+ int temp, p0shift, p1shift;
+ unsigned int plane_index = 0;
+ unsigned int i;
+ unsigned int odm_combine_factor;
+ unsigned int mpc_combine_factor;
+ unsigned int num_dpps;
+ unsigned int num_boundaries;
+ enum dml2_scaling_transform scaling_transform;
+ const struct dml2_plane_parameters *plane;
+ const struct dml2_stream_parameters *stream;
+
+ bool p0pass = false;
+ bool p1pass = false;
+ bool all_pass = true;
+
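+ // For each DCC-enabled plane: derive the widest viewport a single pipe can
+ // see given the ODM/MPC split, then check that no pipe viewport crosses more
+ // than one mcache boundary (i.e. spans more than two slices); if it does,
+ // try to find a boundary shift that makes the assignment valid.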
+ for (plane_index = 0; plane_index < params->display_cfg->num_planes; plane_index++) {
+ if (!params->display_cfg->plane_descriptors[plane_index].surface.dcc.enable)
+ continue;
+
+ plane = &params->display_cfg->plane_descriptors[plane_index];
+ stream = &params->display_cfg->stream_descriptors[plane->stream_index];
+
+ num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
+
+ if (odm_combine_factor == 1)
+ num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
+ else
+ mpc_combine_factor = 1;
+
+ if (odm_combine_factor > 1) {
+ max_per_pipe_vp_p0 = plane->surface.plane0.width;
+ temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
+
+ if (temp < max_per_pipe_vp_p0)
+ max_per_pipe_vp_p0 = temp;
+
+ max_per_pipe_vp_p1 = plane->surface.plane1.width;
+ temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane1.h_ratio * stream->timing.h_active / odm_combine_factor);
+
+ if (temp < max_per_pipe_vp_p1)
+ max_per_pipe_vp_p1 = temp;
+ } else {
+ max_per_pipe_vp_p0 = plane->surface.plane0.width / mpc_combine_factor;
+ max_per_pipe_vp_p1 = plane->surface.plane1.width / mpc_combine_factor;
+ }
+
+ max_per_pipe_vp_p0 += 2 * MAX_PIXEL_OVERLAP;
+ max_per_pipe_vp_p1 += MAX_PIXEL_OVERLAP;
+
+ p0shift = 0;
+ p1shift = 0;
+
+ // The last element in the unshifted boundary array will always be the first pixel outside the
+ // plane, which means there's no mcache associated with it, hence the -1
+ num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
+ p0pass = true;
+ }
+ num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
+ p1pass = true;
+ }
+
+ if (!p0pass || !p1pass) {
+ if (odm_combine_factor > 1) {
+ num_dpps = odm_combine_factor;
+ scaling_transform = plane->composition.scaling_transform;
+ } else {
+ num_dpps = mpc_combine_factor;
+ scaling_transform = dml2_scaling_transform_fullscreen;
+ }
+
+ if (!p0pass) {
+ if (plane->composition.viewport.stationary) {
+ calculate_h_split_for_scaling_transform(plane->surface.plane0.width,
+ stream->timing.h_active, num_dpps, scaling_transform,
+ &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
+ p0pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ params->mcache_allocations[plane_index].num_mcaches_plane0,
+ &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index], num_dpps,
+ params->mcache_allocations[plane_index].shift_granularity.p0, &p0shift);
+ }
+ }
+ if (!p1pass) {
+ if (plane->composition.viewport.stationary) {
+ calculate_h_split_for_scaling_transform(plane->surface.plane1.width,
+ stream->timing.h_active, num_dpps, scaling_transform,
+ &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index]);
+ p1pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ params->mcache_allocations[plane_index].num_mcaches_plane1,
+ &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index], num_dpps,
+ params->mcache_allocations[plane_index].shift_granularity.p1, &p1shift);
+ }
+ }
+ }
+
+ if (p0pass && p1pass) {
+ for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane0; i++) {
+ params->mcache_allocations[plane_index].mcache_x_offsets_plane0[i] -= p0shift;
+ }
+ for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane1; i++) {
+ params->mcache_allocations[plane_index].mcache_x_offsets_plane1[i] -= p1shift;
+ }
+ }
+
+ params->per_plane_status[plane_index] = p0pass && p1pass;
+ all_pass &= p0pass && p1pass;
+ }
+
+ return all_pass;
+}
+
+static void reset_mcache_allocations(struct dml2_hubp_pipe_mcache_regs *per_plane_pipe_mcache_regs)
+{
+ // Initialize all entries to special valid MCache ID and special valid split coordinate
+ per_plane_pipe_mcache_regs->main.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p0.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->mall.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p0.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->main.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p1.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->mall.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p1.split_location = SPLIT_LOCATION_UNDEFINED;
+}
+
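+// Walk all valid plane allocations and hand out globally-unique mcache IDs in
+// ascending order; allocations that share a last slice are patched up below so
+// that the sharers end up with the same ID.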
+void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params)
+{
+ int i;
+ unsigned int j;
+ int next_unused_cache_id = 0;
+
+ for (i = 0; i < params->num_allocations; i++) {
+ if (!params->allocations[i].valid)
+ continue;
+
+ for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
+ params->allocations[i].global_mcache_ids_plane0[j] = next_unused_cache_id++;
+ }
+ for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
+ params->allocations[i].global_mcache_ids_plane1[j] = next_unused_cache_id++;
+ }
+
+ // The "psuedo-last" slice is always wrapped around
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0] =
+ params->allocations[i].global_mcache_ids_plane0[0];
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1] =
+ params->allocations[i].global_mcache_ids_plane1[0];
+
+ // If we need dedicated caches for mall requesting, then we assign them here.
+ if (params->allocations[i].requires_dedicated_mall_mcache) {
+ for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
+ params->allocations[i].global_mcache_ids_mall_plane0[j] = next_unused_cache_id++;
+ }
+ for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
+ params->allocations[i].global_mcache_ids_mall_plane1[j] = next_unused_cache_id++;
+ }
+
+ // The "psuedo-last" slice is always wrapped around
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0] =
+ params->allocations[i].global_mcache_ids_mall_plane0[0];
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1] =
+ params->allocations[i].global_mcache_ids_mall_plane1[0];
+ }
+
+ // If P0 and P1 share their last cache slice, the largest mcache IDs for P0 and P1 must match.
+ // Since mcache IDs are assigned in ascending order, the largest mcache ID of P1 is therefore
+ // set to the largest mcache ID of P0.
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
+ params->allocations[i].last_slice_sharing.plane0_plane1) {
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+
+ // If we need dedicated caches, handle last-slice sharing
+ if (params->allocations[i].requires_dedicated_mall_mcache) {
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
+ params->allocations[i].last_slice_sharing.plane0_plane1) {
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+ // If mall_comb_mcache_p0 is set, the largest mcache ID for MALL P0 can be the same as for regular read P0
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p0) {
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1] =
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+ // If mall_comb_mcache_p1 is set, the largest mcache ID for MALL P1 can be the same as for regular
+ // read P1 (which can itself be the same as regular read P0 if plane0_plane1 is also set)
+ if (params->allocations[i].num_mcaches_plane1 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p1) {
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1];
+ }
+ }
+
+ // If dedicated MALL mcaches are not needed, the MALL mcache assignments are identical to the normal read assignments
+ if (!params->allocations[i].requires_dedicated_mall_mcache) {
+ memcpy(params->allocations[i].global_mcache_ids_mall_plane0, params->allocations[i].global_mcache_ids_plane0,
+ sizeof(params->allocations[i].global_mcache_ids_mall_plane0));
+ memcpy(params->allocations[i].global_mcache_ids_mall_plane1, params->allocations[i].global_mcache_ids_plane1,
+ sizeof(params->allocations[i].global_mcache_ids_mall_plane1));
+ }
+ }
+}
+
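For illustration, a minimal standalone sketch of the ID assignment above, using a simplified hypothetical layout (the real dml2_mcache_surface_allocation carries much more state): IDs are handed out in ascending order per plane, and the pseudo-last slice wraps back to the first ID so a viewport crossing the right edge of the surface lands back on slice 0's cache.

	#include <stdio.h>

	#define NUM_SLICES 3

	int main(void)
	{
		/* hypothetical 3-slice plane 0 allocation; +1 slot for the pseudo-last slice */
		int ids_p0[NUM_SLICES + 1];
		int next_unused_cache_id = 0;

		for (int j = 0; j < NUM_SLICES; j++)
			ids_p0[j] = next_unused_cache_id++;

		/* the pseudo-last slice wraps back to the first ID */
		ids_p0[NUM_SLICES] = ids_p0[0];

		for (int j = 0; j <= NUM_SLICES; j++)
			printf("slice %d -> mcache id %d\n", j, ids_p0[j]);
		/* prints ids 0, 1, 2, then 0 again for the wrapped slice */
		return 0;
	}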
+bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
+ struct dml2_top_mcache_verify_mcache_size_locals *l = &dml->scratch.mcache_verify_mcache_size_locals;
+
+ unsigned int total_mcaches_required;
+ unsigned int i;
+ bool result = false;
+
+ if (dml->soc_bbox.num_dcc_mcaches == 0) {
+ return true;
+ }
+
+ total_mcaches_required = 0;
+ l->calc_mcache_params.instance = &dml->core_instance;
+ for (i = 0; i < params->display_config->num_planes; i++) {
+ if (!params->display_config->plane_descriptors[i].surface.dcc.enable) {
+ memset(&params->mcache_allocations[i], 0, sizeof(struct dml2_mcache_surface_allocation));
+ continue;
+ }
+
+ l->calc_mcache_params.plane_descriptor = &params->display_config->plane_descriptors[i];
+ l->calc_mcache_params.mcache_allocation = &params->mcache_allocations[i];
+ l->calc_mcache_params.plane_index = i;
+
+ if (!dml->core_instance.calculate_mcache_allocation(&l->calc_mcache_params))
+ return false;
+
+ if (params->mcache_allocations[i].valid) {
+ total_mcaches_required += params->mcache_allocations[i].num_mcaches_plane0 + params->mcache_allocations[i].num_mcaches_plane1;
+ if (params->mcache_allocations[i].last_slice_sharing.plane0_plane1)
+ total_mcaches_required--;
+ }
+ }
+ dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+
+ result = total_mcaches_required <= dml->soc_bbox.num_dcc_mcaches;
+
+ return result;
+}
+
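The budget check above reduces to a sum with a shared-slice discount; a hedged sketch with a simplified, hypothetical plane_alloc type in place of dml2_mcache_surface_allocation:

	#include <stdbool.h>
	#include <stdio.h>

	struct plane_alloc {            /* hypothetical, condensed */
		bool valid;
		unsigned int num_p0;
		unsigned int num_p1;
		bool shares_last_slice; /* stands in for last_slice_sharing.plane0_plane1 */
	};

	static bool fits_mcache_budget(const struct plane_alloc *a, int n, unsigned int budget)
	{
		unsigned int total = 0;

		for (int i = 0; i < n; i++) {
			if (!a[i].valid)
				continue;
			total += a[i].num_p0 + a[i].num_p1;
			if (a[i].shares_last_slice)
				total--; /* the shared last slice is counted once */
		}
		return total <= budget;
	}

	int main(void)
	{
		struct plane_alloc planes[] = {
			{ .valid = true, .num_p0 = 4, .num_p1 = 2, .shares_last_slice = true }, /* counts as 5 */
			{ .valid = true, .num_p0 = 3, .num_p1 = 0 },                            /* counts as 3 */
		};

		printf("fits: %d\n", fits_mcache_budget(planes, 2, 8)); /* 5 + 3 <= 8 -> 1 */
		return 0;
	}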
+static bool dml2_top_soc15_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
+ struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming;
+
+ bool result = false;
+ bool mcache_success = false;
+ memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
+
+ setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
+ if (result) {
+ struct optimization_phase_params mcache_phase = {
+ .dml = dml,
+ .display_config = &l->base_display_config_with_meta,
+ .test_function = dml2_top_optimization_test_function_mcache,
+ .optimize_function = dml2_top_optimization_optimize_function_mcache,
+ .optimized_display_config = &l->optimized_display_config_with_meta,
+ .all_or_nothing = false,
+ };
+ mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &mcache_phase);
+ }
+
+ /*
+ * Call DPMM to map all requirements to minimum clock state
+ */
+ if (result) {
+ l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
+ l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_mode_params.programming = dpmm_programming;
+ l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
+ l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
+ result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
+ }
+
+ in_out->is_supported = mcache_success;
+ result = result && in_out->is_supported;
+
+ return result;
+}
+
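The phase driver dml2_top_optimization_perform_optimization_phase is not part of this hunk; as a rough sketch of the pattern its parameters suggest (test returns true when the config already meets the goal, optimize produces a candidate; this shape is an assumption, not the actual implementation):

	#include <stdbool.h>

	/* All names here are hypothetical; the real phase driver lives outside this hunk. */
	struct phase_sketch {
		bool (*test)(const void *cfg);               /* true when cfg already meets the goal */
		bool (*optimize)(const void *in, void *out); /* true when an improved candidate was produced */
		const void *base_cfg;
		void *optimized_cfg;
	};

	bool run_phase_sketch(const struct phase_sketch *p)
	{
		if (p->test(p->base_cfg))
			return true; /* nothing to do */

		/* otherwise try to produce an improved candidate in optimized_cfg;
		 * the caller copies it back over base_cfg on success */
		return p->optimize(p->base_cfg, p->optimized_cfg);
	}

On failure, an all_or_nothing phase aborts the whole build (as the mcache phase does in build_mode_programming below), while best-effort phases such as Vmin may keep partial gains.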
+static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_build_mode_programming_locals *l = &dml->scratch.build_mode_programming_locals;
+
+ bool result = false;
+ bool mcache_success = false;
+ bool uclk_pstate_success = false;
+ bool vmin_success = false;
+ bool stutter_success = false;
+ unsigned int i;
+
+ memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
+ memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
+
+ memcpy(&in_out->programming->display_config, in_out->display_config, sizeof(struct dml2_display_cfg));
+
+ setup_speculative_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
+ if (!result) {
+ setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
+ if (!result) {
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = false;
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ return false;
+ }
+
+ /*
+ * Phase 1: Determine minimum clocks to satisfy latency requirements for this mode
+ */
+ memset(&l->min_clock_for_latency_phase, 0, sizeof(struct optimization_phase_params));
+ l->min_clock_for_latency_phase.dml = dml;
+ l->min_clock_for_latency_phase.display_config = &l->base_display_config_with_meta;
+ l->min_clock_for_latency_phase.init_function = dml2_top_optimization_init_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.test_function = dml2_top_optimization_test_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.optimize_function = dml2_top_optimization_optimize_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->min_clock_for_latency_phase.all_or_nothing = false;
+
+ dml2_top_optimization_perform_optimization_phase_1(&l->optimization_phase_locals, &l->min_clock_for_latency_phase);
+
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ }
+
+ /*
+ * Phase 2: Satisfy DCC mcache requirements
+ */
+ memset(&l->mcache_phase, 0, sizeof(struct optimization_phase_params));
+ l->mcache_phase.dml = dml;
+ l->mcache_phase.display_config = &l->base_display_config_with_meta;
+ l->mcache_phase.test_function = dml2_top_optimization_test_function_mcache;
+ l->mcache_phase.optimize_function = dml2_top_optimization_optimize_function_mcache;
+ l->mcache_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->mcache_phase.all_or_nothing = true;
+
+ mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->mcache_phase);
+
+ if (!mcache_success) {
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = false;
+
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ in_out->programming->informative.failed_mcache_validation = true;
+ return false;
+ }
+
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+
+ /*
+ * Phase 3: Optimize for Pstate
+ */
+ memset(&l->uclk_pstate_phase, 0, sizeof(struct optimization_phase_params));
+ l->uclk_pstate_phase.dml = dml;
+ l->uclk_pstate_phase.display_config = &l->base_display_config_with_meta;
+ l->uclk_pstate_phase.init_function = dml2_top_optimization_init_function_uclk_pstate;
+ l->uclk_pstate_phase.test_function = dml2_top_optimization_test_function_uclk_pstate;
+ l->uclk_pstate_phase.optimize_function = dml2_top_optimization_optimize_function_uclk_pstate;
+ l->uclk_pstate_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->uclk_pstate_phase.all_or_nothing = true;
+
+ uclk_pstate_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->uclk_pstate_phase);
+
+ if (uclk_pstate_success) {
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage3.success = true;
+ }
+
+ /*
+ * Phase 4: Optimize for Vmin
+ */
+ memset(&l->vmin_phase, 0, sizeof(struct optimization_phase_params));
+ l->vmin_phase.dml = dml;
+ l->vmin_phase.display_config = &l->base_display_config_with_meta;
+ l->vmin_phase.init_function = dml2_top_optimization_init_function_vmin;
+ l->vmin_phase.test_function = dml2_top_optimization_test_function_vmin;
+ l->vmin_phase.optimize_function = dml2_top_optimization_optimize_function_vmin;
+ l->vmin_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->vmin_phase.all_or_nothing = false;
+
+ vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
+
+ if (l->optimized_display_config_with_meta.stage4.performed) {
+ /*
+ * When performed is true, the optimization has been applied to
+ * optimized_display_config_with_meta and it has passed mode
+ * support. However, it may or may not pass the test function,
+ * i.e. actually reach the Vmin level. As long as the voltage is
+ * optimized there is still a power benefit even if Vmin is not
+ * reached, so in this case we still copy the optimization into
+ * the base display config.
+ */
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage4.success = vmin_success;
+ }
+
+ /*
+ * Phase 5: Optimize for Stutter
+ */
+ memset(&l->stutter_phase, 0, sizeof(struct optimization_phase_params));
+ l->stutter_phase.dml = dml;
+ l->stutter_phase.display_config = &l->base_display_config_with_meta;
+ l->stutter_phase.init_function = dml2_top_optimization_init_function_stutter;
+ l->stutter_phase.test_function = dml2_top_optimization_test_function_stutter;
+ l->stutter_phase.optimize_function = dml2_top_optimization_optimize_function_stutter;
+ l->stutter_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->stutter_phase.all_or_nothing = true;
+
+ stutter_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->stutter_phase);
+
+ if (stutter_success) {
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage5.success = true;
+ }
+
+ /*
+ * Populate mcache programming
+ */
+ for (i = 0; i < in_out->display_config->num_planes; i++) {
+ in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
+ }
+
+ /*
+ * Call DPMM to map all requirements to minimum clock state
+ */
+ if (result) {
+ l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
+ l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_mode_params.programming = in_out->programming;
+ l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
+ l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
+ result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
+ if (!result)
+ in_out->programming->informative.failed_dpmm = true;
+ }
+
+ if (result) {
+ l->mode_programming_params.instance = &dml->core_instance;
+ l->mode_programming_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_programming_params.cfg_support_info = &l->base_display_config_with_meta.mode_support_result.cfg_support_info;
+ l->mode_programming_params.programming = in_out->programming;
+ result = dml->core_instance.mode_programming(&l->mode_programming_params);
+ if (!result)
+ in_out->programming->informative.failed_mode_programming = true;
+ }
+
+ if (result) {
+ l->dppm_map_watermarks_params.core = &dml->core_instance;
+ l->dppm_map_watermarks_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_watermarks_params.programming = in_out->programming;
+ result = dml->dpmm_instance.map_watermarks(&l->dppm_map_watermarks_params);
+ }
+
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = result;
+
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ return result;
+}
+
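A condensed view of the speculative/fallback mode-support flow at the top of dml2_top_soc15_build_mode_programming; the names below are hypothetical:

	#include <stdbool.h>

	/* Hypothetical condensation of the speculative/fallback flow. */
	bool mode_support_with_fallback(bool (*support)(int cfg),
					int speculative_cfg, int safe_cfg)
	{
		if (support(speculative_cfg))
			return true;  /* fast path: the speculative minimum-clock guess held */

		if (!support(safe_cfg))
			return false; /* mode genuinely unsupported; populate informative data and bail */

		/* slow path: supported, but minimum clocks must be re-derived (phase 1) */
		return true;
	}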
+bool dml2_top_soc15_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params)
+{
+ bool success = true;
+ int config_index, pipe_index;
+ int first_offset, second_offset;
+ int free_per_plane_reg_index = 0;
+
+ memset(params->per_plane_pipe_mcache_regs, 0, DML2_MAX_PLANES * DML2_MAX_DCN_PIPES * sizeof(struct dml2_hubp_pipe_mcache_regs *));
+
+ for (config_index = 0; config_index < params->num_configurations; config_index++) {
+ for (pipe_index = 0; pipe_index < params->mcache_configurations[config_index].num_pipes; pipe_index++) {
+ // Allocate storage for the mcache regs
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index] = &params->mcache_regs_set[free_per_plane_reg_index++];
+
+ reset_mcache_allocations(params->per_plane_pipe_mcache_regs[config_index][pipe_index]);
+
+ if (params->mcache_configurations[config_index].plane_descriptor->surface.dcc.enable) {
+ // P0 always enabled
+ if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0,
+ params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane0,
+ 0,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start +
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_width - 1,
+ &first_offset, &second_offset)) {
+ success = false;
+ break;
+ }
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[first_offset];
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[first_offset];
+
+ if (second_offset >= 0) {
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
+ }
+
+ // Populate P1 if enabled
+ if (params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1_enabled) {
+ if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1,
+ params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane1,
+ 0,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start +
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_width - 1,
+ &first_offset, &second_offset)) {
+ success = false;
+ break;
+ }
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[first_offset];
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[first_offset];
+
+ if (second_offset >= 0) {
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
+ }
+ }
+ }
+ }
+ }
+
+ return success;
+}
+
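A worked example of the register values computed above, with made-up numbers: if plane 0 has mcache x-offsets {256, 512} and the pipe viewport spans [200, 399], the viewport crosses the first slice boundary, so two IDs are programmed and the split location is the last pixel of the first slice.

	#include <stdio.h>

	int main(void)
	{
		/* illustrative numbers only, not real programming output */
		int offsets_p0[] = { 256, 512 };  /* slice 0 covers x [0,255], slice 1 covers [256,511] */
		int vp_start = 200, vp_end = 399; /* pipe viewport spans both slices */
		int first_offset = 0;             /* vp_start (200) lands in slice 0 */
		int second_offset = 1;            /* vp_end (399) lands in slice 1 */
		int split_location = offsets_p0[first_offset] - 1;

		printf("viewport [%d,%d]: first=%d second=%d split_location=%d\n",
		       vp_start, vp_end, first_offset, second_offset, split_location);
		/* prints: viewport [200,399]: first=0 second=1 split_location=255 */
		return 0;
	}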
+static const struct dml2_top_funcs soc15_funcs = {
+ .check_mode_supported = dml2_top_soc15_check_mode_supported,
+ .build_mode_programming = dml2_top_soc15_build_mode_programming,
+ .build_mcache_programming = dml2_top_soc15_build_mcache_programming,
+};
+
+bool dml2_top_soc15_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_initialize_instance_locals *l = &dml->scratch.initialize_instance_locals;
+ struct dml2_core_initialize_in_out core_init_params = { 0 };
+ struct dml2_mcg_build_min_clock_table_params_in_out mcg_build_min_clk_params = { 0 };
+ struct dml2_pmo_initialize_in_out pmo_init_params = { 0 };
+ bool result = false;
+
+ memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
+ memset(dml, 0, sizeof(struct dml2_instance));
+
+ memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+ memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
+
+ dml->project_id = in_out->options.project_id;
+ dml->pmo_options = in_out->options.pmo_options;
+
+ // Initialize All Components
+ result = dml2_mcg_create(in_out->options.project_id, &dml->mcg_instance);
+
+ if (result)
+ result = dml2_dpmm_create(in_out->options.project_id, &dml->dpmm_instance);
+
+ if (result)
+ result = dml2_core_create(in_out->options.project_id, &dml->core_instance);
+
+ if (result) {
+ mcg_build_min_clk_params.soc_bb = &in_out->soc_bb;
+ mcg_build_min_clk_params.min_clk_table = &dml->min_clk_table;
+ result = dml->mcg_instance.build_min_clock_table(&mcg_build_min_clk_params);
+ }
+
+ if (result) {
+ core_init_params.project_id = in_out->options.project_id;
+ core_init_params.instance = &dml->core_instance;
+ core_init_params.minimum_clock_table = &dml->min_clk_table;
+ core_init_params.explicit_ip_bb = in_out->overrides.explicit_ip_bb;
+ core_init_params.explicit_ip_bb_size = in_out->overrides.explicit_ip_bb_size;
+ core_init_params.ip_caps = &in_out->ip_caps;
+ core_init_params.soc_bb = &in_out->soc_bb;
+ result = dml->core_instance.initialize(&core_init_params);
+
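+ /* Note: core initialize may update ip_caps through the pointer passed above
+ * when an explicit IP BB is supplied, so re-sync the local copy (assumed intent). */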
+ if (core_init_params.explicit_ip_bb && core_init_params.explicit_ip_bb_size > 0) {
+ memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+ }
+ }
+
+ if (result)
+ result = dml2_pmo_create(in_out->options.project_id, &dml->pmo_instance);
+
+ if (result) {
+ pmo_init_params.instance = &dml->pmo_instance;
+ pmo_init_params.soc_bb = &dml->soc_bbox;
+ pmo_init_params.ip_caps = &dml->ip_caps;
+ pmo_init_params.mcg_clock_table_size = dml->min_clk_table.dram_bw_table.num_entries;
+ pmo_init_params.options = &dml->pmo_options;
+ dml->pmo_instance.initialize(&pmo_init_params);
+ }
+ dml->funcs = soc15_funcs;
+ return result;
+}
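The bring-up above chains each step on the running result instead of goto-based unwinding; a minimal illustration of the idiom:

	#include <stdbool.h>

	static bool step_a(void) { return true; }
	static bool step_b(void) { return true; }
	static bool step_c(void) { return true; }

	/* Each step runs only if every earlier step succeeded; the first failure
	 * short-circuits the rest and is returned to the caller unchanged. */
	bool bring_up(void)
	{
		bool result = step_a();

		if (result)
			result = step_b();
		if (result)
			result = step_c();
		return result;
	}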
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
index 7b1f6f7143d0..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
@@ -2,22 +2,13 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#ifndef __DML_TOP_MCACHE_H__
-#define __DML_TOP_MCACHE_H__
-
-#include "dml2_external_lib_deps.h"
-#include "dml_top_display_cfg_types.h"
-#include "dml_top_types.h"
+#ifndef __DML2_TOP_SOC15_H__
+#define __DML2_TOP_SOC15_H__
#include "dml2_internal_shared_types.h"
+bool dml2_top_soc15_initialize_instance(struct dml2_initialize_instance_in_out *in_out);
bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params);
-
void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params);
-
bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params);
-
-bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params);
-
-bool dml2_top_mcache_unit_test(void);
-
-#endif
+bool dml2_top_soc15_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params);
+#endif /* __DML2_TOP_SOC15_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
deleted file mode 100644
index a342ebfbe4e7..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-#include "dml_top_mcache.h"
-#include "lib_float_math.h"
-
-#include "dml2_internal_shared_types.h"
-
-/*
-* Takes an input set of mcache boundaries and finds the appropriate setting of cache programming.
-* Returns true if a valid set of programming can be made, and false otherwise. "Valid" means
-* that the horizontal viewport does not span more than 2 cache slices.
-*
-* It optionally also can apply a constant shift to all the cache boundaries.
-*/
-static const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
-static const uint32_t SPLIT_LOCATION_UNDEFINED = 0xFFFF;
-
-static bool calculate_first_second_splitting(const int *mcache_boundaries, int num_boundaries, int shift,
- int pipe_h_vp_start, int pipe_h_vp_end, int *first_offset, int *second_offset)
-{
- const int MAX_VP = 0xFFFFFF;
- int left_cache_id;
- int right_cache_id;
- int range_start;
- int range_end;
- bool success = false;
-
- if (num_boundaries <= 1) {
- if (first_offset && second_offset) {
- *first_offset = 0;
- *second_offset = -1;
- }
- success = true;
- return success;
- } else {
- range_start = 0;
- for (left_cache_id = 0; left_cache_id < num_boundaries; left_cache_id++) {
- range_end = mcache_boundaries[left_cache_id] - shift - 1;
-
- if (range_start <= pipe_h_vp_start && pipe_h_vp_start <= range_end)
- break;
-
- range_start = range_end + 1;
- }
-
- range_end = MAX_VP;
- for (right_cache_id = num_boundaries - 1; right_cache_id >= -1; right_cache_id--) {
- if (right_cache_id >= 0)
- range_start = mcache_boundaries[right_cache_id] - shift;
- else
- range_start = 0;
-
- if (range_start <= pipe_h_vp_end && pipe_h_vp_end <= range_end) {
- break;
- }
- range_end = range_start - 1;
- }
- right_cache_id = (right_cache_id + 1) % num_boundaries;
-
- if (right_cache_id == left_cache_id) {
- if (first_offset && second_offset) {
- *first_offset = left_cache_id;
- *second_offset = -1;
- }
- success = true;
- } else if (right_cache_id == (left_cache_id + 1) % num_boundaries) {
- if (first_offset && second_offset) {
- *first_offset = left_cache_id;
- *second_offset = right_cache_id;
- }
- success = true;
- }
- }
-
- return success;
-}
-
-/*
-* For a given set of pipe start/end x positions, checks to see it can support the input mcache splitting.
-* It also attempts to "optimize" by finding a shift if the default 0 shift does not work.
-*/
-static bool find_shift_for_valid_cache_id_assignment(int *mcache_boundaries, unsigned int num_boundaries,
- int *pipe_vp_startx, int *pipe_vp_endx, unsigned int pipe_count, int shift_granularity, int *shift)
-{
- int max_shift = 0xFFFF;
- unsigned int pipe_index;
- unsigned int i, slice_width;
- bool success = false;
-
- for (i = 0; i < num_boundaries; i++) {
- if (i == 0)
- slice_width = mcache_boundaries[i];
- else
- slice_width = mcache_boundaries[i] - mcache_boundaries[i - 1];
-
- if (max_shift > (int)slice_width) {
- max_shift = slice_width;
- }
- }
-
- for (*shift = 0; *shift <= max_shift; *shift += shift_granularity) {
- success = true;
- for (pipe_index = 0; pipe_index < pipe_count; pipe_index++) {
- if (!calculate_first_second_splitting(mcache_boundaries, num_boundaries, *shift,
- pipe_vp_startx[pipe_index], pipe_vp_endx[pipe_index], 0, 0)) {
- success = false;
- break;
- }
- }
- if (success)
- break;
- }
-
- return success;
-}
-
-/*
-* Counts the number of elements inside input array within the given span length.
-* Formally, what is the size of the largest subset of the array where the largest and smallest element
-* differ no more than the span.
-*/
-static unsigned int count_elements_in_span(int *array, unsigned int array_size, unsigned int span)
-{
- unsigned int i;
- unsigned int span_start_value;
- unsigned int span_start_index;
- unsigned int greatest_element_count;
-
- if (array_size == 0)
- return 1;
-
- if (span == 0)
- return array_size > 0 ? 1 : 0;
-
- span_start_value = 0;
- span_start_index = 0;
- greatest_element_count = 0;
-
- while (span_start_index < array_size) {
- for (i = span_start_index; i < array_size; i++) {
- if (array[i] - span_start_value <= span) {
- if (i - span_start_index + 1 > greatest_element_count) {
- greatest_element_count = i - span_start_index + 1;
- }
- } else
- break;
- }
-
- span_start_index++;
-
- if (span_start_index < array_size) {
- span_start_value = array[span_start_index - 1] + 1;
- }
- }
-
- return greatest_element_count;
-}
-
-static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
- enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
-{
- int i, slice_width;
- const char MAX_SCL_VP_OVERLAP = 3;
- bool success = false;
-
- switch (scaling_transform) {
- case dml2_scaling_transform_centered:
- case dml2_scaling_transform_aspect_ratio:
- case dml2_scaling_transform_fullscreen:
- slice_width = full_vp_width / num_pipes;
- for (i = 0; i < num_pipes; i++) {
- pipe_vp_x_start[i] = i * slice_width;
- pipe_vp_x_end[i] = (i + 1) * slice_width - 1;
-
- if (pipe_vp_x_start[i] < MAX_SCL_VP_OVERLAP)
- pipe_vp_x_start[i] = 0;
- else
- pipe_vp_x_start[i] -= MAX_SCL_VP_OVERLAP;
-
- if (pipe_vp_x_end[i] > full_vp_width - MAX_SCL_VP_OVERLAP - 1)
- pipe_vp_x_end[i] = full_vp_width - 1;
- else
- pipe_vp_x_end[i] += MAX_SCL_VP_OVERLAP;
- }
- break;
- case dml2_scaling_transform_explicit:
- default:
- success = false;
- break;
- }
-
- return success;
-}
-
-bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params)
-{
- struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
- struct dml2_top_mcache_validate_admissability_locals *l = &dml->scratch.mcache_validate_admissability_locals;
-
- const int MAX_PIXEL_OVERLAP = 6;
- int max_per_pipe_vp_p0 = 0;
- int max_per_pipe_vp_p1 = 0;
- int temp, p0shift, p1shift;
- unsigned int plane_index = 0;
- unsigned int i;
- unsigned int odm_combine_factor;
- unsigned int mpc_combine_factor;
- unsigned int num_dpps;
- unsigned int num_boundaries;
- enum dml2_scaling_transform scaling_transform;
- const struct dml2_plane_parameters *plane;
- const struct dml2_stream_parameters *stream;
-
- bool p0pass = false;
- bool p1pass = false;
- bool all_pass = true;
-
- for (plane_index = 0; plane_index < params->display_cfg->num_planes; plane_index++) {
- if (!params->display_cfg->plane_descriptors[plane_index].surface.dcc.enable)
- continue;
-
- plane = &params->display_cfg->plane_descriptors[plane_index];
- stream = &params->display_cfg->stream_descriptors[plane->stream_index];
-
- num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
-
- if (odm_combine_factor == 1)
- num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
- else
- mpc_combine_factor = 1;
-
- if (odm_combine_factor > 1) {
- max_per_pipe_vp_p0 = plane->surface.plane0.width;
- temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
-
- if (temp < max_per_pipe_vp_p0)
- max_per_pipe_vp_p0 = temp;
-
- max_per_pipe_vp_p1 = plane->surface.plane1.width;
- temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane1.h_ratio * stream->timing.h_active / odm_combine_factor);
-
- if (temp < max_per_pipe_vp_p1)
- max_per_pipe_vp_p1 = temp;
- } else {
- max_per_pipe_vp_p0 = plane->surface.plane0.width / mpc_combine_factor;
- max_per_pipe_vp_p1 = plane->surface.plane1.width / mpc_combine_factor;
- }
-
- max_per_pipe_vp_p0 += 2 * MAX_PIXEL_OVERLAP;
- max_per_pipe_vp_p1 += MAX_PIXEL_OVERLAP;
-
- p0shift = 0;
- p1shift = 0;
-
- // The last element in the unshifted boundary array will always be the first pixel outside the
- // plane, which means theres no mcache associated with it, so -1
- num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
- if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
- p0pass = true;
- }
- num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
- if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
- p1pass = true;
- }
-
- if (!p0pass || !p1pass) {
- if (odm_combine_factor > 1) {
- num_dpps = odm_combine_factor;
- scaling_transform = plane->composition.scaling_transform;
- } else {
- num_dpps = mpc_combine_factor;
- scaling_transform = dml2_scaling_transform_fullscreen;
- }
-
- if (!p0pass) {
- if (plane->composition.viewport.stationary) {
- calculate_h_split_for_scaling_transform(plane->surface.plane0.width,
- stream->timing.h_active, num_dpps, scaling_transform,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
- p0pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- params->mcache_allocations[plane_index].num_mcaches_plane0,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index], num_dpps,
- params->mcache_allocations[plane_index].shift_granularity.p0, &p0shift);
- }
- }
- if (!p1pass) {
- if (plane->composition.viewport.stationary) {
- calculate_h_split_for_scaling_transform(plane->surface.plane1.width,
- stream->timing.h_active, num_dpps, scaling_transform,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
- p1pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- params->mcache_allocations[plane_index].num_mcaches_plane1,
- &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index], num_dpps,
- params->mcache_allocations[plane_index].shift_granularity.p1, &p1shift);
- }
- }
- }
-
- if (p0pass && p1pass) {
- for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane0; i++) {
- params->mcache_allocations[plane_index].mcache_x_offsets_plane0[i] -= p0shift;
- }
- for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane1; i++) {
- params->mcache_allocations[plane_index].mcache_x_offsets_plane1[i] -= p1shift;
- }
- }
-
- params->per_plane_status[plane_index] = p0pass && p1pass;
- all_pass &= p0pass && p1pass;
- }
-
- return all_pass;
-}
-
-static void reset_mcache_allocations(struct dml2_hubp_pipe_mcache_regs *per_plane_pipe_mcache_regs)
-{
- // Initialize all entries to special valid MCache ID and special valid split coordinate
- per_plane_pipe_mcache_regs->main.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p0.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->mall.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p0.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->main.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p1.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->mall.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p1.split_location = SPLIT_LOCATION_UNDEFINED;
-}
-
-bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params)
-{
- bool success = true;
- int config_index, pipe_index;
- int first_offset, second_offset;
- int free_per_plane_reg_index = 0;
-
- memset(params->per_plane_pipe_mcache_regs, 0, DML2_MAX_PLANES * DML2_MAX_DCN_PIPES * sizeof(struct dml2_hubp_pipe_mcache_regs *));
-
- for (config_index = 0; config_index < params->num_configurations; config_index++) {
- for (pipe_index = 0; pipe_index < params->mcache_configurations[config_index].num_pipes; pipe_index++) {
- // Allocate storage for the mcache regs
- params->per_plane_pipe_mcache_regs[config_index][pipe_index] = &params->mcache_regs_set[free_per_plane_reg_index++];
-
- reset_mcache_allocations(params->per_plane_pipe_mcache_regs[config_index][pipe_index]);
-
- if (params->mcache_configurations[config_index].plane_descriptor->surface.dcc.enable) {
- // P0 always enabled
- if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0,
- params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane0,
- 0,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start +
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_width - 1,
- &first_offset, &second_offset)) {
- success = false;
- break;
- }
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[first_offset];
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[first_offset];
-
- if (second_offset >= 0) {
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
- }
-
- // Populate P1 if enabled
- if (params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1_enabled) {
- if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1,
- params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane1,
- 0,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start +
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_width - 1,
- &first_offset, &second_offset)) {
- success = false;
- break;
- }
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[first_offset];
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[first_offset];
-
- if (second_offset >= 0) {
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
- }
- }
- }
- }
- }
-
- return success;
-}
-
-void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params)
-{
- int i;
- unsigned int j;
- int next_unused_cache_id = 0;
-
- for (i = 0; i < params->num_allocations; i++) {
- if (!params->allocations[i].valid)
- continue;
-
- for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
- params->allocations[i].global_mcache_ids_plane0[j] = next_unused_cache_id++;
- }
- for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
- params->allocations[i].global_mcache_ids_plane1[j] = next_unused_cache_id++;
- }
-
- // The "psuedo-last" slice is always wrapped around
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0] =
- params->allocations[i].global_mcache_ids_plane0[0];
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1] =
- params->allocations[i].global_mcache_ids_plane1[0];
-
- // If we need dedicated caches for mall requesting, then we assign them here.
- if (params->allocations[i].requires_dedicated_mall_mcache) {
- for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
- params->allocations[i].global_mcache_ids_mall_plane0[j] = next_unused_cache_id++;
- }
- for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
- params->allocations[i].global_mcache_ids_mall_plane1[j] = next_unused_cache_id++;
- }
-
- // The "psuedo-last" slice is always wrapped around
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0] =
- params->allocations[i].global_mcache_ids_mall_plane0[0];
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1] =
- params->allocations[i].global_mcache_ids_mall_plane1[0];
- }
-
- // If P0 and P1 are sharing caches, then it means the largest mcache IDs for p0 and p1 can be the same
- // since mcache IDs are always ascending, then it means the largest mcacheID of p1 should be the
- // largest mcacheID of P0
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
- params->allocations[i].last_slice_sharing.plane0_plane1) {
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
-
- // If we need dedicated caches handle last slice sharing
- if (params->allocations[i].requires_dedicated_mall_mcache) {
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
- params->allocations[i].last_slice_sharing.plane0_plane1) {
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
- // If mall_comb_mcache_l is set then it means that largest mcache ID for MALL p0 can be same as regular read p0
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p0) {
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1] =
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
- // If mall_comb_mcache_c is set then it means that largest mcache ID for MALL p1 can be same as regular
- // read p1 (which can be same as regular read p0 if plane0_plane1 is also set)
- if (params->allocations[i].num_mcaches_plane1 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p1) {
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1];
- }
- }
-
- // If you don't need dedicated mall mcaches, the mall mcache assignments are identical to the normal requesting
- if (!params->allocations[i].requires_dedicated_mall_mcache) {
- memcpy(params->allocations[i].global_mcache_ids_mall_plane0, params->allocations[i].global_mcache_ids_plane0,
- sizeof(params->allocations[i].global_mcache_ids_mall_plane0));
- memcpy(params->allocations[i].global_mcache_ids_mall_plane1, params->allocations[i].global_mcache_ids_plane1,
- sizeof(params->allocations[i].global_mcache_ids_mall_plane1));
- }
- }
-}
-
-bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params)
-{
- struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
- struct dml2_top_mcache_verify_mcache_size_locals *l = &dml->scratch.mcache_verify_mcache_size_locals;
-
- unsigned int total_mcaches_required;
- unsigned int i;
- bool result = false;
-
- if (dml->soc_bbox.num_dcc_mcaches == 0) {
- return true;
- }
-
- total_mcaches_required = 0;
- l->calc_mcache_params.instance = &dml->core_instance;
- for (i = 0; i < params->display_config->num_planes; i++) {
- if (!params->display_config->plane_descriptors[i].surface.dcc.enable) {
- memset(&params->mcache_allocations[i], 0, sizeof(struct dml2_mcache_surface_allocation));
- continue;
- }
-
- l->calc_mcache_params.plane_descriptor = &params->display_config->plane_descriptors[i];
- l->calc_mcache_params.mcache_allocation = &params->mcache_allocations[i];
- l->calc_mcache_params.plane_index = i;
-
- if (!dml->core_instance.calculate_mcache_allocation(&l->calc_mcache_params)) {
- result = false;
- break;
- }
-
- if (params->mcache_allocations[i].valid) {
- total_mcaches_required += params->mcache_allocations[i].num_mcaches_plane0 + params->mcache_allocations[i].num_mcaches_plane1;
- if (params->mcache_allocations[i].last_slice_sharing.plane0_plane1)
- total_mcaches_required--;
- }
- }
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
-
- if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
- result = false;
- } else {
- result = true;
- }
-
- return result;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
index e9b8e10695ae..f95c7ff56f15 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
@@ -4,6 +4,11 @@
#include "dml2_debug.h"
+int dml2_log_internal(const char *format, ...)
+{
+ return 0;
+}
+
int dml2_printf(const char *format, ...)
{
#ifdef _DEBUG
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index d51a1b6c62f2..a27792b56f7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -8,9 +8,53 @@
#ifdef _DEBUG
#define DML2_ASSERT(condition) dml2_assert(condition)
#else
-#define DML2_ASSERT(condition)
+#define DML2_ASSERT(condition) ((void)0)
+#endif
+/*
+ * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
+ * DML_LOG_ERROR - unexpected but recoverable failures inside DML
+ * DML_LOG_WARN - unexpected inputs or events to DML
+ * DML_LOG_INFO - high level tracing of DML interfaces
+ * DML_LOG_DEBUG - detailed tracing of DML internal components
+ * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure
+ */
+#if !defined(DML_LOG_LEVEL)
+#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
+/* for backward compatibility with old macros */
+#define DML_LOG_LEVEL 5
+#else
+#define DML_LOG_LEVEL 0
+#endif
+#endif
+
+#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= 1
+#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 2
+#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_WARN(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 3
+#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_INFO(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 4
+#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 5
+#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
#endif
+int dml2_log_internal(const char *format, ...);
int dml2_printf(const char *format, ...);
void dml2_assert(int condition);
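Usage sketch for the new levelled macros: with the default DML_LOG_LEVEL of 0 only DML_LOG_FATAL expands to a real call, and a translation unit can opt into more detail by defining the level before including the header (the caller below is hypothetical):

	/* hypothetical caller; the level must be defined before the include to take effect */
	#define DML_LOG_LEVEL 3
	#include "dml2_debug.h"

	void example(int vtotal)
	{
		DML_LOG_INFO("programming vtotal=%d\n", vtotal);  /* compiled in at level >= 3 */
		DML_LOG_VERBOSE("per-line trace %d\n", vtotal);   /* compiled out below level 5 */
	}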
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index aeac9f159fa5..7fb6026bcb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -8,7 +8,6 @@
#include "dml2_external_lib_deps.h"
#include "dml_top_types.h"
#include "dml2_core_shared_types.h"
-
/*
* DML2 MCG Types and Interfaces
*/
@@ -63,7 +62,6 @@ struct dml2_mcg_build_min_clock_table_params_in_out {
*/
struct dml2_mcg_min_clock_table *min_clk_table;
};
-
struct dml2_mcg_instance {
bool (*build_min_clock_table)(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool (*unit_test)(void);
@@ -81,7 +79,6 @@ struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out {
struct dml2_soc_bb *soc_bb;
struct dml2_mcg_min_clock_table *min_clk_table;
const struct display_configuation_with_meta *display_cfg;
-
struct {
bool perform_pseudo_map;
struct dml2_core_internal_soc_bb *soc_bb;
@@ -309,7 +306,7 @@ struct dml2_optimization_stage3_state {
// The pstate support mode for each plane
// The number of valid elements == display_cfg.num_planes
// The indexing of pstate_switch_modes matches plane_descriptors[]
- enum dml2_uclk_pstate_support_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
// Meta-data for implicit SVP generation, indexed by stream index
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
@@ -356,6 +353,10 @@ struct display_configuation_with_meta {
struct dml2_optimization_stage5_state stage5;
};
+struct dml2_pmo_pstate_strategy {
+ enum dml2_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
+ bool allow_state_increase;
+};
struct dml2_core_mode_support_in_out {
/*
* Inputs
@@ -365,7 +366,6 @@ struct dml2_core_mode_support_in_out {
struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
-
/*
* Outputs
*/
@@ -395,7 +395,6 @@ struct dml2_core_mode_programming_in_out {
struct dml2_core_instance *instance;
const struct display_configuation_with_meta *display_cfg;
const struct core_display_cfg_support_info *cfg_support_info;
-
/*
* Outputs (also Input the clk freq are also from programming struct)
*/
@@ -445,6 +444,7 @@ struct dml2_core_internal_state_intermediates {
struct dml2_core_mode_support_locals {
struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
struct dml2_display_cfg svp_expanded_display_cfg;
+ struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
@@ -600,34 +600,11 @@ struct dml2_pmo_optimize_for_stutter_in_out {
struct display_configuation_with_meta *optimized_display_config;
};
-enum dml2_pmo_pstate_method {
- dml2_pmo_pstate_strategy_na = 0,
- /* hw exclusive modes */
- dml2_pmo_pstate_strategy_vactive = 1,
- dml2_pmo_pstate_strategy_vblank = 2,
- dml2_pmo_pstate_strategy_reserved_hw = 5,
- /* fw assisted exclusive modes */
- dml2_pmo_pstate_strategy_fw_svp = 6,
- dml2_pmo_pstate_strategy_reserved_fw = 10,
- /* fw assisted modes requiring drr modulation */
- dml2_pmo_pstate_strategy_fw_vactive_drr = 11,
- dml2_pmo_pstate_strategy_fw_vblank_drr = 12,
- dml2_pmo_pstate_strategy_fw_svp_drr = 13,
- dml2_pmo_pstate_strategy_reserved_fw_drr_clamped = 20,
- dml2_pmo_pstate_strategy_fw_drr = 21,
- dml2_pmo_pstate_strategy_reserved_fw_drr_var = 22,
-};
-
-struct dml2_pmo_pstate_strategy {
- enum dml2_pmo_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
- bool allow_state_increase;
-};
-
-#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw - dml2_pmo_pstate_strategy_na + 1)) - 1) << dml2_pmo_pstate_strategy_na)
-#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
-#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_clamped - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
-#define PMO_DRR_VAR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_drr)
-#define PMO_FW_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_svp + 1)) - 1) << dml2_pmo_pstate_strategy_fw_svp)
+#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw - dml2_pstate_method_na + 1)) - 1) << dml2_pstate_method_na)
+#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_vactive_drr + 1)) - 1) << dml2_pstate_method_fw_vactive_drr)
+#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_clamped - dml2_pstate_method_fw_vactive_drr + 1)) - 1) << dml2_pstate_method_fw_vactive_drr)
+#define PMO_DRR_VAR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_drr + 1)) - 1) << dml2_pstate_method_fw_drr)
+#define PMO_FW_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_svp + 1)) - 1) << dml2_pstate_method_fw_svp)
#define PMO_DCN4_MAX_DISPLAYS 4
#define PMO_DCN4_MAX_NUM_VARIANTS 2
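Assuming the renamed dml2_pstate_method enumerators keep the old numbering (na = 0, reserved_fw = 10, fw_vactive_drr = 11, fw_drr = 21, reserved_fw_drr_var = 22), the strategy masks expand to contiguous bit ranges over the method values; a quick check:

	#include <stdio.h>

	int main(void)
	{
		/* assumed enumerator values; see the note above */
		enum { na = 0, reserved_fw = 10, fw_vactive_drr = 11, reserved_fw_drr_var = 22 };
		unsigned int no_drr_mask = ((1u << (reserved_fw - na + 1)) - 1) << na;
		unsigned int drr_mask = ((1u << (reserved_fw_drr_var - fw_vactive_drr + 1)) - 1) << fw_vactive_drr;

		/* bits 0..10 and bits 11..22 respectively */
		printf("no_drr=0x%08x drr=0x%08x\n", no_drr_mask, drr_mask); /* 0x000007ff 0x007ff800 */
		return 0;
	}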
@@ -645,6 +622,8 @@ struct dml2_pmo_scratch {
int stream_mask;
} pmo_dcn3;
struct {
+ struct dml2_pmo_pstate_strategy expanded_override_strategy_list[2 * 2 * 2 * 2];
+ unsigned int num_expanded_override_strategies;
struct dml2_pmo_pstate_strategy pstate_strategy_candidates[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
int num_pstate_candidates;
int cur_pstate_candidate;
@@ -706,7 +685,6 @@ struct dml2_pmo_instance {
int mpc_combine_limit;
int odm_combine_limit;
int mcg_clock_table_size;
-
union {
struct {
struct {
@@ -963,7 +941,13 @@ struct dml2_top_mcache_validate_admissability_locals {
struct dml2_top_display_cfg_support_info {
const struct dml2_display_cfg *display_config;
struct core_display_cfg_support_info core_info;
- enum dml2_pstate_support_method per_plane_pstate_method[DML2_MAX_PLANES];
+};
+
+struct dml2_top_funcs {
+ bool (*check_mode_supported)(struct dml2_check_mode_supported_in_out *in_out);
+ bool (*build_mode_programming)(struct dml2_build_mode_programming_in_out *in_out);
+ bool (*build_mcache_programming)(struct dml2_build_mcache_programming_in_out *in_out);
+ bool (*unit_test)(void);
};
struct dml2_instance {
@@ -978,8 +962,8 @@ struct dml2_instance {
struct dml2_ip_capabilities ip_caps;
struct dml2_mcg_min_clock_table min_clk_table;
-
struct dml2_pmo_options pmo_options;
+ struct dml2_top_funcs funcs;
struct {
struct dml2_initialize_instance_locals initialize_instance_locals;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index bde4250853b1..b416320873e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -553,13 +553,53 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
}
- dml2_policy_build_synthetic_soc_states(s, p);
- if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) {
- // Override last out_state with data from last in_state
- // This will ensure that out_state contains max fclk
- memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
- &p->in_states->state_array[p->in_states->num_states - 1],
- sizeof(struct soc_state_bounding_box_st));
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0,
+ max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0, max_socclk_mhz = 0;
+
+ for (i = 0; i < p->in_states->num_states; i++) {
+ if (p->in_states->state_array[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = (int)p->in_states->state_array[i].dcfclk_mhz;
+ if (p->in_states->state_array[i].fabricclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = (int)p->in_states->state_array[i].fabricclk_mhz;
+ if (p->in_states->state_array[i].socclk_mhz > max_socclk_mhz)
+ max_socclk_mhz = (int)p->in_states->state_array[i].socclk_mhz;
+ if (p->in_states->state_array[i].dram_speed_mts > max_uclk_mhz)
+ max_uclk_mhz = (int)p->in_states->state_array[i].dram_speed_mts;
+ if (p->in_states->state_array[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = (int)p->in_states->state_array[i].dispclk_mhz;
+ if (p->in_states->state_array[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = (int)p->in_states->state_array[i].dppclk_mhz;
+ if (p->in_states->state_array[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = (int)p->in_states->state_array[i].phyclk_mhz;
+ if (p->in_states->state_array[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = (int)p->in_states->state_array[i].dtbclk_mhz;
+ }
+
+ for (i = 0; i < p->in_states->num_states; i++) {
+ /* Independent states - including base (unlisted) parameters from state 0. */
+ p->out_states->state_array[i] = p->in_states->state_array[0];
+
+ p->out_states->state_array[i].dispclk_mhz = max_dispclk_mhz;
+ p->out_states->state_array[i].dppclk_mhz = max_dppclk_mhz;
+ p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz;
+ p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz;
+
+ p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0;
+
+ /* Dependent states. */
+ p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts;
+ p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz;
+ p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
+ p->out_states->state_array[i].dcfclk_mhz = p->in_states->state_array[i].dcfclk_mhz;
+ }
+
+ p->out_states->num_states = p->in_states->num_states;
+ } else {
+ dml2_policy_build_synthetic_soc_states(s, p);
}
}
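For dcn35/dcn351 the hunk above no longer synthesizes states: each output state copies state 0's base parameters, the display-side clocks (dispclk, dppclk, dtbclk, phyclk, plus the derived dscclk at dispclk / 3) are clamped to the maximum found across all input states, and the memory-side clocks (uclk, fclk, socclk, dcfclk) stay per-state. A standalone sketch of that shape, with a simplified two-field stand-in for soc_state_bounding_box_st:

#include <stdio.h>

/* Simplified stand-in for the DML state struct. */
struct soc_state {
	int dispclk_mhz;   /* independent: clamped to the global max */
	int fabricclk_mhz; /* dependent: kept per-state              */
};

static void build_states(struct soc_state *out, const struct soc_state *in, int n)
{
	int max_dispclk = 0;

	for (int i = 0; i < n; i++)
		if (in[i].dispclk_mhz > max_dispclk)
			max_dispclk = in[i].dispclk_mhz;

	for (int i = 0; i < n; i++) {
		out[i] = in[0];                              /* base (unlisted) fields from state 0 */
		out[i].dispclk_mhz = max_dispclk;            /* independent clocks use the max      */
		out[i].fabricclk_mhz = in[i].fabricclk_mhz;  /* dependent clocks track the input    */
	}
}

int main(void)
{
	struct soc_state in[2] = { { 600, 1000 }, { 1200, 2000 } }, out[2];

	build_states(out, in, 2);
	printf("%d %d\n", out[0].dispclk_mhz, out[0].fabricclk_mhz); /* 1200 1000 */
	return 0;
}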
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 9190c1328d5b..68b882d28195 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -531,14 +531,21 @@ static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct d
static bool call_dml_mode_support_and_programming(struct dc_state *context)
{
unsigned int result = 0;
- unsigned int min_state;
+ unsigned int min_state = 0;
 int min_state_for_g6_temp_read = 0;
- struct dml2_context *dml2 = context->bw_ctx.dml2;
- struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
+ struct dml2_context *dml2;
+ struct dml2_wrapper_scratch *s;
+
+ if (!context)
+	return false;
+
+ dml2 = context->bw_ctx.dml2;
+ s = &dml2->v20.scratch;
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
- ASSERT(min_state_for_g6_temp_read >= 0);
+ ASSERT(min_state_for_g6_temp_read >= 0);
+ }
if (!dml2->config.use_native_pstate_optimization) {
result = optimize_pstate_with_svp_and_drr(dml2, context);
@@ -549,14 +556,20 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
*/
- if (min_state_for_g6_temp_read >= 0)
- min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
- else
- min_state = s->mode_support_params.out_lowest_state_idx;
-
- if (result)
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ if (min_state_for_g6_temp_read >= 0)
+ min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
+ else
+ min_state = s->mode_support_params.out_lowest_state_idx;
+ }
+ if (result) {
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
+ } else {
+ result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
+ }
+ }
return result;
}
@@ -685,6 +698,8 @@ static bool dml2_validate_only(struct dc_state *context)
build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
+ if (!dml2->config.skip_hw_state_mapping)
+ dml2_apply_det_buffer_allocation_policy(dml2, &dml2->v20.scratch.cur_display_config);
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
@@ -732,11 +747,10 @@ static inline struct dml2_context *dml2_allocate_memory(void)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, dml2, config);
return;
- }
+ }
// Store config options
(*dml2)->config = *config;
@@ -771,10 +785,8 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
- }
// Allocate Mode Lib Ctx
*dml2 = dml2_allocate_memory();
@@ -842,8 +854,7 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
dml21_reinit(in_dc, dml2, config);
return;
}
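In the wrapper changes above, the dGPU path keeps min_state as the larger of the G6-temp-read floor and DML's lowest supported state index, treats the -1 sentinel as "trust DML", and the new APU path bypasses the floor entirely. A compact sketch of that selection (pick_min_state is a hypothetical helper, not a driver function):

/* Pick the state index used for mode programming: honor the G6 temp-read
 * floor only when it is valid (>= 0); a -1 sentinel falls back to the
 * lowest state index reported by DML mode support. APUs skip the floor.
 */
static unsigned int pick_min_state(int g6_floor, unsigned int dml_lowest, int is_apu)
{
	if (is_apu || g6_floor < 0)
		return dml_lowest;

	return (unsigned int)g6_floor > dml_lowest ? (unsigned int)g6_floor : dml_lowest;
}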
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
index 377ef6d01ae5..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
@@ -427,18 +427,6 @@ void dml_rq_dlg_get_dlg_reg(dml_display_dlg_regs_st *disp_dlg_regs,
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
- // hack for FPGA
- /* NOTE: We dont have getenv defined in driver and it does not make any sense in the driver */
- /*char* fpga_env = getenv("FPGA_FPDIV");
- if(fpga_env !=NULL)
- {
- if(disp_dlg_regs->vratio_prefetch >= (dml_uint_t)dml_pow(2, 22))
- {
- disp_dlg_regs->vratio_prefetch = (dml_uint_t)dml_pow(2, 22)-1;
- dml_print("FPGA msg: vratio_prefetch exceed the max value, the register field is [21:0]\n");
- }
- }*/
-
disp_dlg_regs->refcyc_per_vm_group_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_group_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (dml_uint_t)(dml_get_refcyc_per_vm_group_flip_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_req_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_req_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10));
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index d9aaebfa3a0a..11535922b5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,9 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#define DC_LOGGER \
+ dsc->ctx->logger
+
/* This module's internal functions */
/* default DSC policy target bitrate limit is 16bpp */
@@ -480,6 +483,48 @@ bool dc_dsc_compute_bandwidth_range(
return is_dsc_possible;
}
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing)
+{
+ struct dsc_enc_caps dsc_enc_caps;
+
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ DC_LOG_DSC("dsc encoder caps:");
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_enc_caps.dsc_version);
+ DC_LOG_DSC("\tslice_caps 0x%x", dsc_enc_caps.slice_caps.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_enc_caps.lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_enc_caps.is_block_pred_supported);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_enc_caps.color_formats.raw);
+ DC_LOG_DSC("\tcolor_depth 0x%x", dsc_enc_caps.color_depth.raw);
+ DC_LOG_DSC("\tmax_total_throughput_mps %d", dsc_enc_caps.max_total_throughput_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_enc_caps.max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_enc_caps.bpp_increment_div);
+}
+
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps)
+{
+ DC_LOG_DSC("dsc decoder caps:");
+ DC_LOG_DSC("\tis_dsc_supported %d", dsc_sink_caps->is_dsc_supported);
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_sink_caps->dsc_version);
+ DC_LOG_DSC("\trc_buffer_size %d", dsc_sink_caps->rc_buffer_size);
+ DC_LOG_DSC("\tslice_caps1 0x%x", dsc_sink_caps->slice_caps1.raw);
+ DC_LOG_DSC("\tslice_caps2 0x%x", dsc_sink_caps->slice_caps2.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_sink_caps->lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_sink_caps->is_block_pred_supported);
+ DC_LOG_DSC("\tedp_max_bits_per_pixel %d", dsc_sink_caps->edp_max_bits_per_pixel);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_sink_caps->color_formats.raw);
+ DC_LOG_DSC("\tthroughput_mode_0_mps %d", dsc_sink_caps->throughput_mode_0_mps);
+ DC_LOG_DSC("\tthroughput_mode_1_mps %d", dsc_sink_caps->throughput_mode_1_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_sink_caps->max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_sink_caps->bpp_increment_div);
+ DC_LOG_DSC("\tbranch_overall_throughput_0_mps %d", dsc_sink_caps->branch_overall_throughput_0_mps);
+ DC_LOG_DSC("\tbranch_overall_throughput_1_mps %d", dsc_sink_caps->branch_overall_throughput_1_mps);
+ DC_LOG_DSC("\tbranch_max_line_width %d", dsc_sink_caps->branch_max_line_width);
+ DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
+}
+
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
index fae98cf52020..bc058f682438 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
@@ -270,16 +270,3 @@ void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
dwbc30->dwbc_shift = dwbc_shift;
dwbc30->dwbc_mask = dwbc_mask;
}
-
-void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay)
-{
- struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
-
- /*
- * Set maximum delay of host read access to DWBSCL LUT or OGAM LUT if there are no
- * idle cycles in HW pipeline (in number of clock cycles times 4)
- */
- REG_UPDATE(DWB_HOST_READ_CONTROL, DWB_HOST_READ_RATE_CONTROL, host_read_delay);
-
- DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
index 0f3f7c5fbaec..7f053f49ec6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
@@ -914,7 +914,6 @@ bool dwb3_ogam_set_input_transfer_func(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 22ac2b7e49ae..8364c9f9231a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -140,7 +140,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -518,6 +518,20 @@ bool hubp1_program_surface_flip_and_addr(
return true;
}
+void hubp1_clear_tiling(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, 0);
+}
+
void hubp1_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
@@ -535,7 +549,7 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -1363,6 +1377,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
.hubp_init = hubp1_init,
+ .hubp_clear_tiling = hubp1_clear_tiling,
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index 69119b2fdce2..a85dc3be786f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -706,7 +706,7 @@ struct dcn10_hubp {
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -739,7 +739,7 @@ void hubp1_program_rotation(
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp1_dcc_control(struct hubp *hubp,
@@ -794,4 +794,6 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset);
void hubp1_set_flip_int(struct hubp *hubp);
+void hubp1_clear_tiling(struct hubp *hubp);
+
#endif
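The new hubp*_clear_tiling callbacks introduced across the DCN generations all perform the same reset: swath height to 0, SW_MODE back to DC_SW_LINEAR, and DCC disabled on both surfaces; only the set of per-generation DCC fields differs. A mock-register sketch of the common shape (struct mock_hubp and clear_tiling() are stand-ins for the REG_UPDATE-based implementations):

#include <stdio.h>

/* Mock register state; the real code goes through REG_UPDATE() on SRIs. */
struct mock_hubp {
	unsigned int swath_height;
	unsigned int sw_mode;   /* 0 == linear, mirroring DC_SW_LINEAR */
	unsigned int dcc_en[2]; /* primary / secondary surface         */
};

static void clear_tiling(struct mock_hubp *hubp)
{
	hubp->swath_height = 0; /* DCHUBP_REQ_SIZE_CONFIG.SWATH_HEIGHT */
	hubp->sw_mode = 0;      /* DCSURF_TILING_CONFIG.SW_MODE        */
	hubp->dcc_en[0] = 0;    /* DCSURF_SURFACE_CONTROL DCC enables  */
	hubp->dcc_en[1] = 0;
}

int main(void)
{
	struct mock_hubp h = { 4, 2, { 1, 1 } };

	clear_tiling(&h);
	printf("%u %u %u %u\n", h.swath_height, h.sw_mode, h.dcc_en[0], h.dcc_en[1]);
	return 0;
}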
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 0637e4c552d8..c74f6a3313a2 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -310,7 +310,7 @@ void hubp2_setup_interdependent(
*/
static void hubp2_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_3(DCSURF_ADDR_CONFIG,
@@ -406,6 +406,20 @@ void hubp2_program_rotation(
H_MIRROR_EN, mirror);
}
+void hubp2_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, 0);
+}
+
void hubp2_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
@@ -536,7 +550,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -1676,6 +1690,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp2_clear_tiling,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 18e194507e36..6968087a3605 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -382,7 +382,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -409,6 +409,8 @@ void hubp2_read_state_common(struct hubp *hubp);
void hubp2_read_state(struct hubp *hubp);
+void hubp2_clear_tiling(struct hubp *hubp);
+
#endif /* __DC_MEM_INPUT_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
index cd2bfcc51276..65c628078ca2 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
@@ -42,7 +42,7 @@
static void hubp201_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -131,6 +131,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
+ .hubp_clear_tiling = hubp1_clear_tiling,
};
bool dcn201_hubp_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index e13d69a22c1c..edbdb8c88d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -837,6 +837,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_init = hubp21_init,
.validate_dml_output = hubp21_validate_dml_output,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp1_clear_tiling,
};
bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 60a64d290352..12b282ed7067 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -318,7 +318,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_4(DCSURF_ADDR_CONFIG,
@@ -334,6 +334,22 @@ void hubp3_program_tiling(
}
+void hubp3_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_BLK, 0,
+ PRIMARY_SURFACE_DCC_IND_BLK_C, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_BLK, 0,
+ SECONDARY_SURFACE_DCC_IND_BLK_C, 0);
+}
+
void hubp3_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size)
{
@@ -395,7 +411,7 @@ void hubp3_dmdata_set_attributes(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -512,6 +528,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b010531a7fe8..b7d7adf0b58c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -264,7 +264,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -280,7 +280,7 @@ void hubp3_setup(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp3_dcc_control(struct hubp *hubp, bool enable,
@@ -297,6 +297,8 @@ void hubp3_read_state(struct hubp *hubp);
void hubp3_init(struct hubp *hubp);
+void hubp3_clear_tiling(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 8394e8c06919..46b804ed05fb 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -96,6 +96,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index ca5b4b28a664..8b5bd73b8094 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -201,7 +201,8 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_force_cursor_pstate_disallow = hubp32_update_force_cursor_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index d1f05b82b3dd..faf37febc6fb 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -172,7 +172,7 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -216,6 +216,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
index 586b43aa5834..d913f80b3130 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
@@ -65,7 +65,7 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index b1ebf5053b4f..28ceceaf9e31 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -40,7 +40,7 @@
#define FN(reg_name, field_name) \
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
-static void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
const struct dc_plane_address address)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -49,14 +49,14 @@ static void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
REG_WRITE(HUBP_3DLUT_ADDRESS_LOW, address.lut3d.addr.low_part);
}
-static void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
+void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_DLG_PARAM, REFCYC_PER_3DLUT_GROUP, refcyc_per_3dlut_group);
}
-static void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable)
+void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -72,28 +72,28 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp)
return ret;
}
-static void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
+void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_ADDRESSING_MODE, addr_mode);
}
-static void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
+void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-static void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
}
-static void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r)
@@ -106,21 +106,21 @@ static void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
HUBP_3DLUT_CROSSBAR_SELECT_CR_R, bit_slice_cr_r);
}
-static void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
+void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE, HUBP0_3DLUT_FL_BIAS, bias, HUBP0_3DLUT_FL_SCALE, scale);
}
-static void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
+void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_MODE, mode);
}
-static void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
+void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -145,30 +145,44 @@ void hubp401_init(struct hubp *hubp)
}
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
- uint32_t value = 0;
+ unsigned int vstartup_lines = pipe_global_sync->dcn4x.vstartup_lines;
+ unsigned int vupdate_offset_pixels = pipe_global_sync->dcn4x.vupdate_offset_pixels;
+ unsigned int vupdate_width_pixels = pipe_global_sync->dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int vready_offset_pixels = pipe_global_sync->dcn4x.vready_offset_pixels;
+ unsigned int htotal = timing->h_total;
+ unsigned int vblank_start = 0;
+ unsigned int vblank_end = 0;
+ unsigned int pixel_width = 0;
+ uint32_t reg_value = 0;
+ bool is_vready_at_or_after_vsync = false;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
/*
* if (VSTARTUP_START - (VREADY_OFFSET+VUPDATE_WIDTH+VUPDATE_OFFSET)/htotal) <= OTG_V_BLANK_END
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 1
* else
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if (pipe_dest->htotal != 0) {
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (htotal != 0) {
+ vblank_start = timing->v_total - timing->v_front_porch;
+ vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
+ pixel_width = vready_offset_pixels + vupdate_width_pixels + vupdate_offset_pixels;
+
+ is_vready_at_or_after_vsync = (vstartup_lines - pixel_width / htotal) <= vblank_end;
+
+ if (is_vready_at_or_after_vsync)
+ reg_value = 1;
}
- REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, reg_value);
}
void hubp401_program_requestor(
struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+ struct dml2_display_rq_regs *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -196,8 +210,8 @@ void hubp401_program_requestor(
void hubp401_program_deadline(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -294,66 +308,64 @@ void hubp401_program_deadline(
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
/* OTG is locked when this function is called. Registers are double buffered.
* Disabling the requestors is not needed.
*/
- hubp401_vready_at_or_After_vsync(hubp, pipe_dest);
- hubp401_program_requestor(hubp, rq_regs);
- hubp401_program_deadline(hubp, dlg_attr, ttu_attr);
+ hubp401_vready_at_or_After_vsync(hubp, pipe_global_sync, timing);
+ hubp401_program_requestor(hubp, &pipe_regs->rq_regs);
+ hubp401_program_deadline(hubp, &pipe_regs->dlg_regs, &pipe_regs->ttu_regs);
}
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_SET_2(PREFETCH_SETTINGS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+ DST_Y_PREFETCH, pipe_regs->dlg_regs.dst_y_prefetch,
+ VRATIO_PREFETCH, pipe_regs->dlg_regs.vratio_prefetch);
REG_SET(PREFETCH_SETTINGS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+ VRATIO_PREFETCH_C, pipe_regs->dlg_regs.vratio_prefetch_c);
REG_SET_2(VBLANK_PARAMETERS_0, 0,
- DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
- DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+ DST_Y_PER_VM_VBLANK, pipe_regs->dlg_regs.dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, pipe_regs->dlg_regs.dst_y_per_row_vblank);
REG_SET_2(FLIP_PARAMETERS_0, 0,
- DST_Y_PER_VM_FLIP, dlg_attr->dst_y_per_vm_flip,
- DST_Y_PER_ROW_FLIP, dlg_attr->dst_y_per_row_flip);
+ DST_Y_PER_VM_FLIP, pipe_regs->dlg_regs.dst_y_per_vm_flip,
+ DST_Y_PER_ROW_FLIP, pipe_regs->dlg_regs.dst_y_per_row_flip);
REG_SET(VBLANK_PARAMETERS_3, 0,
- REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+ REFCYC_PER_META_CHUNK_VBLANK_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_l);
REG_SET(VBLANK_PARAMETERS_4, 0,
- REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+ REFCYC_PER_META_CHUNK_VBLANK_C, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_c);
REG_SET(FLIP_PARAMETERS_2, 0,
- REFCYC_PER_META_CHUNK_FLIP_L, dlg_attr->refcyc_per_meta_chunk_flip_l);
+ REFCYC_PER_META_CHUNK_FLIP_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_flip_l);
REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
- REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
- REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+ REFCYC_PER_LINE_DELIVERY_PRE_L, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_c);
REG_SET(DCN_SURF0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_l);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_l);
REG_SET(DCN_SURF1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_c);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_c);
REG_SET(DCN_CUR0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+ REFCYC_PER_REQ_DELIVERY_PRE, pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_cur0);
REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
- MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
- QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+ MIN_TTU_VBLANK, pipe_regs->ttu_regs.min_ttu_vblank,
+ QoS_LEVEL_FLIP, pipe_regs->ttu_regs.qos_level_flip);
}
@@ -508,6 +520,18 @@ bool hubp401_program_surface_flip_and_addr(
return true;
}
+void hubp401_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_EN, 0);
+}
+
void hubp401_dcc_control(struct hubp *hubp,
struct dc_plane_dcc_param *dcc)
{
@@ -520,7 +544,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
/* DCSURF_ADDR_CONFIG still shows up in reg spec, but does not need to be programmed for DCN4x
@@ -568,7 +592,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -969,8 +993,8 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_surface_flip_and_addr = hubp401_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp401_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
- .hubp_setup = hubp401_setup,
- .hubp_setup_interdependent = hubp401_setup_interdependent,
+ .hubp_setup2 = hubp401_setup,
+ .hubp_setup_interdependent2 = hubp401_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
@@ -1004,7 +1028,8 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_width = hubp401_program_3dlut_fl_width,
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
- .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done
+ .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
+ .hubp_clear_tiling = hubp2_clear_tiling,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index e52fdb5b0cd0..6e1d4c90ddd4 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -256,29 +256,15 @@
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
-void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
-
-void hubp401_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs);
-
-void hubp401_program_deadline(
- struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
-
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
bool hubp401_program_surface_flip_and_addr(
struct hubp *hubp,
@@ -290,7 +276,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp401_program_size(
@@ -302,7 +288,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -340,4 +326,42 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp);
void hubp401_set_unbounded_requesting(struct hubp *hubp, bool enable);
+void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale);
+
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
+
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+
+void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
+
+void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
+
+void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable);
+
+void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group);
+
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp, const struct dc_plane_address address);
+
+void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format);
+
+void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+
+void hubp401_clear_tiling(struct hubp *hubp);
+
+void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
+void hubp401_program_requestor(
+ struct hubp *hubp,
+ struct dml2_display_rq_regs *rq_regs);
+
+void hubp401_program_deadline(
+ struct hubp *hubp,
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr);
+
#endif /* __DC_HUBP_DCN401_H__ */
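hubp401_vready_at_or_After_vsync() now derives vblank_end from dc_crtc_timing and evaluates the condition quoted in its comment on pixel-based global-sync values. The same decision modeled as a standalone function on plain integers (the function name and the sample timing are illustrative, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* Same decision as the driver code: if
 * (VSTARTUP_START - (VREADY_OFFSET + VUPDATE_WIDTH + VUPDATE_OFFSET) / htotal)
 * <= OTG_V_BLANK_END, the HUBP_VREADY_AT_OR_AFTER_VSYNC bit is set.
 */
static bool vready_at_or_after_vsync(unsigned int vstartup_lines,
				     unsigned int vready_offset_px,
				     unsigned int vupdate_width_px,
				     unsigned int vupdate_offset_px,
				     unsigned int htotal,
				     unsigned int vblank_end)
{
	unsigned int pixel_width;

	if (htotal == 0)
		return false; /* the driver leaves the register at 0 in this case */

	pixel_width = vready_offset_px + vupdate_width_px + vupdate_offset_px;
	return (vstartup_lines - pixel_width / htotal) <= vblank_end;
}

int main(void)
{
	/* Illustrative timing only: 160 px of offsets spans 0 full lines at
	 * htotal 2200, so the test reduces to 100 <= 120.
	 */
	printf("%d\n", vready_at_or_after_vsync(100, 80, 40, 40, 2200, 120)); /* 1 */
	return 0;
}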
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index b029ec1b26d3..a5e18ab72394 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1288,7 +1288,7 @@ static void dcn20_power_on_plane_resources(
}
}
-static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
@@ -1467,7 +1467,7 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+void dcn20_detect_pipe_changes(struct dc_state *old_state,
struct dc_state *new_state,
struct pipe_ctx *old_pipe,
struct pipe_ctx *new_pipe)
@@ -1655,7 +1655,7 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
}
}
-static void dcn20_update_dchubp_dpp(
+void dcn20_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -1678,25 +1678,41 @@ static void dcn20_update_dchubp_dpp(
* VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
* VTG has a 1:1 mapping with OTG. Each pipe's HUBP will select which VTG
*/
+
if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
- hubp->funcs->hubp_setup(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs,
- &pipe_ctx->rq_regs,
- &pipe_ctx->pipe_dlg_param);
+ if (hubp->funcs->hubp_setup2) {
+ hubp->funcs->hubp_setup2(
+ hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ } else {
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
}
if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
- if (pipe_ctx->update_flags.bits.hubp_interdependent)
- hubp->funcs->hubp_setup_interdependent(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2) {
+ hubp->funcs->hubp_setup_interdependent2(
+ hubp,
+ &pipe_ctx->hubp_regs);
+ } else {
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+ }
+ }
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
@@ -1756,10 +1772,9 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->plane_res.scl_data.viewport_c);
viewport_changed = true;
}
- if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
- hubp->funcs->hubp_program_mcache_id_and_split_coordinate(
- hubp,
- &pipe_ctx->mcache_regs);
+
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, &pipe_ctx->mcache_regs);
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
@@ -1838,7 +1853,7 @@ static void dcn20_update_dchubp_dpp(
hubp->funcs->phantom_hubp_post_enable(hubp);
}
-static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+static int dcn20_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
struct pipe_ctx *other_pipe;
int vready_offset = pipe->pipe_dlg_param.vready_offset;
@@ -1864,6 +1879,30 @@ static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
return vready_offset;
}
+static void dcn20_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -1874,33 +1913,17 @@ static void dcn20_program_pipe(
/* Only need to unblank on top pipe */
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.odm ||
- pipe_ctx->stream->update_flags.bits.abm_level)
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
hws->funcs.blank_pixel_data(dc, pipe_ctx,
- !pipe_ctx->plane_state ||
- !pipe_ctx->plane_state->visible);
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
}
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
- && !pipe_ctx->prev_odm_pipe) {
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout);
-
- if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
-
- if (hws->funcs.setup_vupdate_interrupt)
- hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
- }
+ && !pipe_ctx->prev_odm_pipe)
+ dcn20_program_tg(dc, pipe_ctx, context, hws);
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
@@ -1931,22 +1954,22 @@ static void dcn20_program_pipe(
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (hws->funcs.populate_mcm_luts) {
if (pipe_ctx->plane_state) {
hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a);
pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
}
}
if (pipe_ctx->plane_state &&
- (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change ||
- pipe_ctx->plane_state->update_flags.bits.lut_3d ||
- pipe_ctx->update_flags.bits.enable))
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1954,10 +1977,10 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -1966,7 +1989,7 @@ static void dcn20_program_pipe(
* causes a different pipe to be chosen to odm combine with.
*/
if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->update_flags.bits.opp_changed) {
+ || pipe_ctx->update_flags.bits.opp_changed) {
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
@@ -1996,14 +2019,14 @@ static void dcn20_program_pipe(
memset(&params, 0, sizeof(params));
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
dc->hwss.set_disp_pattern_generator(dc,
- pipe_ctx,
- pipe_ctx->stream_res.test_pattern_params.test_pattern,
- pipe_ctx->stream_res.test_pattern_params.color_space,
- pipe_ctx->stream_res.test_pattern_params.color_depth,
- NULL,
- pipe_ctx->stream_res.test_pattern_params.width,
- pipe_ctx->stream_res.test_pattern_params.height,
- pipe_ctx->stream_res.test_pattern_params.offset);
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
}
}
@@ -2012,11 +2035,12 @@ void dcn20_program_front_end_for_ctx(
struct dc_state *context)
{
int i;
- struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
- struct pipe_ctx *pipe;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
@@ -2029,7 +2053,7 @@ void dcn20_program_front_end_for_ctx(
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe, pipe->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -2044,30 +2068,31 @@ void dcn20_program_front_end_for_ctx(
if (prev_hubp_count == 0 && hubp_count > 0) {
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, false);
+ dc->res_pool->hubbub, true, false);
udelay(500);
}
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
- &context->res_ctx.pipe_ctx[i]);
+ &context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
* buffer updates properly)
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
- if (dc->hwseq->funcs.blank_pixel_data) {
+ if (dc->hwseq->funcs.blank_pixel_data)
dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
- }
+
tg->funcs->enable_crtc(tg);
}
}
@@ -2075,15 +2100,15 @@ void dcn20_program_front_end_for_ctx(
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- && !context->res_ctx.pipe_ctx[i].top_pipe
- && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
- && context->res_ctx.pipe_ctx[i].stream)
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
struct hubbub *hubbub = dc->res_pool->hubbub;
/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
@@ -2093,13 +2118,18 @@ void dcn20_program_front_end_for_ctx(
* DET allocation.
*/
if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i])
+ == SUBVP_PHANTOM))) {
if (hubbub->funcs->program_det_size)
- hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
if (dc->res_pool->hubbub->funcs->program_det_segments)
- dc->res_pool->hubbub->funcs->program_det_segments(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
}
- hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -2107,9 +2137,9 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- !resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->update_flags.bits.odm &&
- hws->funcs.update_odm)
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
hws->funcs.update_odm(dc, context, pipe);
}
@@ -2127,25 +2157,28 @@ void dcn20_program_front_end_for_ctx(
else {
/* Don't program phantom pipes in the regular front end programming sequence.
* There is an MPO transition case where a pipe being used by a video plane is
- * transitioned directly to be a phantom pipe when closing the MPO video. However
- * the phantom pipe will program a new HUBP_VTG_SEL (update takes place right away),
- * but the MPO still exists until the double buffered update of the main pipe so we
- * will get a frame of underflow if the phantom pipe is programmed here.
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
+ * right away) but the MPO still exists until the double buffered update of the
+ * main pipe so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
*/
- if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
pipe = pipe->bottom_pipe;
}
}
+
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe
- && pipe->stream && pipe->stream->num_wb_info > 0
- && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->stream->update_flags.raw)
- && hws->funcs.program_all_writeback_pipes_in_tree)
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
/* Avoid underflow by check of pipe line read when adding 2nd plane. */
@@ -2164,7 +2197,7 @@ void dcn20_program_front_end_for_ctx(
* buffered pending status clear and reset opp head pipe's none double buffered
* registers to their initial state.
*/
-static void post_unlock_reset_opp(struct dc *dc,
+void dcn20_post_unlock_reset_opp(struct dc *dc,
struct pipe_ctx *opp_head)
{
struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
@@ -2201,16 +2234,17 @@ void dcn20_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
- int i;
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
+ /* Timeout for pipe enable */
+ unsigned int timeout_us = 100000;
unsigned int polling_interval_us = 1;
struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
- !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
- post_unlock_reset_opp(dc,
- &dc->current_state->res_ctx.pipe_ctx[i]);
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dcn20_post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -2226,11 +2260,12 @@ void dcn20_post_unlock_program_front_end(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
udelay(polling_interval_us);
}
}
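The flip-pending wait above follows a simple bounded-poll idiom: poll every polling_interval_us until the condition clears or timeout_us elapses. A minimal standalone sketch of that idiom (the helper name and the is_pending callback are illustrative, not part of DC):

#include <stdbool.h>

extern void udelay(unsigned long usecs); /* kernel-provided busy delay */

/* Bounded poll: returns true if the condition cleared within the timeout.
 * This mirrors the loop structure above, just factored into a helper.
 */
static bool poll_until_clear(bool (*is_pending)(void *ctx), void *ctx,
			     unsigned int timeout_us,
			     unsigned int polling_interval_us)
{
	unsigned int j;

	for (j = 0; j < timeout_us / polling_interval_us && is_pending(ctx); j++)
		udelay(polling_interval_us);

	return !is_pending(ctx);
}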
@@ -2244,15 +2279,14 @@ void dcn20_post_unlock_program_front_end(
* before we've transitioned to 2:1 or 4:1
*/
if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
- resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
int j = 0;
struct timing_generator *tg = pipe->stream_res.tg;
-
if (tg->funcs->get_optc_double_buffer_pending) {
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
udelay(polling_interval_us);
}
}
@@ -2260,7 +2294,7 @@ void dcn20_post_unlock_program_front_end(
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, false, false);
+ dc->res_pool->hubbub, false, false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -2291,11 +2325,11 @@ void dcn20_post_unlock_program_front_end(
return;
/* P-State support transitions:
- * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
- * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
- * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
- * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
- * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
@@ -2310,12 +2344,11 @@ void dcn20_post_unlock_program_front_end(
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-
/* WA for stutter underflow during MPO transitions when adding 2nd plane */
if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
if (dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ context->stream_status[0].plane_count > 1) {
struct timing_generator *tg = dc->res_pool->timing_generators[0];
@@ -2463,7 +2496,7 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index 5c874f7b0683..9d1ad3b29ca5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -154,6 +154,21 @@ void dcn20_setup_gsl_group_as_lock(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool enable);
-
+void dcn20_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_post_unlock_reset_opp(
+ struct dc *dc,
+ struct pipe_ctx *opp_head);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 0e8d32e3dbae..c32764aef884 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -86,7 +86,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 780ce4c064aa..dcb27cdbce73 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -86,7 +86,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 5f8f45b48720..fb2ffb637931 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -89,7 +89,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -98,7 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .set_backlight_level = dcn31_set_backlight_level,
+ .set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 9b88eb72086d..be26c925fdfa 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -162,6 +162,8 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
int opp_inst[MAX_PIPES] = {0};
int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
+ struct mpc *mpc = dc->res_pool->mpc;
+ int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -174,6 +176,16 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ false,
+ 0,
+ NULL);
+ }
+ }
+
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 6bdfbf22ce87..21ef03a76229 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -91,7 +91,6 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -100,7 +99,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .set_backlight_level = dcn31_set_backlight_level,
+ .set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index fa11f075d1f9..ee4de9ddfef4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -985,6 +985,7 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+ dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
/* for DCN401 testing only */
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
@@ -1398,12 +1399,12 @@ void dcn32_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_power_control &&
- !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ /*
+ * Add the logic to extract BOTH the power-up and power-down sequences
+ * from enable/disable link output, and call eDP panel control only
+ * once, in enable_link_dp and disable_link_dp.
+ */
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 5ecee7e320da..e4d149eff10f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -87,7 +87,6 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index e599cdc465bf..59fc1c114fbe 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -426,6 +426,8 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
int opp_inst[MAX_PIPES] = {0};
int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
+ struct mpc *mpc = dc->res_pool->mpc;
+ int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -438,6 +440,16 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ false,
+ 0,
+ NULL);
+ }
+ }
+
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -1020,8 +1032,13 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
- if (pipe_ctx->stream_res.dsc)
+ if (pipe_ctx->stream_res.dsc) {
update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
+ if (dc->caps.sequential_ono) {
+ update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
+ update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+ }
+ }
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
@@ -1579,3 +1596,37 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return false;
}
+
+/*
+ * Set power-up to true for every pipe to match the pre-OS configuration.
+ */
+static void dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
+{
+ int i = 0, j = 0;
+
+ memset(update_state, 0, sizeof(struct pg_block_update));
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
+ update_state->pg_pipe_res_update[j][i] = true;
+
+ update_state->pg_res_update[PG_HPO] = true;
+ update_state->pg_res_update[PG_DWB] = true;
+}
+
+/*
+ * Power up all gated blocks to restore the pre-OS optimization state.
+ * Re-use hwss functions and the existing PG & RCG flags to decide the
+ * power-up sequence.
+ */
+void dcn35_hardware_release(struct dc *dc)
+{
+ struct pg_block_update pg_update_state;
+
+ dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);
+
+ if (dc->hwss.root_clock_control)
+ dc->hwss.root_clock_control(dc, &pg_update_state, true);
+ /* power up required HW blocks */
+ if (dc->hwss.hw_block_power_up)
+ dc->hwss.hw_block_power_up(dc, &pg_update_state);
+}
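For illustration, a reduced sketch of how a pg_block_update-style flag matrix can drive a power-up walk. The dimensions and the power_up_block() helper are placeholders, not DC's real hooks (the real flow goes through dc->hwss.root_clock_control and dc->hwss.hw_block_power_up as above):

#include <stdbool.h>

#define NUM_RES   8 /* stands in for PG_HW_PIPE_RESOURCES_NUM_ELEMENT */
#define NUM_PIPES 4 /* stands in for dc->res_pool->pipe_count */

static void power_up_block(int res, int pipe)
{
	/* hardware access elided in this sketch */
	(void)res;
	(void)pipe;
}

/* Mark every per-pipe resource for ungating, then power up each flagged
 * block; this mirrors the calc-then-apply split used above.
 */
static void ungate_all(bool update[NUM_RES][NUM_PIPES])
{
	int res, pipe;

	for (res = 0; res < NUM_RES; res++)
		for (pipe = 0; pipe < NUM_PIPES; pipe++)
			update[res][pipe] = true;

	for (res = 0; res < NUM_RES; res++)
		for (pipe = 0; pipe < NUM_PIPES; pipe++)
			if (update[res][pipe])
				power_up_block(res, pipe);
}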
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index e27b3609020f..0b1d6f608edd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -99,4 +99,6 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+void dcn35_hardware_release(struct dc *dc);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index fd67779c27a9..c7acaf97974c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -92,7 +92,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -123,6 +122,11 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .hardware_release = dcn35_hardware_release,
+ .detect_pipe_changes = dcn20_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 3c275a1eff58..4f73e7f551ac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -91,7 +91,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 307782592789..555a9f590cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -3,6 +3,7 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
@@ -126,91 +127,6 @@ void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
}
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc, uint8_t region)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct ips_ono_region_state state = {0, 0};
-
- switch (region) {
- case 0:
- /* dccg, dio, dcio */
- REG_GET_2(DOMAIN22_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 1:
- /* dchubbub, dchvm, dchubbubmem */
- REG_GET_2(DOMAIN23_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 2:
- /* mpc, opp, optc, dwb */
- REG_GET_2(DOMAIN24_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 3:
- /* hpo */
- REG_GET_2(DOMAIN25_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 4:
- /* dchubp0, dpp0 */
- REG_GET_2(DOMAIN0_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 5:
- /* dsc0 */
- REG_GET_2(DOMAIN16_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 6:
- /* dchubp1, dpp1 */
- REG_GET_2(DOMAIN1_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 7:
- /* dsc1 */
- REG_GET_2(DOMAIN17_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 8:
- /* dchubp2, dpp2 */
- REG_GET_2(DOMAIN2_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 9:
- /* dsc2 */
- REG_GET_2(DOMAIN18_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 10:
- /* dchubp3, dpp3 */
- REG_GET_2(DOMAIN3_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 11:
- /* dsc3 */
- REG_GET_2(DOMAIN19_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- default:
- break;
- }
-
- return state;
-}
-
void dcn401_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -435,7 +351,8 @@ void dcn401_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
- dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2;
+ dc->debug.fams2_config.bits.enable &=
+ dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
@@ -821,7 +738,7 @@ enum dc_status dcn401_enable_stream_timing(
int opp_inst[MAX_PIPES] = {0};
struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
struct dc_crtc_timing patched_crtc_timing = stream->timing;
- bool manual_mode;
+ bool manual_mode = false;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
int odm_slice_width;
@@ -880,15 +797,15 @@ enum dc_status dcn401_enable_stream_timing(
patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
pipe_ctx->stream_res.tg->funcs->program_timing(
- pipe_ctx->stream_res.tg,
- &patched_crtc_timing,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout,
- pipe_ctx->stream->signal,
- true);
+ pipe_ctx->stream_res.tg,
+ &patched_crtc_timing,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
+ pipe_ctx->stream->signal,
+ true);
for (i = 0; i < opp_cnt; i++) {
opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -2012,3 +1929,730 @@ void dcn401_reset_hw_ctx_wrap(
}
}
}
+
+static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+{
+ struct pipe_ctx *other_pipe;
+ unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
+
+ /* Always use the largest vready_offset of all connected pipes */
+ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+
+ return vready_offset;
+}
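A note on the traversal above: it walks every pipe linked through the MPC tree (top/bottom) and the ODM chain (prev/next) so that the whole group shares one VREADY offset. A reduced sketch of the same max-over-linked-list idiom (stub node type, not DC's pipe_ctx):

struct node {
	unsigned int vready_offset_pixels;
	struct node *next;
};

/* Largest offset along one linked direction; the real code repeats this
 * walk for the bottom, top, next-ODM and prev-ODM links.
 */
static unsigned int max_offset(const struct node *head, unsigned int seed)
{
	unsigned int max = seed;
	const struct node *n;

	for (n = head; n; n = n->next)
		if (n->vready_offset_pixels > max)
			max = n->vready_offset_pixels;

	return max;
}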
+
+static void dcn401_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
+static void dcn401_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe)
+ dcn401_program_tg(dc, pipe_ctx, context, hws);
+
+ if (pipe_ctx->update_flags.bits.odm)
+ hws->funcs.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (hws->funcs.enable_plane)
+ hws->funcs.enable_plane(dc, pipe_ctx, context);
+ else
+ dc->hwss.enable_plane(dc, pipe_ctx, context);
+
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+
+ if (pipe_ctx->update_flags.raw ||
+ (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
+ pipe_ctx->stream->update_flags.raw)
+ dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
+
+ if (hws->funcs.populate_mcm_luts) {
+ if (pipe_ctx->plane_state) {
+ hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
+ pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ }
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish;
+ * only do gamma programming when powering on, with an internal memcmp
+ * to avoid updating slave planes.
+ */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+ * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+ pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+ struct bit_depth_reduction_params params;
+
+ memset(&params, 0, sizeof(params));
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ dc->hwss.set_disp_pattern_generator(dc,
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+}
+
+void dcn401_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ unsigned int prev_hubp_count = 0;
+ unsigned int hubp_count = 0;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (resource_is_pipe_topology_changed(dc->current_state, context))
+ resource_log_pipe_topology_update(dc, context);
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state->triplebuffer_flips)
+ BREAK_TO_DEBUGGER();
+
+ /* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ prev_hubp_count++;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ hubp_count++;
+ }
+
+ if (prev_hubp_count == 0 && hubp_count > 0) {
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, false);
+ udelay(500);
+ }
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc) {
+ if (dc->hwseq->funcs.blank_pixel_data)
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
+
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
+ * then we want to do the programming here (effectively it's being disabled). If we do
+ * the programming later, the DET won't be updated until the OTG for the phantom pipe is
+ * turned on (i.e. in an MCLK switch), which can come in too late and cause issues with
+ * DET allocation.
+ */
+ if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
+ SUBVP_PHANTOM))) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ }
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
+ }
+
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ if (hws->funcs.program_pipe)
+ hws->funcs.program_pipe(dc, pipe, context);
+ else {
+ /* Don't program phantom pipes in the regular front end programming sequence.
+ * There is an MPO transition case where a pipe being used by a video plane is
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However, the phantom pipe will program a new HUBP_VTG_SEL (the update takes
+ * place right away), but the MPO still exists until the double-buffered update
+ * of the main pipe, so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
+ */
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ dcn401_program_pipe(dc, pipe, context);
+ }
+
+ pipe = pipe->bottom_pipe;
+ }
+ }
+
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+
+ /* Avoid underflow by check of pipe line read when adding 2nd plane. */
+ if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
+ }
+ }
+}
+
+void dcn401_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ // Timeout for pipe enable
+ unsigned int timeout_us = 100000;
+ unsigned int polling_interval_us = 1;
+ struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dc->hwss.post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ /*
+ * If we are enabling a pipe, we need to wait for the pending clear as this is a
+ * critical part of the enable operation; otherwise, DM may request an immediate
+ * flip, which will cause HW to perform an "immediate enable" (as opposed to a
+ * "vsync enable"), which is unsupported on DCN.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ // Don't check flip pending on phantom pipes
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* When going from a smaller ODM slice count to a larger one, we must ensure the
+ * double buffer update completes before we return, so we don't reduce DISPCLK
+ * before we've transitioned to 2:1 or 4:1.
+ */
+ if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ int j = 0;
+ struct timing_generator *tg = pipe->stream_res.tg;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+ }
+ }
+
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, false, false);
+
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ /* Program phantom pipe here to prevent a frame of underflow in the MPO transition
+ * case (if a pipe being used for a video plane transitions to a phantom pipe, it
+ * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
+ * programming sequence).
+ */
+ while (pipe) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ /* When turning on the phantom pipe we want to run through the
+ * entire enable sequence, so apply all the "enable" flags.
+ */
+ if (dc->hwss.apply_update_flags_for_phantom)
+ dc->hwss.apply_update_flags_for_phantom(pipe);
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
+ dcn401_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
+ }
+ }
+
+ if (!hwseq)
+ return;
+
+ /* P-State support transitions:
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ */
+ if (hwseq->funcs.update_force_pstate)
+ dc->hwseq->funcs.update_force_pstate(dc, context);
+
+ /* Only program the MALL registers after all the main and phantom pipes
+ * are done programming.
+ */
+ if (hwseq->funcs.program_mall_pipe_config)
+ hwseq->funcs.program_mall_pipe_config(dc, context);
+
+ /* WA to apply WM setting */
+ if (hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
+
+
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
+ tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
+bool dcn401_update_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* recalculate DML parameters */
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ return false;
+
+ /* apply updated bandwidth parameters */
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* update hubp configs for all pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe == NULL) {
+ bool blank = !is_pipe_tree_visible(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
+
+ if (pipe_ctx->prev_odm_pipe == NULL)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ }
+
+ return true;
+}
+
+void dcn401_detect_pipe_changes(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe)
+{
+ bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+ bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
+ unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+
+ new_pipe->update_flags.raw = 0;
+
+ /* If non-phantom pipe is being transitioned to a phantom pipe,
+ * set disable and return immediately. This is because the pipe
+ * that was previously in use must be fully disabled before we
+ * can "enable" it as a phantom pipe (since the OTG will certainly
+ * be different). The post_unlock sequence will set the correct
+ * update flags to enable the phantom pipe.
+ */
+ if (old_pipe->plane_state && !old_is_phantom &&
+ new_pipe->plane_state && new_is_phantom) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.unbounded_req = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ new_pipe->update_flags.bits.det_size = 1;
+ if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+ new_pipe->stream_res.test_pattern_params.width != 0 &&
+ new_pipe->stream_res.test_pattern_params.height != 0)
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+
+ /* For SubVP we need to unconditionally enable because any phantom pipes are
+ * always removed and then newly added for every full update whenever SubVP is in use.
+ * The remove-add sequence of the phantom pipe always results in the pipe
+ * being blanked in enable_stream_timing (DPG).
+ */
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
+ new_pipe->update_flags.bits.enable = 1;
+
+ /* Phantom pipes are effectively disabled; if the pipe was previously phantom,
+ * we have to enable it.
+ */
+ if (old_pipe->plane_state && old_is_phantom &&
+ new_pipe->plane_state && !new_is_phantom)
+ new_pipe->update_flags.bits.enable = 1;
+
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ /* Detect plane change */
+ if (old_pipe->plane_state != new_pipe->plane_state)
+ new_pipe->update_flags.bits.plane_changed = true;
+
+ /* Detect top pipe only changes */
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
+ /* Detect global sync changes */
+ if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
+ || (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
+ || (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
+ || (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+
+ if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
+ new_pipe->update_flags.bits.det_size = 1;
+
+ /*
+ * Detect opp / tg change; only set on change, not on enable.
+ * Assume mpcc inst == pipe index; if not, this code needs to be updated,
+ * since mpcc is what is affected by these. In fact, the whole sequence
+ * currently makes this assumption, given how hubp reset is matched to the
+ * same-index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /*
+ * Detect mpcc blending changes; only dpp inst and opp matter here.
+ * MPCCs getting removed/inserted update the connected ones during
+ * their own programming.
+ */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs old_rq_regs = old_pipe->hubp_regs.rq_regs;
+ struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs *new_rq_regs = &new_pipe->hubp_regs.rq_regs;
+
+ /* Detect pipe interdependent updates */
+ if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
+ || (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
+ || (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
+ || (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
+ || (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
+ || (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
+ || (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
+ || (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
+ new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
+ || (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
+ || (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
+ old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
+ old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
+ old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
+ old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
+ old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
+ old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
+ old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
+ old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
+ old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
+ memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
+ memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+
+ if (old_pipe->unbounded_req != new_pipe->unbounded_req)
+ new_pipe->update_flags.bits.unbounded_req = 1;
+
+ if (memcmp(&old_pipe->stream_res.test_pattern_params,
+ &new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ }
+}
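The scaler, viewport and dlg/ttu/rq checks above use whole-struct memcmp as a cheap dirty test between the old and new register snapshots. A minimal standalone sketch of the pattern (stub register block, not DC's types):

#include <stdbool.h>
#include <string.h>

/* Stub register block standing in for e.g. scaler_data or the
 * dlg/ttu/rq register structs compared above.
 */
struct regs {
	unsigned int a;
	unsigned int b;
	unsigned int c;
};

/* Whole-struct memcmp is a valid dirty test for plain-data register
 * snapshots, provided any padding is consistently initialized in both
 * copies (the structs above are copied/zeroed wholesale).
 */
static bool regs_changed(const struct regs *old_regs, const struct regs *new_regs)
{
	return memcmp(old_regs, new_regs, sizeof(*old_regs)) != 0;
}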
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 28a513dfc005..17cea748789e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -63,8 +63,6 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx);
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable);
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc,
- uint8_t region);
void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
@@ -96,5 +94,12 @@ void dcn401_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
-
+void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
+void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
+bool dcn401_update_bandwidth(struct dc *dc, struct dc_state *context);
+void dcn401_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 23e4f208152e..44cb376f97c1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -17,9 +17,9 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.init_hw = dcn401_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
- .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .program_front_end_for_ctx = dcn401_program_front_end_for_ctx,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
- .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .post_unlock_program_front_end = dcn401_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -42,7 +42,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn401_prepare_bandwidth,
.optimize_bandwidth = dcn401_optimize_bandwidth,
- .update_bandwidth = dcn20_update_bandwidth,
+ .update_bandwidth = dcn401_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn31_set_static_screen_control,
@@ -66,7 +66,6 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -100,6 +99,10 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .detect_pipe_changes = dcn401_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 66fdc5805d0a..a7d66cfd93c9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -194,7 +194,6 @@ enum block_sequence_func {
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
-
};
struct block_sequence {
@@ -331,10 +330,6 @@ struct hw_sequencer_funcs {
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
- bool (*mmhubbub_warmup)(struct dc *dc,
- unsigned int num_dwb,
- struct dc_writeback_info *wb_info);
-
/* Clock Related */
enum dc_status (*set_clock)(struct dc *dc,
enum dc_clock_type clock_type,
@@ -462,6 +457,18 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx);
+ void (*detect_pipe_changes)(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+ void (*enable_plane)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*update_dchubp_dpp)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*post_unlock_reset_opp)(struct dc *dc,
+ struct pipe_ctx *opp_head);
};
void color_space_to_black_color(
@@ -489,11 +496,12 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 2edd5b38ce4f..d558efc6e12f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -45,9 +45,6 @@
#define MAX_SVP_PHANTOM_STREAMS 2
#define MAX_SVP_PHANTOM_PLANES 2
-void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
- uint32_t controller_id);
-
#include "grph_object_id.h"
#include "link_encoder.h"
#include "stream_encoder.h"
@@ -220,6 +217,7 @@ struct resource_funcs {
*/
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
+ unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
};
struct audio_support{
@@ -468,6 +466,7 @@ struct pipe_ctx {
unsigned int surface_size_in_mall_bytes;
struct dml2_dchub_per_pipe_register_set hubp_regs;
struct dml2_hubp_pipe_mcache_regs mcache_regs;
+ union dml2_global_sync_programming global_sync;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
@@ -542,7 +541,8 @@ struct dcn_bw_output {
bool legacy_svp_drr_stream_index_valid;
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
- struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES];
+ union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
struct dml2_display_arb_regs arb_regs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 55529c5f471c..d19a595c2be4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -624,10 +624,6 @@ bool dcn_validate_bandwidth(
struct dc_state *context,
bool fast_validate);
-unsigned int dcn_find_dcfclk_suits_all(
- const struct dc *dc,
- struct dc_clocks *clocks);
-
void dcn_get_soc_clks(
struct dc *dc,
int *min_fclk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 2d06067ff36d..c14d64687a3d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -306,6 +306,9 @@ struct clk_mgr_funcs {
*/
void (*set_hard_min_memclk)(struct clk_mgr *clk_mgr, bool current_mode);
+ int (*get_hard_min_memclk)(struct clk_mgr *clk_mgr);
+ int (*get_hard_min_fclk)(struct clk_mgr *clk_mgr);
+
/* Send message to PMFW to set hard max memclk frequency to highest DPM */
void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index c2dd061892f4..7a1ca1e98059 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -166,6 +166,41 @@ enum dentist_divider_range {
CLK_SR_DCN32(CLK1_CLK4_CURRENT_CNT), \
CLK_SR_DCN32(CLK4_CLK0_CURRENT_CNT)
+#define CLK_REG_LIST_DCN35() \
+ CLK_SR_DCN35(CLK1_CLK_PLL_REQ), \
+ CLK_SR_DCN35(CLK1_CLK0_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK1_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK2_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK3_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK4_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK5_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK0_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK1_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK2_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK3_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
+ CLK_SR_DCN35(CLK5_spll_field_8), \
+ SR(DENTIST_DISPCLK_CNTL), \
+
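The CLK_REG_LIST_DCN35() macro above follows DC's register-list X-macro
pattern: each CLK_SR_DCN35() entry expands to a designated initializer that
fills one uint32_t slot in struct clk_mgr_registers. A self-contained sketch
of the pattern, with hypothetical offsets standing in for the generated dcn
offset headers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register offsets; real values come from generated headers. */
#define mmCLK1_CLK0_DFS_CNTL 0x16e69
#define mmCLK1_CLK5_DFS_CNTL 0x16e78

struct clk_regs {
	uint32_t CLK1_CLK0_DFS_CNTL;
	uint32_t CLK1_CLK5_DFS_CNTL;
};

/* Each list entry becomes one designated initializer. */
#define CLK_SR(reg_name) .reg_name = mm ## reg_name

#define CLK_REG_LIST() \
	CLK_SR(CLK1_CLK0_DFS_CNTL), \
	CLK_SR(CLK1_CLK5_DFS_CNTL)

static const struct clk_regs clk_regs = { CLK_REG_LIST() };

int main(void)
{
	printf("CLK1_CLK5_DFS_CNTL at %#x\n",
	       (unsigned int)clk_regs.CLK1_CLK5_DFS_CNTL);
	return 0;
}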
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
CLK_SF(CLK1_CLK_PLL_REQ, FbMult_int, mask_sh),\
@@ -236,6 +271,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_DFS_CNTL;
uint32_t CLK1_CLK3_DFS_CNTL;
uint32_t CLK1_CLK4_DFS_CNTL;
+ uint32_t CLK1_CLK5_DFS_CNTL;
uint32_t CLK2_CLK2_DFS_CNTL;
uint32_t CLK1_CLK0_CURRENT_CNT;
@@ -243,11 +279,34 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_CURRENT_CNT;
uint32_t CLK1_CLK3_CURRENT_CNT;
uint32_t CLK1_CLK4_CURRENT_CNT;
+ uint32_t CLK1_CLK5_CURRENT_CNT;
uint32_t CLK0_CLK0_DFS_CNTL;
uint32_t CLK0_CLK1_DFS_CNTL;
uint32_t CLK0_CLK3_DFS_CNTL;
uint32_t CLK0_CLK4_DFS_CNTL;
+ uint32_t CLK1_CLK0_BYPASS_CNTL;
+ uint32_t CLK1_CLK1_BYPASS_CNTL;
+ uint32_t CLK1_CLK2_BYPASS_CNTL;
+ uint32_t CLK1_CLK3_BYPASS_CNTL;
+ uint32_t CLK1_CLK4_BYPASS_CNTL;
+ uint32_t CLK1_CLK5_BYPASS_CNTL;
+
+ uint32_t CLK1_CLK0_DS_CNTL;
+ uint32_t CLK1_CLK1_DS_CNTL;
+ uint32_t CLK1_CLK2_DS_CNTL;
+ uint32_t CLK1_CLK3_DS_CNTL;
+ uint32_t CLK1_CLK4_DS_CNTL;
+ uint32_t CLK1_CLK5_DS_CNTL;
+
+ uint32_t CLK1_CLK0_ALLOW_DS;
+ uint32_t CLK1_CLK1_ALLOW_DS;
+ uint32_t CLK1_CLK2_ALLOW_DS;
+ uint32_t CLK1_CLK3_ALLOW_DS;
+ uint32_t CLK1_CLK4_ALLOW_DS;
+ uint32_t CLK1_CLK5_ALLOW_DS;
+ uint32_t CLK5_spll_field_8;
+
};
struct clk_mgr_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 16580d624278..2a530a4a39f7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -42,6 +42,7 @@
#include "cursor_reg_cache.h"
#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -144,11 +145,21 @@ struct hubp_funcs {
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void (*hubp_setup2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
void (*hubp_setup_interdependent)(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+ void (*hubp_setup_interdependent2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
+
void (*dcc_control)(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size);
@@ -165,7 +176,7 @@ struct hubp_funcs {
void (*hubp_program_pte_vm)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*hubp_set_vm_system_aperture_settings)(
@@ -179,7 +190,7 @@ struct hubp_funcs {
void (*hubp_program_surface_config)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -275,6 +286,7 @@ struct hubp_funcs {
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_clear_tiling)(struct hubp *hubp);
};
#endif
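The new hubp_setup2/hubp_setup_interdependent2 hooks take DML2.1-native
per-pipe register sets instead of the legacy _vcs_dpi_* structs. A hedged
sketch of how a caller might prefer the new hook when the HUBP implements it
(the fallback sequencing shown is an assumption, not the real driver path):

static void program_hubp_regs(struct hubp *hubp,
			      struct dml2_dchub_per_pipe_register_set *pipe_regs,
			      union dml2_global_sync_programming *global_sync,
			      struct dc_crtc_timing *timing)
{
	if (hubp->funcs->hubp_setup2)
		hubp->funcs->hubp_setup2(hubp, pipe_regs, global_sync, timing);
	/* else: fall back to the legacy hubp_setup() path, which consumes
	 * the _vcs_dpi_ display register structs instead. */
}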
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index af9183f5d69b..08c16ba52a51 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -168,6 +168,14 @@ struct link_encoder_funcs {
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
+ void (*enable_dpia_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+ void (*disable_dpia_output)(struct link_encoder *link_enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index a8b44f398ce6..42fbc70f7056 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -150,7 +150,7 @@ struct mem_input_funcs {
void (*mem_input_program_pte_vm)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*mem_input_set_vm_system_aperture_settings)(
@@ -164,7 +164,7 @@ struct mem_input_funcs {
void (*mem_input_program_surface_config)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -187,6 +187,8 @@ struct mem_input_funcs {
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param);
+ void (*mem_input_clear_tiling)(
+ struct mem_input *mem_input);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 03cbcbb36f1c..6fdc9809280c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -210,7 +210,7 @@ void optc1_enable_crtc_reset(struct timing_generator *optc,
bool optc1_configure_crc(struct timing_generator *optc, const struct crc_params *params);
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index b74e18cc1e66..9885cb3c310f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -141,6 +141,9 @@ struct crc_params {
bool continuous_mode;
bool enable;
+
+ uint8_t crc_eng_inst;
+ bool reset;
};
/**
@@ -291,7 +294,7 @@ struct timing_generator_funcs {
* @get_crc: Get CRCs for the given timing generator. Return false if
* CRCs are not enabled (via configure_crc).
*/
- bool (*get_crc)(struct timing_generator *tg,
+ bool (*get_crc)(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
void (*program_manual_trigger)(struct timing_generator *optc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index f04292086c08..fd1f9d3db039 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -148,6 +148,10 @@ struct link_service {
const struct dc_stream_state *stream,
const unsigned int num_streams);
+ uint32_t (*dp_required_hblank_size_bytes)(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
/*************************** DPMS *************************************/
void (*set_dpms_on)(struct dc_state *state, struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 4fb9cd6708d5..1d61d475d36f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -30,8 +30,8 @@
#include "../dce110/irq_service_dce110.h"
#include "irq_service_dcn201.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "cyan_skillfish_ip_offset.h"
#include "soc15_hw_ip.h"
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index ff8fe1a94965..96febabf464a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -251,7 +251,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
@@ -646,7 +646,7 @@ bool dp_set_test_pattern(
if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 3e47a6735912..06faa461067b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -164,7 +164,9 @@ void disable_dio_link_output(struct dc_link *link,
{
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
- link_enc->funcs->disable_output(link_enc, signal);
+ if (link_enc != NULL)
+ link_enc->funcs->disable_output(link_enc, signal);
+
link->dc->link_srv->dp_trace_source_sequence(link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index 348ea4cb832d..a6d1d7641ab4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -187,7 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
- return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
+ return ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
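The recurring change in this series replaces plain bit tests of chip_caps
with a masked equality against AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK. That
matters because the external-chip ID is a multi-bit field, so two distinct
IDs can share a set bit and a plain AND can match the wrong chip. A small
standalone illustration (the numeric values are hypothetical, chosen only to
show the failure mode):

#include <stdio.h>

#define EXT_CHIP_MASK     0x00ff
#define DP_FIXED_VS_EN    0x0002
#define HDMI20_PI3EQX1204 0x0006	/* shares bit 1 with DP_FIXED_VS_EN */

int main(void)
{
	unsigned short chip_caps = HDMI20_PI3EQX1204;

	/* Plain bit test: false positive, since bit 1 is set in both IDs. */
	printf("bit test:   %d\n", !!(chip_caps & DP_FIXED_VS_EN));

	/* Masked equality: correctly rejects the PI3EQX1204 ID. */
	printf("masked cmp: %d\n",
	       (chip_caps & EXT_CHIP_MASK) == DP_FIXED_VS_EN);
	return 0;
}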
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 6499807af72a..36adf95744fe 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -77,17 +77,74 @@ static void set_dio_dpia_lane_settings(struct dc_link *link,
{
}
+static void enable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->enable_dpia_output) {
+ uint8_t fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ link_enc->funcs->enable_dpia_output(
+ link_enc,
+ link_settings,
+ link->ddc_hw_inst,
+ digmode,
+ fec_rdy);
+ } else {
+ if (dc_is_dp_sst_signal(signal))
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ else
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+static void disable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->disable_dpia_output) {
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ } else
+ link_enc->funcs->disable_output(link_enc, signal);
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
- .disable_link_output = disable_dio_link_output,
+ .disable_link_output = disable_dpia_link_output,
.setup_audio_output = setup_dio_audio_output,
.enable_audio_packet = enable_dio_audio_packet,
.disable_audio_packet = disable_dio_audio_packet,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
- .enable_dp_link_output = enable_dio_dp_link_output,
+ .enable_dp_link_output = enable_dpia_link_output,
.set_dp_link_test_pattern = set_dio_dpia_link_test_pattern,
.set_dp_lane_settings = set_dio_dpia_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
index ad16ec5d9bb7..259e0f4775e1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
@@ -27,6 +27,9 @@
#include "link_hwss.h"
+#define DIG_SST_MODE 0
+#define DIG_MST_MODE 5
+
const struct link_hwss *get_dpia_link_hwss(void);
bool can_use_dpia_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index e026c728042a..550e1a098fa2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -829,7 +829,8 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
if (link->dc->debug.skip_detection_link_training ||
dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)) {
destrictive = false;
} else if (link_dp_get_encoding_format(&max_link_cap) ==
DP_8b_10b_ENCODING) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 5d66bfc7fe6e..ec7de9c01fab 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -772,6 +772,20 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
return result;
}
+static bool dp_set_hblank_reduction_on_rx(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ bool result = false;
+
+ if (dc_is_virtual_signal(stream->signal))
+ result = true;
+ else
+ result = dm_helpers_dp_write_hblank_reduction(dc->ctx, stream);
+ return result;
+}
+
/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
* i.e. after dp_enable_dsc_on_rx() had been called
*/
@@ -1953,11 +1967,15 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
if (stream->phy_pix_clk > 340000)
is_over_340mhz = true;
+ if (dc_is_tmds_signal(stream->signal) && stream->phy_pix_clk > 6000000UL) {
+ ASSERT(false);
+ return;
+ }
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
eng_id = pipe_ctx->stream_res.stream_enc->id;
@@ -1968,7 +1986,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
write_i2c_default_retimer_setting(pipe_ctx,
is_vga_mode, is_over_340mhz);
}
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
}
@@ -2024,7 +2042,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int lt_attempts = LINK_TRAINING_ATTEMPTS;
// Increase retry count if attempting DP1.x on FIXED_VS link
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
lt_attempts = 10;
@@ -2039,7 +2057,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
/* Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
do_fallback = true;
/*
@@ -2375,13 +2394,13 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
unsigned short masked_chip_caps = link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
//Need to inform that sink is going to use legacy HDMI mode.
write_scdc_data(
link->ddc,
165000,//vbios only handles 165Mhz.
false);
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
write_i2c_retimer_setting(pipe_ctx,
@@ -2389,7 +2408,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
else
write_i2c_default_retimer_setting(pipe_ctx,
false, false);
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, false);
}
@@ -2529,6 +2548,15 @@ void link_set_dpms_on(
if (pipe_ctx->stream->dpms_off)
return;
+ /* For a DP tunneling link, a pending HPD means there is a race between processing
+ * the current link and processing the pending HPD. If we enable the link now, we may
+ * end up with a link that is not actually connected to a sink, so skip enabling the
+ * link in this case.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending) {
+ DC_LOG_DEBUG("%s, Link%d HPD is pending, not enable it.\n", __func__, link->link_index);
+ return;
+ }
+
/* Have to setup DSC before DIG FE and BE are connected (which happens before the
* link training). This is to make sure the bandwidth sent to DIG BE won't be
* bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
@@ -2594,6 +2622,9 @@ void link_set_dpms_on(
}
}
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_set_hblank_reduction_on_rx(pipe_ctx);
+
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
allocate_usb4_bandwidth(pipe_ctx->stream);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 5e1b5ab9fbc6..a7877d57a00f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -101,6 +101,7 @@ static void construct_link_service_validation(struct link_service *link_srv)
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
/* link dpms owns the programming sequence of stream's dpms state associated
@@ -698,7 +699,7 @@ static bool construct_phy(struct dc_link *link,
link->chip_caps);
}
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
link->bios_forced_drive_settings.VOLTAGE_SWING =
(bios->integrated_info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
link->bios_forced_drive_settings.PRE_EMPHASIS =
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 60f15a9ba7a5..29606fda029d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -409,3 +409,182 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
}
+
+struct dp_audio_layout_config {
+ uint8_t layouts_per_sample_denom;
+ uint8_t symbols_per_layout;
+ uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+ uint32_t channel_count,
+ enum dp_link_encoding encoding,
+ struct dp_audio_layout_config *output)
+{
+ memset(output, 0, sizeof(struct dp_audio_layout_config));
+
+ /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP,
+ * with each layout being the same size (8ch layout).
+ */
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ }
+}
+
+static uint32_t get_av_stream_map_lane_count(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t av_stream_map_lane_count = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (!is_mst)
+ av_stream_map_lane_count = lane_count;
+ else
+ av_stream_map_lane_count = 4;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ av_stream_map_lane_count = 4;
+ }
+
+ ASSERT(av_stream_map_lane_count != 0);
+
+ return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t audio_sdp_overhead = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (is_mst)
+ audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+ else
+ audio_sdp_overhead = lane_count * 2 + 8;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ audio_sdp_overhead = 10; /* 4 x 2.5 */
+ }
+
+ ASSERT(audio_sdp_overhead != 0);
+
+ return audio_sdp_overhead;
+}
+
+/* Current calculation only applicable for 8b/10b MST and 128b/132b SST/MST.
+ */
+static uint32_t calculate_overhead_hblank_bw_in_symbols(
+ uint32_t max_slice_h)
+{
+ uint32_t overhead_hblank_bw = 0; /* in stream symbols */
+
+ overhead_hblank_bw += max_slice_h * 4; /* EOC overhead */
+ overhead_hblank_bw += 12; /* Main link overhead (VBID, BS/BE) */
+
+ return overhead_hblank_bw;
+}
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+ /* Main logic from dce_audio is duplicated here, with the main
+ * difference being:
+ * - Pre-determined lane count of 4
+ * - Assumed 16 dsc slices for worst case
+ * - Assumed SDP split disabled for worst case
+ * TODO: Unify logic from dce_audio to prevent duplicated logic.
+ */
+
+ const struct dc_crtc_timing *timing = audio_params->crtc_timing;
+ const uint32_t channel_count = audio_params->channel_count;
+ const uint32_t sample_rate_hz = audio_params->sample_rate_hz;
+ const enum dp_link_encoding link_encoding = audio_params->link_encoding;
+
+ // 8b/10b MST and 128b/132b are always 4 logical lanes.
+ const uint32_t lane_count = 4;
+ const bool is_mst = (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT);
+ // Maximum slice count is with ODM 4:1, 4 slices per DSC
+ const uint32_t max_slices_h = 16;
+
+ const uint32_t av_stream_map_lane_count = get_av_stream_map_lane_count(
+ link_encoding, lane_count, is_mst);
+ const uint32_t audio_sdp_overhead = get_audio_sdp_overhead(
+ link_encoding, lane_count, is_mst);
+ struct dp_audio_layout_config layout_config;
+
+ if (link_encoding == DP_8b_10b_ENCODING && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return 0;
+
+ get_audio_layout_config(
+ channel_count, link_encoding, &layout_config);
+
+ /* DP spec recommends a safety margin between 1.05 and 1.1 to prevent sample under-run */
+ struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+ struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+ timing->pix_clk_100hz, (long long)timing->h_total * 10);
+ struct fixed31_32 samples_per_line;
+ struct fixed31_32 layouts_per_line;
+ struct fixed31_32 symbols_per_sdp_max_layout;
+ struct fixed31_32 remainder;
+ uint32_t num_sdp_with_max_layouts;
+ uint32_t required_symbols_per_hblank;
+ uint32_t required_bytes_per_hblank = 0;
+
+ samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+ samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+ layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config.layouts_per_sample_denom);
+ // HBlank expansion usage assumes SDP split disabled to allow for worst case.
+ layouts_per_line = dc_fixpt_from_int(dc_fixpt_ceil(layouts_per_line));
+
+ num_sdp_with_max_layouts = dc_fixpt_floor(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp));
+ symbols_per_sdp_max_layout = dc_fixpt_from_int(
+ layout_config.max_layouts_per_audio_sdp * layout_config.symbols_per_layout);
+ symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+ symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+ required_symbols_per_hblank = num_sdp_with_max_layouts;
+ required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+
+ if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp))) {
+ remainder = dc_fixpt_sub_int(layouts_per_line,
+ num_sdp_with_max_layouts * layout_config.max_layouts_per_audio_sdp);
+ remainder = dc_fixpt_mul_int(remainder, layout_config.symbols_per_layout);
+ remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+ remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+ required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+ }
+
+ required_symbols_per_hblank += calculate_overhead_hblank_bw_in_symbols(max_slices_h);
+
+ if (link_encoding == DP_8b_10b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank; // 8 bits per 8b/10b symbol
+ else if (link_encoding == DP_128b_132b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank * 4; // 32 bits per 128b/132b symbol
+
+ return required_bytes_per_hblank;
+}
+
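To make the fixed-point flow above concrete, here is a floating-point
re-derivation for one assumed case: 2-channel 48 kHz L-PCM over 8b/10b MST
with a 148.5 MHz pixel clock and an h_total of 2200. The numbers are
illustrative only and can round slightly differently than the dc_fixpt path:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Constants mirror the 8b/10b MST rows of the helpers above. */
	const int symbols_per_layout = 40, sdp_overhead = 16, lanes = 4;
	const int layouts_per_sample_denom = 4, max_slices_h = 16;

	const double line_freq_khz = 1485000.0 / (2200 * 10);	/* 67.5 kHz */
	const double samples_per_line = 48.0 / line_freq_khz;	/* ~0.711 */
	const int layouts_per_line =
		(int)ceil(samples_per_line / layouts_per_sample_denom); /* 1 */

	/* One SDP carrying one layout, padded by the 1.1 safety margin. */
	int sdp_symbols = (int)ceil((layouts_per_line * symbols_per_layout +
				     sdp_overhead) * 1.1);	/* 62 */
	int required = (sdp_symbols + lanes) / lanes * lanes;	/* 64, lane aligned */

	required += max_slices_h * 4 + 12;	/* EOC + VBID/BS/BE: 76 */
	printf("required hblank bytes (8b/10b): %d\n", required);	/* 140 */
	return 0;
}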
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 595fb05946e9..bf398c49c3e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -37,4 +37,9 @@ uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
#endif /* __LINK_VALIDATION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index d6d5bbf2108c..267180e7bc48 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -505,7 +505,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((ddc->link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
!ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
ddc->ctx->dce_version == DCN_VERSION_3_1) {
/* Fixed VS workaround for AUX timeout */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 9dabaf682171..44c3023a7731 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1554,7 +1554,7 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
(dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
/* If you see this message consistently, either the host platform has FIXED_VS flag
* incorrectly configured or the sink device is returning an invalid count.
@@ -1632,13 +1632,6 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
}
- /* Read DP tunneling information. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dpcd_get_tunneling_device_data(link);
- if (status != DC_OK)
- dm_error("%s: Read tunneling device data failed.\n", __func__);
- }
-
dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some
* time before proceeding with possibly vendor specific transactions
@@ -1711,7 +1704,7 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed.\n", __func__);
+ dm_error("%s: Read DPRX feature list failed.\n", __func__);
/* AdaptiveSyncCapability */
dpcd_dprx_data = 0;
@@ -1726,15 +1719,13 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data;
if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed. Addr:%#x\n",
+ dm_error("%s: Read DPRX feature list_1 failed. Addr:%#x\n",
__func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1);
}
-
else {
link->dpcd_caps.dprx_feature.raw = 0;
}
-
/* Error condition checking...
* It is impossible for Sink to report Max Lane Count = 0.
* It is possible for Sink to report Max Link Rate = 0, if it is
@@ -1788,6 +1779,11 @@ static bool retrieve_link_cap(struct dc_link *link)
link->test_pattern_enabled = false;
link->compliance_test_state.raw = 0;
+ link->dpcd_caps.receive_port0_cap.raw[0] =
+ dpcd_data[DP_RECEIVE_PORT_0_CAP_0 - DP_DPCD_REV];
+ link->dpcd_caps.receive_port0_cap.raw[1] =
+ dpcd_data[DP_RECEIVE_PORT_0_BUFFER_SIZE - DP_DPCD_REV];
+
/* read sink count */
core_link_read_dpcd(link,
DP_SINK_COUNT,
@@ -1918,6 +1914,7 @@ static bool retrieve_link_cap(struct dc_link *link)
if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+ /* Read 128b/132b supported link rates */
core_link_read_dpcd(link,
DP_128B132B_SUPPORTED_LINK_RATES,
&link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
@@ -1965,6 +1962,13 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+ /* Read DP tunneling information. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ dm_error("%s: Read DP tunneling device data failed.\n", __func__);
+ }
+
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
@@ -2308,6 +2312,14 @@ bool dp_verify_link_cap_with_retries(
} else {
link->verified_link_cap = last_verified_link_cap;
}
+
+ /* For a DP tunneling link, a pending HPD means there is a race between processing
+ * the current link and processing the pending HPD. Since training failed, just break
+ * the loop so that we have a chance to process the pending HPD.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending)
+ break;
+
fsleep(10 * 1000);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 48abeaa88678..a08403c022ea 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -226,6 +226,8 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) {
bool allow_active;
+ link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -237,6 +239,9 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_configuration.raw,
sizeof(replay_configuration.raw));
+ /* Update desync error counter */
+ link->replay_settings.replay_desync_error_fail_count++;
+
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -408,7 +413,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
link->skip_fallback_on_link_loss = true;
device_service_clear.bits.AUTOMATED_TEST = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index bafa52a0165a..2c73ac87cd66 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -104,7 +104,7 @@ void dp_set_hw_lane_settings(
// Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
!is_immediate_downstream(link, offset) &&
- (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
+ (!((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 754c895e1bfb..88d4288cde0f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -739,7 +739,7 @@ void override_training_settings(
if (overrides->ffe_preset != NULL)
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
@@ -1574,7 +1574,7 @@ enum link_training_result dp_perform_link_training(
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index fe26fde12eeb..85298b8a1b5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -110,6 +110,23 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
+void mpc3_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ /* Always disable mpc out rate and flow control.
+ * MPC flow rate control is not needed for DCN30 and above.
+ */
+ REG_UPDATE_2(MUX[opp_id],
+ MPC_OUT_RATE_CONTROL_DISABLE, 1,
+ MPC_OUT_RATE_CONTROL, 0);
+}
+
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1519,6 +1536,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
+ .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index ce93003dae01..103f29900a2c 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
@@ -1085,6 +1085,13 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
+void mpc3_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control);
+
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 097d06023e64..19d5ebc6763c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -302,7 +302,6 @@ void optc1_program_timing(
/* Enable stereo - only when we need to pack 3D frame. Other types
* of stereo handled in explicit call
*/
-
if (optc->funcs->is_two_pixels_per_container(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
@@ -1471,37 +1470,71 @@ bool optc1_configure_crc(struct timing_generator *optc,
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set CRC mode and selection, and enable. */
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set CRC mode and selection, and enable. */
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
@@ -1510,6 +1543,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* optc1_get_crc - Capture CRC result per component
*
* @optc: timing_generator instance.
+ * @idx: index of the CRC engine to read CRC from
* @r_cr: 16-bit primary CRC signature for red data.
* @g_y: 16-bit primary CRC signature for green data.
* @b_cb: 16-bit primary CRC signature for blue data.
@@ -1521,7 +1555,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* If CRC is disabled, return false; otherwise, return true, and the CRC
* results in the parameters.
*/
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
@@ -1533,14 +1567,30 @@ bool optc1_get_crc(struct timing_generator *optc,
if (!field)
return false;
- /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
- REG_GET_2(OTG_CRC0_DATA_RG,
- CRC0_R_CR, r_cr,
- CRC0_G_Y, g_y);
+ switch (idx) {
+ case 0:
+ /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC0_DATA_RG,
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
- /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
- REG_GET(OTG_CRC0_DATA_B,
- CRC0_B_CB, b_cb);
+ /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC0_DATA_B,
+ CRC0_B_CB, b_cb);
+ break;
+ case 1:
+ /* OTG_CRC1_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC1_DATA_RG,
+ CRC1_R_CR, r_cr,
+ CRC1_G_Y, g_y);
+
+ /* OTG_CRC1_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC1_DATA_B,
+ CRC1_B_CB, b_cb);
+ break;
+ default:
+ return false;
+ }
return true;
}
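With the crc_eng_inst/idx plumbing above, a caller can now program and read
the second CRC engine without disturbing engine 0. A hedged usage sketch
(window fields left at their defaults and the selection value assumed; tg is
an enabled timing generator):

static bool read_crc1(struct timing_generator *tg,
		      uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	struct crc_params params = {
		.selection = CRC_SELECTION_ACTIVE_ONLY,	/* assumed value */
		.continuous_mode = true,
		.enable = true,
		.crc_eng_inst = 1,	/* program the OTG_CRC1_* registers */
		.reset = false,		/* keep engine 0 configuration intact */
	};

	if (!tg->funcs->configure_crc(tg, &params))
		return false;

	/* idx selects which engine's OTG_CRCn_DATA_* registers are read. */
	return tg->funcs->get_crc(tg, 1, r_cr, g_y, b_cb);
}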
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 40757f20d73f..159172178d51 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -86,6 +86,12 @@
SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC1_DATA_B, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_Y_CONTROL, OTG, inst),\
SR(GSL_SOURCE_SELECT),\
SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst)
@@ -315,6 +321,7 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC1_SELECT, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
@@ -327,6 +334,17 @@ struct dcn_optc_registers {
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_B, CRC1_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_END, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
@@ -482,6 +500,7 @@ struct dcn_optc_registers {
type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
type OTG_CRC_CONT_EN;\
type OTG_CRC0_SELECT;\
+ type OTG_CRC1_SELECT;\
type OTG_CRC_EN;\
type CRC0_R_CR;\
type CRC0_G_Y;\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index dfa9364fe5a6..d21e82b927d0 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -183,34 +183,87 @@ static bool optc35_configure_crc(struct timing_generator *optc,
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* Cannot configure crc on a CRTC that is disabled */
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
if (!params->enable)
return true;
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
- if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0) {
- REG_UPDATE_4(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1,
- OTG_CRC_WINDOW_DB_EN, 1);
- } else
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+
+ /* Program frame boundaries */
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 783ca9acc762..338a0cad23a5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -315,7 +315,7 @@ void optc401_set_drr(
struct drr_params amended_params = { 0 };
bool program_manual_trigger = false;
- if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
+ if (dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver && dc->debug.fams2_config.bits.enable) {
if (params != NULL &&
params->vertical_total_max > 0 &&
params->vertical_total_min > 0) {
@@ -380,7 +380,7 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i
{
struct dc *dc = optc->ctx->dc;
- if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
+ if (dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver && dc->debug.fams2_config.bits.enable) {
/* FAMS2 */
dc_dmub_srv_fams2_drr_update(dc, optc->inst,
vtotal_min,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 770a380cc03d..e92f14d50adb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -1258,6 +1258,11 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
return NULL;
}
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
+{
+ return pipe_ctx->pipe_dlg_param.vstartup_start;
+}
+
static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
@@ -1272,7 +1277,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
index bf8e33cd8147..7bc1be53e800 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
@@ -51,6 +51,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
const struct resource_pool *pool,
struct dc_stream_state *stream);
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx);
#endif /* __DC_RESOURCE_DCN10_H__ */
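get_vstartup_for_pipe is a new resource_funcs hook: core code asks for vstartup through the function-pointer table, and each ASIC family answers from its own pipe-context layout (pipe_dlg_param on pre-DCN4.01 pools, global_sync.dcn4x on DCN4.01). A simplified standalone model of that indirection, using stand-in types rather than the real pipe_ctx:

#include <stdio.h>

/* Simplified stand-ins for the driver's pipe context layouts. */
struct pipe_ctx {
	struct { unsigned int vstartup_start; } pipe_dlg_param;                /* pre-DCN4.01 */
	struct { struct { unsigned int vstartup_lines; } dcn4x; } global_sync; /* DCN4.01 */
};

struct resource_funcs {
	unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
};

static unsigned int dcn10_get_vstartup(struct pipe_ctx *p)
{
	return p->pipe_dlg_param.vstartup_start;
}

static unsigned int dcn401_get_vstartup(struct pipe_ctx *p)
{
	return p->global_sync.dcn4x.vstartup_lines;
}

int main(void)
{
	struct pipe_ctx pipe = { { 83 }, { { 120 } } };
	struct resource_funcs dcn10 = { dcn10_get_vstartup };
	struct resource_funcs dcn401 = { dcn401_get_vstartup };

	/* Core code only sees the hook, not the per-ASIC storage. */
	printf("dcn10:  %u\n", dcn10.get_vstartup_for_pipe(&pipe));
	printf("dcn401: %u\n", dcn401.get_vstartup_for_pipe(&pipe));
	return 0;
}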
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 7a5b9aa5292c..5c6dc710e96c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -1509,60 +1509,9 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
if (prev_odm_pipe->plane_state) {
- struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
- struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp;
- int new_width;
-
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Calculate new vp and recout for left pipe */
- /* Need at least 16 pixels width per side */
- if (sd->recout.x + 16 >= sd->h_active)
- return false;
- new_width = sd->h_active - sd->recout.x;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
-
- /* Calculate new vp and recout for right pipe */
- sd = &next_odm_pipe->plane_res.scl_data;
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Need at least 16 pixels width per side */
- if (new_width <= 16)
- return false;
- new_width = sd->recout.width + sd->recout.x - sd->h_active;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
- sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->h_active - sd->recout.x));
- sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->h_active - sd->recout.x));
- sd->recout.x = 0;
-
- /*
- * When odm is used in YcbCr422 or 420 colour space, a split screen
- * will be seen with the previous calculations since the extra left
- * edge pixel is accounted for in fmt but not in viewport.
- *
- * Below are calculations which fix the split by fixing the calculations
- * if there is an extra left edge pixel.
- */
- if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count
- && opp->funcs->opp_get_left_edge_extra_pixel_count(
- opp, next_odm_pipe->stream->timing.pixel_encoding,
- resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) {
- sd->h_active += 1;
- sd->recout.width += 1;
- sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
- sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
- sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
- sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
+ if (!resource_build_scaling_params(prev_odm_pipe) ||
+ !resource_build_scaling_params(next_odm_pipe)) {
+ return false;
}
}
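dcn20_split_stream_for_odm() previously open-coded the viewport/recout halving; it now defers to resource_build_scaling_params() for both ODM pipes and fails the split if either call fails. A rough standalone sketch of the minimum-width constraint the removed code enforced (simplified; the real path also rescales the viewport by the horizontal ratio):

#include <stdbool.h>
#include <stdio.h>

/* Under 2:1 ODM each pipe sees half of h_active and needs > 16 px of recout. */
static bool odm_split_fits(int h_active, int recout_x, int recout_width)
{
	int half = h_active / 2;                      /* HACTIVE halved per pipe */
	int left_width = half - recout_x;             /* left pipe's share */
	int right_width = recout_width - left_width;  /* remainder on the right */

	return left_width > 16 && right_width > 16;
}

int main(void)
{
	printf("%d\n", odm_split_fits(3840, 0, 3840));  /* 1: 1920 px per side */
	printf("%d\n", odm_split_fits(3840, 1912, 40)); /* 0: left side only 8 px */
	return 0;
}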
@@ -2280,7 +2229,8 @@ static const struct resource_funcs dcn20_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index d3d67d366523..43fa2cb117f3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -59,8 +59,8 @@
#include "cyan_skillfish_ip_offset.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "dpcs/dpcs_2_0_3_offset.h"
#include "dpcs/dpcs_2_0_3_sh_mask.h"
@@ -1079,7 +1079,8 @@ static struct resource_funcs dcn201_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn201_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 021ba8ac5c8c..2615c36d5ffe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -1378,6 +1378,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
.get_panel_config_defaults = dcn21_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index bfd0eccbed28..13202ce30d66 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2250,6 +2250,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.update_bw_bounding_box = dcn30_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn30_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
#define CTX ctx
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index a9816affd312..121a86a59833 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -671,9 +671,9 @@ static const struct dc_plane_cap plane_cap = {
/* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 167,
- .nv12 = 167,
- .fp16 = 167
+ .argb8888 = 358,
+ .nv12 = 358,
+ .fp16 = 358
},
64,
64
@@ -693,7 +693,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 7680,/*upto 8K*/
+ .max_downscale_src_width = 4096, /* up to true 4K */
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
@@ -1400,7 +1400,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn301_update_bw_bounding_box,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn301_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 7baefc910a3d..012c5fd52cb1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -1151,6 +1151,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn302_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 8a57d46ad15f..a8d0b4686f9a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1096,6 +1096,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
.update_bw_bounding_box = dcn303_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn303_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 54ec3d8e920c..911bd60d4fbc 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1849,6 +1849,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 2794473f2aff..e3ba105034f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1778,6 +1778,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 4ee33eb3381d..14acef036b5a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1846,6 +1846,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 79eddbafe3c2..568094827212 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1720,6 +1720,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 12d247a7ec45..664302876019 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -2066,6 +2066,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 06b9479c8bd3..38d76434683e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1624,6 +1624,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 89e2adcf2a28..8ee3d99ea2aa 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1752,6 +1752,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
return out;
}
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ plane_state->tiling_info.gfxversion = DcGfxVersion9;
+ dcn20_patch_unknown_plane_state(plane_state);
+ return DC_OK;
+}
+
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
@@ -1775,10 +1782,11 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn35_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index f97bb4cb3761..9d03a55d90cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -35,6 +35,7 @@
extern struct _vcs_dpi_ip_params_st dcn3_5_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc;
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state);
struct dcn35_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 263a37c1cd3a..14f7c3acdc96 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1754,10 +1754,11 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn351_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 2a3dabfe3cea..c1ebc6b1c937 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -726,6 +726,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_unbounded_requesting = false,
.enable_legacy_fast_update = false,
.dcc_meta_propagation_delay_us = 10,
+ .fams_version = {
+ .minor = 1,
+ .major = 2,
+ }, //v2.1
.fams2_config = {
.bits = {
.enable = true,
@@ -733,7 +737,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_stall_recovery = true,
}
},
- .force_cositing = CHROMA_COSITING_TOPLEFT + 1,
+ .force_cositing = CHROMA_COSITING_NONE + 1,
};
static struct dce_aux *dcn401_aux_engine_create(
@@ -1293,6 +1297,29 @@ static struct hpo_dp_link_encoder *dcn401_hpo_dp_link_encoder_create(
return &hpo_dp_enc31->base;
}
+static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+{
+ unsigned int num_available_chans = 1;
+
+ /* channels for MALL must be a power of 2 */
+ while (num_chans > 1) {
+ num_available_chans = (num_available_chans << 1);
+ num_chans = (num_chans >> 1);
+ }
+
+ /* cannot be odd */
+ num_available_chans &= ~1;
+
+ /* clamp to max available channels for MALL per ASIC */
+ if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
+ } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
+ }
+
+ return num_available_chans;
+}
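The loop rounds the channel count down to a power of two before clamping per ASIC, and the caller multiplies the result by the per-channel MALL size to get max_cab_allocation_bytes. A standalone model with a worked example (the 64 MiB per-channel size is an assumed value; the driver reads it from dc->caps.mall_size_per_mem_channel):

#include <stdio.h>

/* Round down to a power of two, as in dcn401_calc_num_avail_chans_for_mall(). */
static unsigned int mall_chans(unsigned int num_chans)
{
	unsigned int avail = 1;

	while (num_chans > 1) {
		avail <<= 1;
		num_chans >>= 1;
	}
	avail &= ~1u;       /* cannot be odd (also zeroes avail for 1 channel) */
	if (avail > 16)
		avail = 16; /* GC 12.0.0 clamp; GC 12.0.1 clamps to 8 instead */
	return avail;
}

int main(void)
{
	/* 12 channels round down to 8; at 64 MiB per channel -> 512 MiB of MALL. */
	unsigned int chans = mall_chans(12);
	unsigned long long bytes = (unsigned long long)chans * 64 * 1024 * 1024;

	printf("channels=%u cab_bytes=%llu\n", chans, bytes);
	return 0;
}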
+
static struct dce_hwseq *dcn401_hwseq_create(
struct dc_context *ctx)
{
@@ -1588,6 +1615,14 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
+ /* re-calculate the available MALL size if required */
+ if (bw_params->num_channels > 0) {
+ dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
+ dc, bw_params->num_channels) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+ }
+
DC_FP_START();
dcn401_update_bw_bounding_box_fpu(dc, bw_params);
@@ -1605,6 +1640,7 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
+ plane_state->tiling_info.gfxversion = DcGfxAddr3;
plane_state->tiling_info.gfx_addr3.swizzle = DC_ADDR3_SW_64KB_2D;
return DC_OK;
}
@@ -1704,27 +1740,9 @@ static int dcn401_get_power_profile(const struct dc_state *context)
return dpm_level;
}
-static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+static unsigned int dcn401_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
{
- unsigned int num_available_chans = 1;
-
- /* channels for MALL must be a power of 2 */
- while (num_chans > 1) {
- num_available_chans = (num_available_chans << 1);
- num_chans = (num_chans >> 1);
- }
-
- /* cannot be odd */
- num_available_chans &= ~1;
-
- /* clamp to max available channels for MALL per ASIC */
- if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
- num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
- } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
- num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
- }
-
- return num_available_chans;
+ return pipe_ctx->global_sync.dcn4x.vstartup_lines;
}
static struct resource_funcs dcn401_res_pool_funcs = {
@@ -1754,6 +1772,7 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.get_power_profile = dcn401_get_power_profile,
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 73a65913cb12..38a9a0d68058 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -11,6 +11,41 @@
#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
+static bool spl_is_yuv420(enum spl_pixel_format format)
+{
+ if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
+ (format <= SPL_PIXEL_FORMAT_420BPP10))
+ return true;
+
+ return false;
+}
+
+static bool spl_is_rgb8(enum spl_pixel_format format)
+{
+ if (format == SPL_PIXEL_FORMAT_ARGB8888)
+ return true;
+
+ return false;
+}
+
+static bool spl_is_video_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
+ && format <= SPL_PIXEL_FORMAT_VIDEO_END)
+ return true;
+ else
+ return false;
+}
+
+static bool spl_is_subsampled_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN
+ && format <= SPL_PIXEL_FORMAT_SUBSAMPLED_END)
+ return true;
+ else
+ return false;
+}
+
static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1)
{
struct spl_rect rec;
@@ -137,15 +172,32 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
struct spl_in *spl_in,
struct spl_rect *plane_clip_rec)
{
- int mpc_slice_count = spl_in->basic_in.mpc_combine_h;
- int mpc_slice_idx = spl_in->basic_in.mpc_combine_v;
+ bool use_recout_width_aligned =
+ spl_in->basic_in.num_h_slices_recout_width_align.use_recout_width_aligned;
+ int mpc_slice_count =
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_num_h_slices;
+ int recout_width_align =
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_recout_width_align;
+ int mpc_slice_idx = spl_in->basic_in.mpc_h_slice_index;
int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
struct spl_rect mpc_rec;
- mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
- mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
- mpc_rec.height = plane_clip_rec->height;
- mpc_rec.y = plane_clip_rec->y;
+ if (use_recout_width_aligned) {
+ mpc_rec.width = recout_width_align;
+ if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) {
+ mpc_rec.width = plane_clip_rec->width % recout_width_align;
+ mpc_rec.x = plane_clip_rec->x + recout_width_align * mpc_slice_idx;
+ } else
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+
+ } else {
+ mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ }
SPL_ASSERT(mpc_slice_count == 1 ||
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE ||
mpc_rec.width % 2 == 0);
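With use_recout_width_aligned set, every MPC slice is recout_width_align pixels wide and only the last slice absorbs the remainder of the plane clip width. A standalone model of that computation:

#include <stdio.h>

/*
 * Recout-width-aligned slicing as in calculate_mpc_slice_in_timing_active():
 * full-width slices of `align` pixels, with a trailing partial slice taking
 * the remainder of the plane clip width.
 */
static void mpc_slice(int clip_x, int clip_width, int align, int slice_idx,
		      int *x, int *width)
{
	*width = align;
	if (align * (slice_idx + 1) > clip_width)
		*width = clip_width % align; /* trailing partial slice */
	*x = clip_x + align * slice_idx;
}

int main(void)
{
	int x, w, i;

	/* 1000 px plane, 400 px alignment: slices of 400, 400, 200. */
	for (i = 0; i < 3; i++) {
		mpc_slice(0, 1000, 400, i, &x, &w);
		printf("slice %d: x=%d width=%d\n", i, x, w);
	}
	return 0;
}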
@@ -391,8 +443,7 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in,
spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz;
spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert;
- if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) {
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
spl_scratch->scl_data.ratios.horz_c.value /= 2;
spl_scratch->scl_data.ratios.vert_c.value /= 2;
}
@@ -529,23 +580,6 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
*vp_offset = src_size - *vp_offset - *vp_size;
}
-static bool spl_is_yuv420(enum spl_pixel_format format)
-{
- if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
- (format <= SPL_PIXEL_FORMAT_420BPP10))
- return true;
-
- return false;
-}
-
-static bool spl_is_rgb8(enum spl_pixel_format format)
-{
- if (format == SPL_PIXEL_FORMAT_ARGB8888)
- return true;
-
- return false;
-}
-
/*Calculate inits and viewport */
static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_scratch *spl_scratch)
@@ -556,8 +590,7 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_rect recout_clip_in_recout_dst;
struct spl_rect overlap_in_active_timing;
struct spl_rect odm_slice = calculate_odm_slice_in_timing_active(spl_in);
- int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ int vpc_div = spl_is_subsampled_format(spl_in->basic_in.format) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
struct spl_fixed31_32 init_adj_h = spl_fixpt_zero;
struct spl_fixed31_32 init_adj_v = spl_fixpt_zero;
@@ -585,12 +618,7 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
&flip_vert_scan_dir,
&flip_horz_scan_dir);
- if (orthogonal_rotation) {
- spl_swap(src.width, src.height);
- spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
- }
-
- if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if (spl_is_subsampled_format(spl_in->basic_in.format)) {
/* this gives the direction of the cositing (negative will move
* left, right otherwise)
*/
@@ -598,15 +626,15 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
switch (spl_in->basic_in.cositing) {
- case CHROMA_COSITING_LEFT:
- init_adj_h = spl_fixpt_zero;
+ case CHROMA_COSITING_TOPLEFT:
+ init_adj_h = spl_fixpt_from_fraction(sign, 4);
init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
- case CHROMA_COSITING_NONE:
+ case CHROMA_COSITING_LEFT:
init_adj_h = spl_fixpt_from_fraction(sign, 4);
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
+ init_adj_v = spl_fixpt_zero;
break;
- case CHROMA_COSITING_TOPLEFT:
+ case CHROMA_COSITING_NONE:
default:
init_adj_h = spl_fixpt_zero;
init_adj_v = spl_fixpt_zero;
@@ -614,6 +642,12 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
}
}
+ if (orthogonal_rotation) {
+ spl_swap(src.width, src.height);
+ spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ spl_swap(init_adj_h, init_adj_v);
+ }
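The reordered switch assigns quarter-pixel chroma init adjustments per cositing mode, and the spl_swap() now runs after it so a 90-degree rotation exchanges the horizontal and vertical adjustments along with the scan directions. A plain-double sketch of those assignments (spl_fixed31_32 arithmetic elided; sign comes from the scan direction):

#include <stdio.h>

enum cositing { COSITING_NONE, COSITING_LEFT, COSITING_TOPLEFT };

static void cositing_adj(enum cositing c, int sign, double *h, double *v)
{
	switch (c) {
	case COSITING_TOPLEFT:  /* chroma cosited on both axes */
		*h = sign / 4.0;
		*v = sign / 4.0;
		break;
	case COSITING_LEFT:     /* cosited horizontally only */
		*h = sign / 4.0;
		*v = 0.0;
		break;
	case COSITING_NONE:     /* interstitial: no adjustment */
	default:
		*h = 0.0;
		*v = 0.0;
		break;
	}
}

int main(void)
{
	double h, v, tmp;

	cositing_adj(COSITING_LEFT, -1, &h, &v);
	printf("left:    h=%+.2f v=%+.2f\n", h, v);

	/* 90-degree rotation swaps the axes, hence the added spl_swap(). */
	tmp = h; h = v; v = tmp;
	printf("rotated: h=%+.2f v=%+.2f\n", h, v);
	return 0;
}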
+
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
@@ -678,7 +712,7 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
* since 3d is special and needs to calculate vp as if there is no recout offset
* This may break with rotation, good thing we aren't mixing hw rotation and 3d
*/
- if (spl_in->basic_in.mpc_combine_v) {
+ if (spl_in->basic_in.mpc_h_slice_index) {
SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
(spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM &&
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE));
@@ -698,24 +732,6 @@ static void spl_clamp_viewport(struct spl_rect *viewport)
viewport->width = MIN_VIEWPORT_SIZE;
}
-static bool spl_dscl_is_420_format(enum spl_pixel_format format)
-{
- if (format == SPL_PIXEL_FORMAT_420BPP8 ||
- format == SPL_PIXEL_FORMAT_420BPP10)
- return true;
- else
- return false;
-}
-
-static bool spl_dscl_is_video_format(enum spl_pixel_format format)
-{
- if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
- && format <= SPL_PIXEL_FORMAT_VIDEO_END)
- return true;
- else
- return false;
-}
-
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
const struct spl_scaler_data *data,
bool enable_isharp, bool enable_easf)
@@ -732,8 +748,8 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
&& !enable_isharp)
return SCL_MODE_SCALING_444_BYPASS;
- if (!spl_dscl_is_420_format(pixel_format)) {
- if (spl_dscl_is_video_format(pixel_format))
+ if (!spl_is_subsampled_format(pixel_format)) {
+ if (spl_is_video_format(pixel_format))
return SCL_MODE_SCALING_444_YCBCR_ENABLE;
else
return SCL_MODE_SCALING_444_RGB_ENABLE;
@@ -756,7 +772,7 @@ static bool spl_choose_lls_policy(enum spl_pixel_format format,
enum spl_transfer_func_predefined tf_predefined_type,
enum linear_light_scaling *lls_pref)
{
- if (spl_is_yuv420(format)) {
+ if (spl_is_video_format(format)) {
*lls_pref = LLS_PREF_NO;
if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
(tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
@@ -815,7 +831,7 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
/* Check if video is in fullscreen mode */
static bool spl_is_video_fullscreen(struct spl_in *spl_in)
{
- if (spl_is_yuv420(spl_in->basic_in.format) && spl_in->is_fullscreen)
+ if (spl_is_video_format(spl_in->basic_in.format) && spl_in->is_fullscreen)
return true;
return false;
}
@@ -846,10 +862,10 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
* Apply sharpness to RGB and YUV (NV12/P010)
* surfaces based on policy setting
*/
- if (!spl_is_yuv420(spl_in->basic_in.format) &&
+ if (!spl_is_video_format(spl_in->basic_in.format) &&
(spl_in->sharpen_policy == SHARPEN_YUV))
return enable_isharp;
- else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) &&
+ else if ((spl_is_video_format(spl_in->basic_in.format) && !fullscreen) &&
(spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
return enable_isharp;
else if (!spl_in->is_fullscreen &&
@@ -882,8 +898,8 @@ static void spl_get_taps_non_adaptive_scaler(
if (in_taps->v_taps == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1)
- spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
- spl_scratch->scl_data.ratios.vert, 2)), 8);
+ spl_scratch->scl_data.taps.v_taps = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.vert), 8);
else
spl_scratch->scl_data.taps.v_taps = 4;
} else
@@ -891,8 +907,8 @@ static void spl_get_taps_non_adaptive_scaler(
if (in_taps->v_taps_c == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1)
- spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
- spl_scratch->scl_data.ratios.vert_c, 2)), 8);
+ spl_scratch->scl_data.taps.v_taps_c = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.vert_c), 8);
else
spl_scratch->scl_data.taps.v_taps_c = 4;
} else
@@ -932,7 +948,7 @@ static bool spl_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
bool skip_easf = false;
- bool is_ycbcr = spl_dscl_is_video_format(spl_in->basic_in.format);
+ bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
@@ -964,7 +980,7 @@ static bool spl_get_optimal_number_of_taps(
if (skip_easf)
spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
else {
- if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
spl_scratch->scl_data.taps.v_taps = 6;
spl_scratch->scl_data.taps.h_taps_c = 4;
@@ -982,8 +998,7 @@ static bool spl_get_optimal_number_of_taps(
min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c);
/* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
- if ((spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8)
- || (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10))
+ if (spl_is_yuv420(spl_in->basic_in.format))
lb_config = LB_MEMORY_CONFIG_3;
else
lb_config = LB_MEMORY_CONFIG_0;
@@ -1039,13 +1054,11 @@ static bool spl_get_optimal_number_of_taps(
if (spl_scratch->scl_data.taps.h_taps_c == 5)
spl_scratch->scl_data.taps.h_taps_c = 4;
- if (spl_is_yuv420(spl_in->basic_in.format)) {
- if ((spl_scratch->scl_data.taps.h_taps <= 4) ||
- (spl_scratch->scl_data.taps.h_taps_c <= 3)) {
+ if (spl_is_video_format(spl_in->basic_in.format)) {
+ if (spl_scratch->scl_data.taps.h_taps <= 4) {
*enable_easf_v = false;
*enable_easf_h = false;
- } else if ((spl_scratch->scl_data.taps.v_taps <= 3) ||
- (spl_scratch->scl_data.taps.v_taps_c <= 3)) {
+ } else if (spl_scratch->scl_data.taps.v_taps <= 3) {
*enable_easf_v = false;
*enable_easf_h = true;
} else {
@@ -1086,10 +1099,10 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_ycbcr)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_ycbcr)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
*enable_easf_v = false;
@@ -1103,11 +1116,11 @@ static bool spl_get_optimal_number_of_taps(
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)))
spl_scratch->scl_data.taps.v_taps = 1;
- if ((!*enable_easf_h) && !is_ycbcr &&
+ if ((!*enable_easf_h) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)))
spl_scratch->scl_data.taps.h_taps_c = 1;
- if ((!*enable_easf_v) && !is_ycbcr &&
+ if ((!*enable_easf_v) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
spl_scratch->scl_data.taps.v_taps_c = 1;
}
@@ -1118,7 +1131,7 @@ static bool spl_get_optimal_number_of_taps(
static void spl_set_black_color_data(enum spl_pixel_format format,
struct scl_black_color *scl_black_color)
{
- bool ycbcr = spl_dscl_is_video_format(format);
+ bool ycbcr = spl_is_video_format(format);
if (ycbcr) {
scl_black_color->offset_rgb_y = BLACK_OFFSET_RGB_Y;
scl_black_color->offset_rgb_cbcr = BLACK_OFFSET_CBCR;
@@ -1585,7 +1598,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
0x0; // fp1.5.10, C3 coefficient
}
- if (spl_is_yuv420(format)) { /* TODO: 0 = RGB, 1 = YUV */
+ if (spl_is_subsampled_format(format)) { /* TODO: 0 = RGB, 1 = YUV */
dscl_prog_data->easf_matrix_mode = 1;
/*
* 2-bit, BF3 chroma mode correction calculation mode
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 55d557df4aa5..467af9dd90de 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -63,13 +63,13 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_420BPP8,
SPL_PIXEL_FORMAT_420BPP10,
/*end of pixel format definition*/
- SPL_PIXEL_FORMAT_INVALID,
- SPL_PIXEL_FORMAT_422BPP8,
- SPL_PIXEL_FORMAT_422BPP10,
SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8,
SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16,
+ SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
+ SPL_PIXEL_FORMAT_SUBSAMPLED_END = SPL_PIXEL_FORMAT_420BPP10,
SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
SPL_PIXEL_FORMAT_VIDEO_END = SPL_PIXEL_FORMAT_420BPP10,
+ SPL_PIXEL_FORMAT_INVALID,
SPL_PIXEL_FORMAT_UNKNOWN
};
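The spl_is_*() predicates added earlier compare a format against BEGIN/END sentinels, so the ordering of this enum is load-bearing: SPL_PIXEL_FORMAT_INVALID had to move below the range markers to stay outside every range. A trimmed standalone model showing why:

#include <assert.h>
#include <stdio.h>

/* Trimmed copy of the reordered tail of enum spl_pixel_format. */
enum spl_pixel_format {
	SPL_PIXEL_FORMAT_ARGB8888,
	SPL_PIXEL_FORMAT_420BPP8,
	SPL_PIXEL_FORMAT_420BPP10,
	SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
	SPL_PIXEL_FORMAT_SUBSAMPLED_END = SPL_PIXEL_FORMAT_420BPP10,
	SPL_PIXEL_FORMAT_INVALID, /* must stay outside the ranges */
};

static int is_subsampled(enum spl_pixel_format f)
{
	return f >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN &&
	       f <= SPL_PIXEL_FORMAT_SUBSAMPLED_END;
}

int main(void)
{
	assert(is_subsampled(SPL_PIXEL_FORMAT_420BPP8));
	assert(!is_subsampled(SPL_PIXEL_FORMAT_ARGB8888));
	assert(!is_subsampled(SPL_PIXEL_FORMAT_INVALID));
	printf("range predicates ok\n");
	return 0;
}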
@@ -436,8 +436,14 @@ struct basic_in {
struct spl_rect clip_rect; // Clip rect
enum spl_rotation_angle rotation; // Rotation
bool horizontal_mirror; // Horizontal mirror
- int mpc_combine_h; // MPC Horizontal Combine Factor (split_count)
- int mpc_combine_v; // MPC Vertical Combine Factor (split_idx)
+ struct { // previously mpc_combine_h (split_count)
+ bool use_recout_width_aligned;
+ union {
+ int mpc_num_h_slices;
+ int mpc_recout_width_align;
+ } num_slices_recout_width;
+ } num_h_slices_recout_width_align;
+ int mpc_h_slice_index; // previously mpc_combine_v (split_idx)
// Inputs for adaptive scaler - TODO
enum spl_transfer_func_type tf_type; /* Transfer function type */
enum spl_transfer_func_predefined tf_predefined_type; /* Transfer function predefined type */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index b353c4ceb60d..4b3ccbca0da2 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -69,6 +69,9 @@
#define DMUB_PC_SNAPSHOT_COUNT 10
+/* Default tracebuffer size if meta is absent. */
+#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
+
/* Forward declarations */
struct dmub_srv;
struct dmub_srv_common_regs;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b800a507d1e0..d0fe324cb537 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -431,7 +431,68 @@ union replay_debug_flags {
*/
uint32_t enable_ips_residency_profiling : 1;
- uint32_t reserved : 20;
+ /**
+ * 0x1000 (bit 12)
+ * @enable_coasting_vtotal_check: Enable Coasting_vtotal_check
+ */
+ uint32_t enable_coasting_vtotal_check : 1;
+ /**
+ * 0x2000 (bit 13)
+ * @enable_visual_confirm_debug: Enable Visual Confirm Debug
+ */
+ uint32_t enable_visual_confirm_debug : 1;
+
+ uint32_t reserved : 18;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+/**
+ * Flags recording the replay error state.
+ */
+union replay_visual_confirm_error_state_flags {
+ struct {
+ /**
+ * 0x1 (bit 0) - Desync Error flag.
+ */
+ uint32_t desync_error : 1;
+
+ /**
+ * 0x2 (bit 1) - State Transition Error flag.
+ */
+ uint32_t state_transition_error : 1;
+
+ /**
+ * 0x4 (bit 2) - Crc Error flag
+ */
+ uint32_t crc_error : 1;
+
+ /**
+ * 0x8 (bit 3) - Reserved
+ */
+ uint32_t reserved_3 : 1;
+
+ /**
+ * 0x10 (bit 4) - Incorrect coasting vtotal in static screen; the new
+ * debug flag controls the resulting DPCD write.
+ */
+ uint32_t incorrect_vtotal_in_static_screen : 1;
+
+ /**
+ * 0x20 (bit 5) - No doubled Refresh Rate.
+ */
+ uint32_t no_double_rr : 1;
+
+ /**
+ * Reserved bits 6-7
+ */
+ uint32_t reserved_6_7 : 2;
+
+ /**
+ * Reserved bits 8-31
+ */
+ uint32_t reserved_9_31 : 24;
} bitfields;
uint32_t u32All;
@@ -475,11 +536,23 @@ union replay_hw_flags {
* Use TPS3 signal when restore main link.
*/
uint32_t force_wakeup_by_tps3 : 1;
+ /**
+ * @is_alpm_initialized: Indicates whether ALPM is initialized
+ */
+ uint32_t is_alpm_initialized : 1;
} bitfields;
uint32_t u32All;
};
+union fw_assisted_mclk_switch_version {
+ struct {
+ uint8_t minor : 5;
+ uint8_t major : 3;
+ };
+ uint8_t ver;
+};
+
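The union packs major.minor into one byte; with minor in the low five bits on the usual little-endian bitfield layout (bitfield ordering is implementation-defined in C), the v2.1 default programmed in dcn401's debug options packs to 0x41, and optc401 now requires an exact match against dmub_caps.fams_ver rather than >= 2. A standalone check:

#include <stdio.h>
#include <stdint.h>

union fw_assisted_mclk_switch_version {
	struct {
		uint8_t minor : 5;
		uint8_t major : 3;
	};
	uint8_t ver;
};

int main(void)
{
	union fw_assisted_mclk_switch_version v = { .minor = 1, .major = 2 };
	uint8_t fams_ver = 0x41; /* hypothetical firmware-reported version */

	/* On the usual layout: ver = (major << 5) | minor = 0x41. */
	printf("v%u.%u packs to ver=0x%02x\n", v.major, v.minor, v.ver);

	/* optc401 now requires an exact match instead of fams_ver >= 2. */
	printf("match: %d\n", fams_ver == v.ver);
	return 0;
}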
/**
* DMUB feature capabilities.
* After DMUB init, driver will query FW capabilities prior to enabling certain features.
@@ -1823,52 +1896,11 @@ enum fams2_stream_type {
FAMS2_STREAM_TYPE_SUBVP = 4,
};
-/* dynamic stream state */
-struct dmub_fams2_legacy_stream_dynamic_state {
- uint8_t force_allow_at_vblank;
- uint8_t pad[3];
-};
-
-struct dmub_fams2_subvp_stream_dynamic_state {
- uint16_t viewport_start_hubp_vline;
- uint16_t viewport_height_hubp_vlines;
- uint16_t viewport_start_c_hubp_vline;
- uint16_t viewport_height_c_hubp_vlines;
- uint16_t phantom_viewport_height_hubp_vlines;
- uint16_t phantom_viewport_height_c_hubp_vlines;
- uint16_t microschedule_start_otg_vline;
- uint16_t mall_start_otg_vline;
- uint16_t mall_start_hubp_vline;
- uint16_t mall_start_c_hubp_vline;
- uint8_t force_allow_at_vblank_only;
- uint8_t pad[3];
-};
-
-struct dmub_fams2_drr_stream_dynamic_state {
- uint16_t stretched_vtotal;
- uint8_t use_cur_vtotal;
- uint8_t pad;
-};
-
-struct dmub_fams2_stream_dynamic_state {
- uint64_t ref_tick;
- uint32_t cur_vtotal;
- uint16_t adjusted_allow_end_otg_vline;
- uint8_t pad[2];
- struct dmub_optc_position ref_otg_pos;
- struct dmub_optc_position target_otg_pos;
- union {
- struct dmub_fams2_legacy_stream_dynamic_state legacy;
- struct dmub_fams2_subvp_stream_dynamic_state subvp;
- struct dmub_fams2_drr_stream_dynamic_state drr;
- } sub_state;
-};
-
/* static stream state */
struct dmub_fams2_legacy_stream_static_state {
uint8_t vactive_det_fill_delay_otg_vlines;
uint8_t programming_delay_otg_vlines;
-};
+}; //v0
struct dmub_fams2_subvp_stream_static_state {
uint16_t vratio_numerator;
@@ -1887,14 +1919,59 @@ struct dmub_fams2_subvp_stream_static_state {
uint8_t phantom_otg_inst;
uint8_t phantom_pipe_mask;
uint8_t phantom_plane_pipe_masks[DMUB_MAX_PHANTOM_PLANES]; // phantom pipe mask per plane (for flip passthrough)
-};
+}; //v0
struct dmub_fams2_drr_stream_static_state {
uint16_t nom_stretched_vtotal;
uint8_t programming_delay_otg_vlines;
uint8_t only_stretch_if_required;
uint8_t pad[2];
-};
+}; //v0
+
+struct dmub_fams2_cmd_legacy_stream_static_state {
+ uint16_t vactive_det_fill_delay_otg_vlines;
+ uint16_t programming_delay_otg_vlines;
+}; //v1
+
+struct dmub_fams2_cmd_subvp_stream_static_state {
+ uint16_t vratio_numerator;
+ uint16_t vratio_denominator;
+ uint16_t phantom_vtotal;
+ uint16_t phantom_vactive;
+ uint16_t programming_delay_otg_vlines;
+ uint16_t prefetch_to_mall_otg_vlines;
+ union {
+ struct {
+ uint8_t is_multi_planar : 1;
+ uint8_t is_yuv420 : 1;
+ } bits;
+ uint8_t all;
+ } config;
+ uint8_t phantom_otg_inst;
+ uint8_t phantom_pipe_mask;
+ uint8_t pad0;
+ uint8_t phantom_plane_pipe_masks[DMUB_MAX_PHANTOM_PLANES]; // phantom pipe mask per plane (for flip passthrough)
+ uint8_t pad1[4 - (DMUB_MAX_PHANTOM_PLANES % 4)];
+}; //v1
+
+struct dmub_fams2_cmd_drr_stream_static_state {
+ uint16_t nom_stretched_vtotal;
+ uint16_t programming_delay_otg_vlines;
+ uint8_t only_stretch_if_required;
+ uint8_t pad[3];
+}; //v1
+
+union dmub_fams2_stream_static_sub_state {
+ struct dmub_fams2_legacy_stream_static_state legacy;
+ struct dmub_fams2_subvp_stream_static_state subvp;
+ struct dmub_fams2_drr_stream_static_state drr;
+}; //v0
+
+union dmub_fams2_cmd_stream_static_sub_state {
+ struct dmub_fams2_cmd_legacy_stream_static_state legacy;
+ struct dmub_fams2_cmd_subvp_stream_static_state subvp;
+ struct dmub_fams2_cmd_drr_stream_static_state drr;
+}; //v1
struct dmub_fams2_stream_static_state {
enum fams2_stream_type type;
@@ -1924,13 +2001,45 @@ struct dmub_fams2_stream_static_state {
uint8_t pipe_mask; // pipe mask for the whole config
uint8_t num_planes;
uint8_t plane_pipe_masks[DMUB_MAX_PLANES]; // pipe mask per plane (for flip passthrough)
- uint8_t pad[DMUB_MAX_PLANES % 4];
+ uint8_t pad[4 - (DMUB_MAX_PLANES % 4)];
+ union dmub_fams2_stream_static_sub_state sub_state;
+}; //v0
+
+struct dmub_fams2_cmd_stream_static_base_state {
+ enum fams2_stream_type type;
+ uint32_t otg_vline_time_ns;
+ uint32_t otg_vline_time_ticks;
+ uint16_t htotal;
+ uint16_t vtotal; // nominal vtotal
+ uint16_t vblank_start;
+ uint16_t vblank_end;
+ uint16_t max_vtotal;
+ uint16_t allow_start_otg_vline;
+ uint16_t allow_end_otg_vline;
+ uint16_t drr_keepout_otg_vline; // after this vline, vtotal cannot be changed
+ uint16_t scheduling_delay_otg_vlines; // min time to budget for ready to microschedule start
+ uint16_t contention_delay_otg_vlines; // time to budget for contention on execution
+ uint16_t vline_int_ack_delay_otg_vlines; // min time to budget for vertical interrupt firing
+ uint16_t allow_to_target_delay_otg_vlines; // time from allow vline to target vline
union {
- struct dmub_fams2_legacy_stream_static_state legacy;
- struct dmub_fams2_subvp_stream_static_state subvp;
- struct dmub_fams2_drr_stream_static_state drr;
- } sub_state;
-};
+ struct {
+ uint8_t is_drr : 1; // stream is DRR enabled
+ uint8_t clamp_vtotal_min : 1; // clamp vtotal to min instead of nominal
+ uint8_t min_ttu_vblank_usable : 1; // if min ttu vblank is above wm, no force pstate is needed in blank
+ } bits;
+ uint8_t all;
+ } config;
+ uint8_t otg_inst;
+ uint8_t pipe_mask; // pipe mask for the whole config
+ uint8_t num_planes;
+ uint8_t plane_pipe_masks[DMUB_MAX_PLANES]; // pipe mask per plane (for flip passthrough)
+ uint8_t pad[4 - (DMUB_MAX_PLANES % 4)];
+}; //v1
+
+struct dmub_fams2_stream_static_state_v1 {
+ struct dmub_fams2_cmd_stream_static_base_state base;
+ union dmub_fams2_cmd_stream_static_sub_state sub_state;
+}; //v1
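The v1 structs switch the trailing pad from DMUB_MAX_PLANES % 4 to 4 - (DMUB_MAX_PLANES % 4) bytes; one plausible reading is that the new form can never produce a zero-length array, at the cost of four extra bytes when the preceding masks are already 4-aligned. A quick comparison of the two expressions:

#include <stdio.h>

int main(void)
{
	int n;

	/* old pad = n % 4 (zero when aligned); new pad = 4 - (n % 4) (1..4). */
	for (n = 4; n <= 8; n++)
		printf("masks=%d old_pad=%d new_pad=%d\n",
		       n, n % 4, 4 - (n % 4));
	return 0;
}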
/**
* enum dmub_fams2_allow_delay_check_mode - macroscheduler mode for breaking on excessive
@@ -1970,7 +2079,11 @@ struct dmub_cmd_fams2_global_config {
union dmub_cmd_fams2_config {
struct dmub_cmd_fams2_global_config global;
- struct dmub_fams2_stream_static_state stream;
+ struct dmub_fams2_stream_static_state stream; //v0
+ union {
+ struct dmub_fams2_cmd_stream_static_base_state base;
+ union dmub_fams2_cmd_stream_static_sub_state sub_state;
+ } stream_v1; //v1
};
/**
@@ -3592,6 +3705,8 @@ enum dmub_cmd_replay_general_subtype {
*/
REPLAY_GENERAL_CMD_DISABLED_ADAPTIVE_SYNC_SDP,
REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION,
+ REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS,
+ REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index a3f3ff5d49ac..15ea216e903d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -61,10 +61,6 @@
/* Default state size if meta is absent. */
#define DMUB_FW_STATE_SIZE (64 * 1024)
-/* Default tracebuffer size if meta is absent. */
-#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
-
-
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (1024)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 95838c7ab054..29ccd3532d13 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -996,9 +996,9 @@ void set_replay_coasting_vtotal(struct dc_link *link,
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
{
- link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+ link->replay_settings.low_rr_full_screen_video_pseudo_vtotal = vtotal;
}
void calculate_replay_link_off_frame_count(struct dc_link *link,
@@ -1039,3 +1039,8 @@ bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_back
memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
return true;
}
+
+void reset_replay_dsync_error_count(struct dc_link *link)
+{
+ link->replay_settings.replay_desync_error_fail_count = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index cac302e8fa10..758a8aa31fbe 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -62,7 +62,7 @@ void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
uint32_t vtotal);
void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
enum replay_coasting_vtotal_type type);
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
@@ -78,4 +78,5 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no,
struct dm_acpi_atif_backlight_caps *caps);
+void reset_replay_dsync_error_count(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 7eefcb0f5070..05bdb4e020ae 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -344,6 +344,11 @@ enum DC_DEBUG_MASK {
* eDP display from ACPI _DDC method.
*/
DC_DISABLE_ACPI_EDID = 0x8000,
+
+ /*
+ * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver.
+ */
+ DC_DISABLE_HDMI_CEC = 0x10000,
};
enum amd_dpm_forced_level;
@@ -401,9 +406,9 @@ struct amd_ip_funcs {
int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
int (*soft_reset)(struct amdgpu_ip_block *ip_block);
int (*post_soft_reset)(struct amdgpu_ip_block *ip_block);
- int (*set_clockgating_state)(void *handle,
+ int (*set_clockgating_state)(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
- int (*set_powergating_state)(void *handle,
+ int (*set_powergating_state)(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
void (*get_clockgating_state)(void *handle, u64 *flags);
void (*dump_ip_state)(struct amdgpu_ip_block *ip_block);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h
index cae1a7e74323..73c5dd5e83d4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h
@@ -19,8 +19,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _dcn_2_0_3_OFFSET_HEADER
-#define _dcn_2_0_3_OFFSET_HEADER
+#ifndef _dcn_2_0_1_OFFSET_HEADER
+#define _dcn_2_0_1_OFFSET_HEADER
// addressBlock: dce_dc_dccg_dccg_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h
index ca1e1eb39256..290d807800a6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h
@@ -18,8 +18,8 @@
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _dcn_2_0_3_SH_MASK_HEADER
-#define _dcn_2_0_3_SH_MASK_HEADER
+#ifndef _dcn_2_0_1_SH_MASK_HEADER
+#define _dcn_2_0_1_SH_MASK_HEADER
// addressBlock: dce_dc_dccg_dccg_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h
new file mode 100644
index 000000000000..0e8f12728d5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_8_14_0_OFFSET_HEADER
+#define _umc_8_14_0_OFFSET_HEADER
+
+#define regUMCCH0_GeccErrCntSel 0x0328
+#define regUMCCH0_GeccErrCntSel_BASE_IDX 0
+#define regUMCCH0_GeccErrCnt 0x0329
+#define regUMCCH0_GeccErrCnt_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h
new file mode 100644
index 000000000000..5d723b5d9b87
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_8_14_0_SH_MASK_HEADER
+#define _umc_8_14_0_SH_MASK_HEADER
+
+//UMCCH0_GeccErrCntSel
+#define UMCCH0_GeccErrCntSel__GeccErrInt__SHIFT 0xc
+#define UMCCH0_GeccErrCntSel__GeccErrCntEn__SHIFT 0xf
+#define UMCCH0_GeccErrCntSel__PoisonCntEn__SHIFT 0x10
+#define UMCCH0_GeccErrCntSel__GeccErrInt_MASK 0x00003000L
+#define UMCCH0_GeccErrCntSel__GeccErrCntEn_MASK 0x00008000L
+#define UMCCH0_GeccErrCntSel__PoisonCntEn_MASK 0x00030000L
+//UMCCH0_GeccErrCnt
+#define UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT 0x0
+#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT 0x10
+#define UMCCH0_GeccErrCnt__GeccErrCnt_MASK 0x0000FFFFL
+#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt_MASK 0xFFFF0000L
+
+#endif
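The new sh_mask header pairs each field with a mask and a shift; extracting a field is mask-then-shift. A standalone example decoding a hypothetical UMCCH0_GeccErrCnt readback using the values defined above:

#include <stdio.h>
#include <stdint.h>

#define UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT       0x0
#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT 0x10
#define UMCCH0_GeccErrCnt__GeccErrCnt_MASK         0x0000FFFFL
#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt_MASK   0xFFFF0000L

int main(void)
{
	uint32_t reg = 0x00030005; /* hypothetical register readback */

	uint32_t corr = (reg & UMCCH0_GeccErrCnt__GeccErrCnt_MASK)
			>> UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT;
	uint32_t uncorr = (reg & UMCCH0_GeccErrCnt__GeccUnCorrErrCnt_MASK)
			>> UMCCH0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT;

	printf("correctable=%u uncorrectable=%u\n", corr, uncorr); /* 5 and 3 */
	return 0;
}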
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index b0fc22383e28..0160d65f3f5e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1300,12 +1300,17 @@ struct atom_ext_display_path
//usCaps
enum ext_display_path_cap_def {
- EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE = 0x0001,
- EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = 0x0002,
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007C,
- EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x01 << 2), //PI redriver chip
- EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
- EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x03 << 2) //Parade DP->HDMI recoverter chip
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007E,
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007E,
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = (0x01 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x02 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2 = (0x03 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x04 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x06 << 1),
+ EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = (0x07 << 1),
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x08 << 1), //PI redriver chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x09 << 1), //TI retimer chip
+ EXT_DISPLAY_PATH_CAPS__AMD_INTERNAL = (0x0a << 1), //AMD internal customer chip placeholder
};
struct atom_external_display_connection_info
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
new file mode 100644
index 000000000000..64b553e7de1a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __IRQSRCS_VCN_5_0_H__
+#define __IRQSRCS_VCN_5_0_H__
+
+#define VCN_5_0__SRCID__UVD_TRAP 114 // 0x72 UVD_TRAP
+#define VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 // 0x77 Encoder General Purpose
+#define VCN_5_0__SRCID__UVD_ENC_LOW_LATENCY 120 // 0x78 Encoder Low Latency
+#define VCN_5_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 // 0x7c UVD system message interrupt
+#define VCN_5_0__SRCID__JPEG_ENCODE 151 // 0x97 JRBC Encode interrupt
+#define VCN_5_0__SRCID__JPEG_DECODE 153 // 0x99 JRBC Decode interrupt
+#define VCN_5_0__SRCID__JPEG1_DECODE 149 // 0x95 JRBC1 Decode interrupt
+#define VCN_5_0__SRCID__JPEG2_DECODE 151 // 0x97 JRBC2 Decode interrupt
+#define VCN_5_0__SRCID__JPEG3_DECODE 171 // 0xab JRBC3 Decode interrupt
+#define VCN_5_0__SRCID__JPEG4_DECODE 172 // 0xac JRBC4 Decode interrupt
+#define VCN_5_0__SRCID__JPEG5_DECODE 173 // 0xad JRBC5 Decode interrupt
+#define VCN_5_0__SRCID__JPEG6_DECODE 174 // 0xae JRBC6 Decode interrupt
+#define VCN_5_0__SRCID__JPEG7_DECODE 175 // 0xaf JRBC7 Decode interrupt
+#define VCN_5_0__SRCID__JPEG8_DECODE 177 // 0xb1 JRBC8 Decode interrupt
+#define VCN_5_0__SRCID__JPEG9_DECODE 178 // 0xb2 JRBC9 Decode interrupt
+
+#define VCN_5_0__SRCID_UVD_POISON 160
+#define VCN_5_0__SRCID_DJPEG0_POISON 161
+#define VCN_5_0__SRCID_EJPEG0_POISON 162
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index d7acdd42d80f..9189dcb65188 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -421,7 +421,9 @@ struct amd_pm_funcs {
int (*load_firmware)(void *handle);
int (*wait_for_fw_loading_complete)(void *handle);
int (*set_powergating_by_smu)(void *handle,
- uint32_t block_type, bool gate);
+ uint32_t block_type,
+ bool gate,
+ int inst);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
int (*set_power_limit)(void *handle, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 9dc82f4d7c93..6a9e26905edf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -70,13 +70,18 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return ret;
}
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
+int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+ bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);
- if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+ if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
+ (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
@@ -88,7 +93,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
case AMD_IP_BLOCK_TYPE_GFX:
- case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
@@ -96,7 +100,12 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_VPE:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
+ (adev)->powerplay.pp_handle, block_type, gate, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_VCN:
+ if (pp_funcs && pp_funcs->set_powergating_by_smu)
+ ret = (pp_funcs->set_powergating_by_smu(
+ (adev)->powerplay.pp_handle, block_type, gate, inst));
break;
default:
break;
@@ -566,8 +575,18 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 		return;
 	}
 
-	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
 	if (ret)
 		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
 			  enable ? "enable" : "disable", ret);
 }
+
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
+{
+	int ret = 0;
+
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
+	if (ret)
+		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
+			  enable ? "enable" : "disable", ret);
+}
@@ -591,7 +610,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
return;
}
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -601,7 +620,7 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -611,7 +630,7 @@ void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
enable ? "enable" : "disable", ret);
@@ -700,6 +719,21 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
return ret;
}
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_sdma(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -953,6 +987,24 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
return level;
}
+static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
+{
+ /* enter UMD Pstate */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+}
+
+static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
+{
+ /* exit UMD Pstate */
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+}
+
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
enum amd_dpm_forced_level level)
{
@@ -973,6 +1025,10 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
if (current_level == level)
return 0;
+ if (!(current_level & profile_mode_mask) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+ return -EINVAL;
+
if (adev->asic_type == CHIP_RAVEN) {
if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
@@ -984,35 +1040,25 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
- if (!(current_level & profile_mode_mask) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
- return -EINVAL;
-
- if (!(current_level & profile_mode_mask) &&
- (level & profile_mode_mask)) {
- /* enter UMD Pstate */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- } else if ((current_level & profile_mode_mask) &&
- !(level & profile_mode_mask)) {
- /* exit UMD Pstate */
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
- }
+ if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
mutex_lock(&adev->pm.mutex);
if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
level)) {
mutex_unlock(&adev->pm.mutex);
+		/* If setting the new level failed, restore the previous UMD pstate */
+ if (!(current_level & profile_mode_mask) &&
+ (level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+
return -EINVAL;
}
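
As a usage sketch (not part of this patch; the helper names are hypothetical), a caller can now gate VCN power per instance through the API above, assuming the declarations from amdgpu_dpm.h:

	/* Sketch only: ungate one VCN instance before use, gate it again when idle. */
	static void example_vcn_begin_use(struct amdgpu_device *adev, int inst)
	{
		amdgpu_dpm_enable_vcn(adev, true, inst);	/* ungate instance 'inst' */
	}

	static void example_vcn_end_use(struct amdgpu_device *adev, int inst)
	{
		amdgpu_dpm_enable_vcn(adev, false, inst);	/* gate instance 'inst' */
	}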
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 363af8990aa2..1f5ac7e0230d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -397,7 +397,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit);
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
- uint32_t block_type, bool gate);
+ uint32_t block_type, bool gate, int inst);
extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
@@ -446,6 +446,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);
@@ -601,5 +602,6 @@ int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
int policy_level);
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
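
For the new SDMA reset hook, a hedged sketch (not from this patch): inst_mask is a bitmask of SDMA engine instances, and the call returns -EOPNOTSUPP when the SW SMU path is unavailable. Assuming BIT() from <linux/bits.h>:

	/* Illustrative only: ask the SMU to soft-reset SDMA instances 0 and 2. */
	static int example_reset_sdma_pair(struct amdgpu_device *adev)
	{
		return amdgpu_dpm_reset_sdma(adev, BIT(0) | BIT(2));
	}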
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8908646ad620..67a8e22b1126 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3177,13 +3177,13 @@ static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_set_clockgating_state(void *handle,
+static int kv_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int kv_dpm_set_powergating_state(void *handle,
+static int kv_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3276,7 +3276,9 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
static int kv_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index ee23a0f897c5..a87dcf0974bc 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7709,7 +7709,8 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s_smc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_smc.bin", chip_name);
if (err) {
DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s_smc.bin\"\n",
err, chip_name);
@@ -7849,13 +7850,13 @@ static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int si_dpm_set_clockgating_state(void *handle,
+static int si_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_dpm_set_powergating_state(void *handle,
+static int si_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 26624a716fc6..686345f75f26 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -244,7 +244,7 @@ static bool pp_is_idle(void *handle)
return false;
}
-static int pp_set_powergating_state(void *handle,
+static int pp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -267,7 +267,7 @@ static int pp_resume(struct amdgpu_ip_block *ip_block)
return hwmgr_resume(hwmgr);
}
-static int pp_set_clockgating_state(void *handle,
+static int pp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
@@ -1227,7 +1227,9 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
}
static int pp_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index fe24219c3bf4..4bd92fd782be 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -992,6 +992,8 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
GetIndexIntoMasterTable(DATA, SMU_Info),
&size, &frev, &crev);
+ if (!psmu_info)
+ return -EINVAL;
for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
index 3007b054c873..776d58ea63ae 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
@@ -1120,13 +1120,14 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
if (0 != result)
- return result;
+ goto exit_safe_mode;
vega10_didt_set_mask(hwmgr, false);
+exit_safe_mode:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
- return 0;
+ return result;
}
static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 21bd635bcdfc..8ca793c222ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -238,7 +238,8 @@ static bool is_vcn_enabled(struct amdgpu_device *adev)
}
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
- bool enable)
+ bool enable,
+ int inst)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -253,12 +254,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
if (!smu->ppt_funcs->dpm_set_vcn_enable)
return 0;
- if (atomic_read(&power_gate->vcn_gated) ^ enable)
+ if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
return 0;
- ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff);
+ ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
if (!ret)
- atomic_set(&power_gate->vcn_gated, !enable);
+ atomic_set(&power_gate->vcn_gated[inst], !enable);
return ret;
}
@@ -345,8 +346,9 @@ static int smu_set_mall_enable(struct smu_context *smu)
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
* @handle: smu_context pointer
- * @block_type: the IP block to power gate/ungate
- * @gate: to power gate if true, ungate otherwise
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ * @inst: the instance of the IP block to power gate/ungate
*
* This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce).
@@ -357,7 +359,8 @@ static int smu_set_mall_enable(struct smu_context *smu)
*/
static int smu_dpm_set_power_gate(void *handle,
uint32_t block_type,
- bool gate)
+ bool gate,
+ int inst)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -376,10 +379,10 @@ static int smu_dpm_set_power_gate(void *handle,
*/
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCN:
- ret = smu_dpm_set_vcn_enable(smu, !gate);
+ ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
if (ret)
- dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
- gate ? "gate" : "ungate");
+ dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
+ gate ? "gate" : "ungate", inst);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = smu_gfx_off_control(smu, gate);
@@ -724,6 +727,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
smu_v13_0_6_set_ppt_funcs(smu);
/* Enable pp_od_clk_voltage node */
smu->od_enabled = true;
@@ -782,21 +786,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
- int vcn_gate, jpeg_gate;
+ int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
int ret = 0;
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
- vcn_gate = atomic_read(&power_gate->vcn_gated);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
+ }
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
jpeg_gate = atomic_read(&power_gate->jpeg_gated);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
- ret = smu_dpm_set_vcn_enable(smu, true);
- if (ret)
- return ret;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ ret = smu_dpm_set_vcn_enable(smu, true, i);
+ if (ret)
+ return ret;
+ }
}
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
@@ -813,8 +821,10 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
- smu_dpm_set_vcn_enable(smu, !vcn_gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
+ }
return ret;
}
@@ -1268,7 +1278,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
+ int i, ret;
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
@@ -1280,7 +1290,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
- atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
@@ -1810,7 +1821,7 @@ static int smu_start_smc_engine(struct smu_context *smu)
static int smu_hw_init(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int i, ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -1836,7 +1847,8 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
ret = smu_set_gfx_imu_enable(smu);
if (ret)
return ret;
- smu_dpm_set_vcn_enable(smu, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, true, i);
smu_dpm_set_jpeg_enable(smu, true);
smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
@@ -2034,12 +2046,13 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
+ int i, ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- smu_dpm_set_vcn_enable(smu, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, false, i);
smu_dpm_set_jpeg_enable(smu, false);
smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
@@ -2191,13 +2204,13 @@ static int smu_display_configuration_change(void *handle,
return 0;
}
-static int smu_set_clockgating_state(void *handle,
+static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int smu_set_powergating_state(void *handle,
+static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -2979,9 +2992,10 @@ static int smu_read_sensor(void *handle,
int *size_arg)
{
struct smu_context *smu = handle;
+ struct amdgpu_device *adev = smu->adev;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
- int ret = 0;
+ int i, ret = 0;
uint32_t *size, size_val;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -3027,7 +3041,13 @@ static int smu_read_sensor(void *handle,
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
- *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
+ *(uint32_t *)data = 0;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
+ *(uint32_t *)data = 1;
+ break;
+ }
+ }
*size = 4;
break;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
@@ -3895,3 +3915,13 @@ int smu_send_rma_reason(struct smu_context *smu)
return ret;
}
+
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
+ ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3925815358ce..3630593bce61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -399,7 +399,7 @@ struct smu_dpm_context {
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
- atomic_t vcn_gated;
+ atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
atomic_t umsch_mm_gated;
@@ -1373,6 +1373,11 @@ struct pptable_funcs {
int (*send_rma_reason)(struct smu_context *smu);
/**
+	 * @reset_sdma: message the SMU to soft-reset the SDMA instances selected by inst_mask.
+ */
+ int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1631,6 +1636,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
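
Since vcn_gated is now tracked per instance, any "is VCN active" query has to scan every instance; a minimal sketch (not from this patch) mirroring the AMDGPU_PP_SENSOR_VCN_POWER_STATE handling above:

	/* Sketch: true if at least one VCN instance is currently ungated. */
	static bool example_any_vcn_active(struct smu_context *smu, int num_inst)
	{
		int i;

		for (i = 0; i < num_inst; i++)
			if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i]))
				return true;

		return false;
	}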
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 0f96b8c59a0e..274b3e1cc4fb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -34,6 +34,8 @@
#define NUM_PCIE_BITRATES 4
#define NUM_XGMI_BITRATES 4
#define NUM_XGMI_WIDTHS 3
+#define NUM_SOC_P2S_TABLES 3
+#define NUM_TDP_GROUPS 4
typedef enum {
/*0*/ FEATURE_DATA_CALCULATION = 0,
@@ -80,8 +82,10 @@ typedef enum {
/*41*/ FEATURE_CXL_QOS = 41,
/*42*/ FEATURE_SOC_DC_RTC = 42,
/*43*/ FEATURE_GFX_DC_RTC = 43,
+/*44*/ FEATURE_DVM_MIN_PSM = 44,
+/*45*/ FEATURE_PRC = 45,
-/*44*/ NUM_FEATURES = 44
+/*46*/ NUM_FEATURES = 46
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -123,7 +127,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xE
+#define SMU_METRICS_TABLE_VERSION 0xF
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -234,6 +238,9 @@ typedef struct __attribute__((packed, aligned(4))) {
//PCIE BW Data and error count
uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitAcc[8];
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
@@ -328,13 +335,14 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t JpegBusy[32];
} MetricsTableA_t;
-#define SMU_VF_METRICS_TABLE_VERSION 0x3
+#define SMU_VF_METRICS_TABLE_VERSION 0x5
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
uint64_t AccGfxclk_TargFreq;
uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 41cb681927e2..7b65a27fb302 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -93,7 +93,9 @@
#define PPSMC_MSG_SelectPLPDMode 0x40
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SelectPstatePolicy 0x44
-#define PPSMC_Message_Count 0x45
+#define PPSMC_MSG_ResetSDMA2 0x45
+#define PPSMC_MSG_ResetSDMA 0x4D
+#define PPSMC_Message_Count 0x4E
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index a299dc4a8071..b0dab9797c70 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -275,7 +275,9 @@
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
__SMU_DUMMY_MAP(SelectPstatePolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
- __SMU_DUMMY_MAP(MALLPowerState),
+ __SMU_DUMMY_MAP(MALLPowerState), \
+ __SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(ResetSDMA2),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 286777ada1df..19a25fdc2f5b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1157,19 +1157,15 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu,
int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- 0x10000 * i, NULL);
- if (ret)
- return ret;
- }
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ 0x10000 * inst, NULL);
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 480cf3cb204d..189c6a32b6bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -105,7 +105,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 2d1e7ebd1bac..fbbdfa54f6a2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -103,7 +103,8 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -2108,18 +2109,14 @@ int smu_v13_0_set_vcn_enable(struct smu_context *smu,
int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ inst << 16U, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 2a09b27788e8..0551a3311217 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2643,11 +2643,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
&backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
+ if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
+ ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500))) {
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index ab3c93ddce46..8ab30b2f7119 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -119,6 +119,21 @@ static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *
}
}
+static inline bool smu_v13_0_6_is_blw_host_limit_available(struct smu_context *smu)
+{
+ if (smu->adev->flags & AMD_IS_APU)
+ return smu->smc_fw_version >= 0x04556F00;
+
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ return smu->smc_fw_version >= 0x557900;
+ case IP_VERSION(13, 0, 14):
+ return smu->smc_fw_version >= 0x05551000;
+ default:
+ return false;
+ }
+}
+
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -193,6 +208,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetSDMA2, PPSMC_MSG_ResetSDMA2, 0),
};
// clang-format on
@@ -304,7 +321,8 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
- ret = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ ret = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (ret)
goto out;
@@ -2356,6 +2374,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->average_umc_activity =
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, flag));
+
gpu_metrics->curr_socket_power =
SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
/* Energy counter reported in 15.259uJ (2^-16) units */
@@ -2494,6 +2515,11 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+
+ if (smu_v13_0_6_is_blw_host_limit_available(smu))
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
+ SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
+ [inst]);
idx++;
}
}
@@ -2716,6 +2742,41 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+ uint32_t smu_program;
+ int ret = 0;
+
+ smu_program = (smu->smc_fw_version >> 24) & 0xff;
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ if (((smu_program == 7) && (smu->smc_fw_version > 0x07550700)) ||
+ ((smu_program == 0) && (smu->smc_fw_version > 0x00557700)))
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ResetSDMA, inst_mask, NULL);
+ else if ((smu_program == 4) &&
+ (smu->smc_fw_version > 0x4556e6c))
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ResetSDMA2, inst_mask, NULL);
+ break;
+ case IP_VERSION(13, 0, 14):
+ if ((smu_program == 5) &&
+ (smu->smc_fw_version > 0x05550f00))
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ResetSDMA2, inst_mask, NULL);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetSDMA event with mask 0x%x\n",
+ inst_mask);
+
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3385,6 +3446,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
+ .reset_sdma = smu_v13_0_6_reset_sdma,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
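
A worked example (values illustrative, not taken from the patch) of the firmware-version decoding in smu_v13_0_6_reset_sdma() above:

	/*
	 * The program id is the top byte of smc_fw_version, e.g. for
	 * smc_fw_version = 0x07550800:
	 *   smu_program = (0x07550800 >> 24) & 0xff = 0x07
	 * and since 0x07550800 > 0x07550700, IP v13.0.6 sends
	 * SMU_MSG_ResetSDMA; a program-4 firmware is checked against
	 * 0x4556e6c and uses SMU_MSG_ResetSDMA2 instead.
	 */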
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index a87040cb2f2e..9b2f4fe1578b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -79,7 +79,8 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -1511,29 +1512,24 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
- if (smu->is_apu) {
- if (i == 0)
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
- i << 16U, NULL);
- else if (i == 1)
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
- i << 16U, NULL);
- } else {
+ if (smu->is_apu) {
+ if (inst == 0)
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
- }
-
- if (ret)
- return ret;
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ inst << 16U, NULL);
+ else if (inst == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ inst << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ inst << 16U, NULL);
}
return ret;
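
One note on the parameter encoding shared by these dpm_set_vcn_enable paths: the instance index rides in the upper halfword of the SMC message argument (illustrative arithmetic, not from the patch):

	/* inst = 1 -> param = 1 << 16 = 0x00010000; inst = 0 -> param = 0x0. */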
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index d981d721e796..358c1512b087 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 1e7b1fcb2848..6ed504099188 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -63,7 +63,6 @@ static const struct drm_driver komeda_kms_driver = {
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
- .date = "20181101",
.major = 0,
.minor = 1,
};
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 191b806624df..c3179d74f3f5 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -233,7 +233,6 @@ static const struct drm_driver hdlcd_driver = {
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
- .date = "20151021",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index fd2be80f3bf5..e083021e9e99 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -16,9 +16,9 @@
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -570,7 +570,6 @@ static const struct drm_driver malidp_driver = {
.fops = &fops,
.name = "mali-dp",
.desc = "ARM Mali Display Processor driver",
- .date = "20160106",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 650e450cc19b..cae25ad66c74 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -11,8 +11,8 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
@@ -45,7 +45,6 @@ static const struct drm_driver armada_drm_driver = {
.minor = 0,
.name = "armada-drm",
.desc = "Armada SoC DRM",
- .date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.num_ioctls = ARRAY_SIZE(armada_ioctls),
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index b7e608ba6194..397e677a691c 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -13,8 +13,8 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_device.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -252,7 +252,6 @@ static const struct drm_driver aspeed_gfx_driver = {
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
- .date = "20180319",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 4afe4be072ef..ff3bcdd1cff2 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -31,8 +31,8 @@
#include <linux/of.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -60,7 +60,6 @@ static const struct drm_driver ast_driver = {
.fops = &ast_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 21ce3769bf0d..6b4305ac07d4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -43,7 +43,6 @@
#define DRIVER_NAME "ast"
#define DRIVER_DESC "AST"
-#define DRIVER_DATE "20120228"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 7b209af7cf45..fa8ad94e431a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -16,9 +16,9 @@
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -846,7 +846,6 @@ static const struct drm_driver atmel_hlcdc_dc_driver = {
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
- .date = "20141504",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 8f786592143b..657bc3dd18df 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -214,7 +214,8 @@ static void audio_shutdown(struct device *dev, void *data)
}
static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index b754947e3e00..83d711ee3a2e 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -793,7 +793,7 @@ static void anx6345_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx6345_id[] = {
- { "anx6345", 0 },
+ { "anx6345" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, anx6345_id);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index a2675b121fe4..4be34d5c7a3b 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1952,7 +1952,8 @@ static void anx7625_audio_shutdown(struct device *dev, void *data)
}
static int anx7625_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -2002,8 +2003,10 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
memset(buf, 0, len);
} else {
dev_dbg(dev, "audio copy eld\n");
+ mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
+ mutex_unlock(&ctx->connector->eld_mutex);
}
return 0;
@@ -2137,49 +2140,6 @@ static void hdcp_check_work_func(struct work_struct *work)
drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
}
-static int anx7625_connector_atomic_check(struct anx7625_data *ctx,
- struct drm_connector_state *state)
-{
- struct device *dev = ctx->dev;
- int cp;
-
- dev_dbg(dev, "hdcp state check\n");
- cp = state->content_protection;
-
- if (cp == ctx->hdcp_cp)
- return 0;
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
- if (ctx->dp_en) {
- dev_dbg(dev, "enable HDCP\n");
- anx7625_hdcp_enable(ctx);
-
- queue_delayed_work(ctx->hdcp_workqueue,
- &ctx->hdcp_work,
- msecs_to_jiffies(2000));
- }
- }
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- if (ctx->hdcp_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dev_err(dev, "current CP is not ENABLED\n");
- return -EINVAL;
- }
- anx7625_hdcp_disable(ctx);
- ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
- drm_hdcp_update_content_protection(ctx->connector,
- ctx->hdcp_cp);
- dev_dbg(dev, "update CP to UNDESIRE\n");
- }
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dev_err(dev, "Userspace illegal set to PROTECTION ENABLE\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int anx7625_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -2416,7 +2376,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
anx7625_bridge_mode_fixup(bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
- return anx7625_connector_atomic_check(ctx, conn_state);
+ return 0;
}
static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
@@ -2425,6 +2385,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
dev_dbg(dev, "drm atomic enable\n");
@@ -2439,6 +2400,22 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
_anx7625_hpd_polling(ctx, 5000 * 100);
anx7625_dp_start(ctx);
+
+ conn_state = drm_atomic_get_new_connector_state(state->base.state, connector);
+
+ if (WARN_ON(!conn_state))
+ return;
+
+ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (ctx->dp_en) {
+ dev_dbg(dev, "enable HDCP\n");
+ anx7625_hdcp_enable(ctx);
+
+ queue_delayed_work(ctx->hdcp_workqueue,
+ &ctx->hdcp_work,
+ msecs_to_jiffies(2000));
+ }
+ }
}
static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -2449,6 +2426,17 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
dev_dbg(dev, "drm atomic disable\n");
+ flush_workqueue(ctx->hdcp_workqueue);
+
+ if (ctx->connector &&
+ ctx->hdcp_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ anx7625_hdcp_disable(ctx);
+ ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_hdcp_update_content_protection(ctx->connector,
+ ctx->hdcp_cp);
+		dev_dbg(dev, "update CP to DESIRED\n");
+ }
+
ctx->connector = NULL;
anx7625_dp_stop(ctx);
@@ -2795,7 +2783,7 @@ static void anx7625_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx7625_id[] = {
- {"anx7625", 0},
+ { "anx7625" },
{}
};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 31832ba4017f..42248f179b69 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -500,34 +500,6 @@ static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val)
-{
- int ret;
-
- mutex_lock(&mhdp->mbox_mutex);
- ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_GENERAL,
- HDCP_GENERAL_SET_LC_128,
- 16, val);
- mutex_unlock(&mhdp->mbox_mutex);
-
- return ret;
-}
-
-int
-cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
- struct cdns_hdcp_tx_public_key_param *val)
-{
- int ret;
-
- mutex_lock(&mhdp->mbox_mutex);
- ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
- HDCP2X_TX_SET_PUBLIC_KEY_PARAMS,
- sizeof(*val), (u8 *)val);
- mutex_unlock(&mhdp->mbox_mutex);
-
- return ret;
-}
-
int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
{
int ret;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
index 334c0b8b0d4f..3b6ec9c3a8d8 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
@@ -82,9 +82,6 @@ struct cdns_hdcp_tx_public_key_param {
u8 E[DLP_E];
};
-int cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
- struct cdns_hdcp_tx_public_key_param *val);
-int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val);
int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type);
int cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp);
void cdns_mhdp_hdcp_init(struct cdns_mhdp_device *mhdp);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 9eecac457dcf..d47703559b0d 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -785,7 +785,7 @@ static struct mipi_dsi_driver chipone_dsi_driver = {
},
};
-static struct i2c_device_id chipone_i2c_id[] = {
+static const struct i2c_device_id chipone_i2c_id[] = {
{ "chipone,icn6211" },
{},
};
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index c83486cf6b15..da17f0978a79 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -597,7 +597,7 @@ static const struct of_device_id ch7033_dt_ids[] = {
MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
static const struct i2c_device_id ch7033_ids[] = {
- { "ch7033", 0 },
+ { "ch7033" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7033_ids);
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cbabd4e20d3e..306b5e374b9e 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -48,6 +48,7 @@
#define REG_COL_DEP GENMASK(1, 0)
#define BIT8 FIELD_PREP(REG_COL_DEP, 1)
#define OUT_MAP BIT(4)
+#define VESA BIT(4)
#define JEIDA 0
#define REG_DESSC_ENB BIT(6)
#define DMODE BIT(7)
@@ -428,12 +429,30 @@ static inline void it6263_lvds_reset(struct it6263 *it)
fsleep(10000);
}
+static inline bool it6263_is_input_bus_fmt_valid(int input_fmt)
+{
+ switch (input_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ return true;
+ }
+ return false;
+}
+
static inline void it6263_lvds_set_interface(struct it6263 *it)
{
+ u8 fmt;
+
/* color depth */
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_COL_DEP, BIT8);
+
+ if (it->lvds_data_mapping == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG)
+ fmt = VESA;
+ else
+ fmt = JEIDA;
+
/* output mapping */
- regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, JEIDA);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, fmt);
if (it->lvds_dual_link) {
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, DISO);
@@ -550,15 +569,6 @@ static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len)
return 0;
}
-static int it6263_bridge_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return drm_atomic_helper_connector_hdmi_check(conn_state->connector,
- conn_state->state);
-}
-
static void
it6263_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
@@ -714,14 +724,14 @@ it6263_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
*num_input_fmts = 0;
- if (it->lvds_data_mapping != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA)
+ if (!it6263_is_input_bus_fmt_valid(it->lvds_data_mapping))
return NULL;
input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
return NULL;
- input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+ input_fmts[0] = it->lvds_data_mapping;
*num_input_fmts = 1;
return input_fmts;
@@ -793,7 +803,6 @@ static const struct drm_bridge_funcs it6263_bridge_funcs = {
.mode_valid = it6263_bridge_mode_valid,
.atomic_disable = it6263_bridge_atomic_disable,
.atomic_enable = it6263_bridge_atomic_enable,
- .atomic_check = it6263_bridge_atomic_check,
.detect = it6263_bridge_detect,
.edid_read = it6263_bridge_edid_read,
.atomic_get_input_bus_fmts = it6263_bridge_atomic_get_input_bus_fmts,
@@ -845,8 +854,8 @@ static int it6263_probe(struct i2c_client *client)
it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
LVDS_INPUT_CTRL_I2C_ADDR);
if (IS_ERR(it->lvds_i2c))
- dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
- "failed to allocate I2C device for LVDS\n");
+ return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
+ "failed to allocate I2C device for LVDS\n");
it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
&it6263_lvds_regmap_config);
@@ -878,7 +887,7 @@ static const struct of_device_id it6263_of_match[] = {
MODULE_DEVICE_TABLE(of, it6263_of_match);
static const struct i2c_device_id it6263_i2c_ids[] = {
- { "it6263", 0 },
+ { "it6263" },
{ }
};
MODULE_DEVICE_TABLE(i2c, it6263_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 008d86cc562a..88ef76a37fe6 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -19,6 +19,7 @@
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/bitfield.h>
#include <crypto/hash.h>
@@ -126,6 +127,7 @@
#define REG_AUX_OUT_DATA0 0x27
#define REG_AUX_CMD_REQ 0x2B
+#define M_AUX_REQ_CMD 0x0F
#define AUX_BUSY BIT(5)
#define REG_AUX_DATA_0_7 0x2C
@@ -266,6 +268,18 @@
#define REG_SSC_CTRL1 0x189
#define REG_SSC_CTRL2 0x18A
+#define REG_AUX_USER_CTRL 0x190
+#define EN_USER_AUX BIT(0)
+#define USER_AUX_DONE BIT(1)
+#define AUX_EVENT BIT(4)
+
+#define REG_AUX_USER_DATA_REC 0x191
+#define M_AUX_IN_REC 0xF0
+#define M_AUX_OUT_REC 0x0F
+
+#define REG_AUX_USER_REPLY 0x19A
+#define REG_AUX_USER_RXB(n) (n + 0x19B)
+
#define RBR DP_LINK_BW_1_62
#define HBR DP_LINK_BW_2_7
#define HBR2 DP_LINK_BW_5_4
@@ -296,11 +310,13 @@
#define MAX_LANE_COUNT 4
#define MAX_LINK_RATE HBR
#define AUTO_TRAIN_RETRY 3
-#define MAX_HDCP_DOWN_STREAM_COUNT 10
+#define MAX_HDCP_DOWN_STREAM_COUNT 127
#define MAX_CR_LEVEL 0x03
#define MAX_EQ_LEVEL 0x03
#define AUX_WAIT_TIMEOUT_MS 15
-#define AUX_FIFO_MAX_SIZE 32
+#define AUX_FIFO_MAX_SIZE 16
+#define AUX_I2C_MAX_SIZE 4
+#define AUX_I2C_DEFER_RETRY 4
#define PIXEL_CLK_DELAY 1
#define PIXEL_CLK_INVERSE 0
#define ADJUST_PHASE_THRESHOLD 80000
@@ -323,7 +339,15 @@
enum aux_cmd_type {
CMD_AUX_NATIVE_READ = 0x0,
CMD_AUX_NATIVE_WRITE = 0x5,
+ CMD_AUX_GI2C_ADR = 0x08,
+ CMD_AUX_GI2C_READ = 0x09,
+ CMD_AUX_GI2C_WRITE = 0x0A,
CMD_AUX_I2C_EDID_READ = 0xB,
+ CMD_AUX_I2C_READ = 0x0D,
+ CMD_AUX_I2C_WRITE = 0x0C,
+
+	/* KSV read with AUX FIFO, extended from CMD_AUX_NATIVE_READ */
+ CMD_AUX_GET_KSV_LIST = 0x10,
};
enum aux_cmd_reply {
@@ -965,7 +989,8 @@ static ssize_t it6505_aux_operation(struct it6505 *it6505,
it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, AUX_USER_MODE);
aux_op_start:
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ /* HW AUX FIFO supports only the EDID and DPCD KSV FIFO areas */
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
/* AUX EDID FIFO has max length of AUX_FIFO_MAX_SIZE bytes. */
size = min_t(size_t, size, AUX_FIFO_MAX_SIZE);
/* Enable AUX FIFO read back and clear FIFO */
@@ -996,7 +1021,7 @@ aux_op_start:
size);
/* Aux Fire */
- it6505_write(it6505, REG_AUX_CMD_REQ, cmd);
+ it6505_write(it6505, REG_AUX_CMD_REQ, FIELD_GET(M_AUX_REQ_CMD, cmd));
ret = it6505_aux_wait(it6505);
if (ret < 0)
@@ -1030,7 +1055,7 @@ aux_op_start:
goto aux_op_start;
}
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
for (i = 0; i < size; i++) {
ret = it6505_read(it6505, REG_AUX_DATA_FIFO);
if (ret < 0)
@@ -1055,7 +1080,7 @@ aux_op_start:
ret = i;
aux_op_err:
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
/* clear AUX FIFO */
it6505_set_bits(it6505, REG_AUX_CTRL,
AUX_EN_FIFO_READ | CLR_EDID_FIFO,
@@ -1076,10 +1101,14 @@ static ssize_t it6505_aux_do_transfer(struct it6505 *it6505,
size_t size, enum aux_cmd_reply *reply)
{
int i, ret_size, ret = 0, request_size;
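+ /* FIFO-backed commands (EDID, KSV list) can move up to AUX_FIFO_MAX_SIZE bytes per operation; all other commands are limited to 4 bytes */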
+ int fifo_max_size = (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) ?
+ AUX_FIFO_MAX_SIZE : 4;
mutex_lock(&it6505->aux_lock);
- for (i = 0; i < size; i += 4) {
- request_size = min((int)size - i, 4);
+ i = 0;
+ do {
+ request_size = min_t(int, (int)size - i, fifo_max_size);
+
ret_size = it6505_aux_operation(it6505, cmd, address + i,
buffer + i, request_size,
reply);
@@ -1088,14 +1117,170 @@ static ssize_t it6505_aux_do_transfer(struct it6505 *it6505,
goto aux_op_err;
}
+ i += request_size;
ret += ret_size;
- }
+ } while (i < size);
aux_op_err:
mutex_unlock(&it6505->aux_lock);
return ret;
}
+static bool it6505_aux_i2c_reply_defer(u8 reply)
+{
+ return reply == DP_AUX_NATIVE_REPLY_DEFER || reply == DP_AUX_I2C_REPLY_DEFER;
+}
+
+static bool it6505_aux_i2c_reply_nack(u8 reply)
+{
+ return reply == DP_AUX_NATIVE_REPLY_NACK || reply == DP_AUX_I2C_REPLY_NACK;
+}
+
+static int it6505_aux_i2c_wait(struct it6505 *it6505, u8 *reply)
+{
+ int err = 0;
+ unsigned long timeout;
+ struct device *dev = it6505->dev;
+
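+ /* the extra jiffy compensates for timer-tick granularity */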
+ timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+ do {
+ if (it6505_read(it6505, REG_AUX_USER_CTRL) & AUX_EVENT)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "Timed out waiting AUX I2C, BUSY = %X\n",
+ it6505_aux_op_finished(it6505));
+ err = -ETIMEDOUT;
+ goto end_aux_i2c_wait;
+ }
+ usleep_range(300, 800);
+ } while (!it6505_aux_op_finished(it6505));
+
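+ /* the AUX reply code is reported in the high nibble of REG_AUX_USER_REPLY */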
+ *reply = it6505_read(it6505, REG_AUX_USER_REPLY) >> 4;
+
+ if (*reply == 0)
+ goto end_aux_i2c_wait;
+
+ if (it6505_aux_i2c_reply_defer(*reply))
+ err = -EBUSY;
+ else if (it6505_aux_i2c_reply_nack(*reply))
+ err = -ENXIO;
+
+end_aux_i2c_wait:
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, USER_AUX_DONE, USER_AUX_DONE);
+ return err;
+}
+
+static int it6505_aux_i2c_readb(struct it6505 *it6505, u8 *buf, size_t size, u8 *reply)
+{
+ int ret, i;
+ int retry;
+
+ for (retry = 0; retry < AUX_I2C_DEFER_RETRY; retry++) {
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_READ);
+
+ ret = it6505_aux_i2c_wait(it6505, reply);
+ if (it6505_aux_i2c_reply_defer(*reply))
+ continue;
+ if (ret >= 0)
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ buf[i] = it6505_read(it6505, REG_AUX_USER_RXB(i));
+
+ return size;
+}
+
+static int it6505_aux_i2c_writeb(struct it6505 *it6505, u8 *buf, size_t size, u8 *reply)
+{
+ int i, ret;
+ int retry;
+
+ for (i = 0; i < size; i++)
+ it6505_write(it6505, REG_AUX_OUT_DATA0 + i, buf[i]);
+
+ for (retry = 0; retry < AUX_I2C_DEFER_RETRY; retry++) {
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_WRITE);
+
+ ret = it6505_aux_i2c_wait(it6505, reply);
+ if (it6505_aux_i2c_reply_defer(*reply))
+ continue;
+ if (ret >= 0)
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return size;
+}
+
+static ssize_t it6505_aux_i2c_operation(struct it6505 *it6505,
+ struct drm_dp_aux_msg *msg)
+{
+ int ret;
+ ssize_t request_size, data_cnt = 0;
+ u8 *buffer = msg->buffer;
+
+ /* set AUX user mode */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_USER_MODE | AUX_NO_SEGMENT_WR, AUX_USER_MODE);
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, EN_USER_AUX, EN_USER_AUX);
+ /* clear AUX FIFO */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO);
+
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO, 0x00);
+
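+ /* place the 7-bit I2C target address, shifted left by one, in the middle AUX address byte */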
+ it6505_write(it6505, REG_AUX_ADR_0_7, 0x00);
+ it6505_write(it6505, REG_AUX_ADR_8_15, msg->address << 1);
+
+ if (msg->size == 0) {
+ /* zero-length transfer: issue an I2C START/STOP only */
+ it6505_write(it6505, REG_AUX_ADR_16_19, msg->request);
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_ADR);
+ ret = it6505_aux_i2c_wait(it6505, &msg->reply);
+ goto end_aux_i2c_transfer;
+ }
+
+ /* I2C data transfer, in chunks of at most AUX_I2C_MAX_SIZE bytes */
+ data_cnt = 0;
+ do {
+ request_size = min_t(ssize_t, msg->size - data_cnt, AUX_I2C_MAX_SIZE);
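+ /* low nibble: request type; high nibble: transfer length minus one */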
+ it6505_write(it6505, REG_AUX_ADR_16_19,
+ msg->request | ((request_size - 1) << 4));
+ if ((msg->request & DP_AUX_I2C_READ) == DP_AUX_I2C_READ)
+ ret = it6505_aux_i2c_readb(it6505, &buffer[data_cnt],
+ request_size, &msg->reply);
+ else
+ ret = it6505_aux_i2c_writeb(it6505, &buffer[data_cnt],
+ request_size, &msg->reply);
+
+ if (ret < 0)
+ goto end_aux_i2c_transfer;
+
+ data_cnt += request_size;
+ } while (data_cnt < msg->size);
+ ret = data_cnt;
+end_aux_i2c_transfer:
+
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, EN_USER_AUX, 0);
+ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, 0);
+ return ret;
+}
+
+static ssize_t it6505_aux_i2c_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct it6505 *it6505 = container_of(aux, struct it6505, aux);
+
+ guard(mutex)(&it6505->aux_lock);
+ return it6505_aux_i2c_operation(it6505, msg);
+}
+
static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -1105,9 +1290,8 @@ static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
int ret;
enum aux_cmd_reply reply;
- /* IT6505 doesn't support arbitrary I2C read / write. */
if (is_i2c)
- return -EINVAL;
+ return it6505_aux_i2c_transfer(aux, msg);
switch (msg->request) {
case DP_AUX_NATIVE_READ:
@@ -1178,6 +1362,37 @@ static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block,
return 0;
}
+static int it6505_get_ksvlist(struct it6505 *it6505, u8 *buf, size_t len)
+{
+ struct device *dev = it6505->dev;
+ enum aux_cmd_reply reply;
+ int request_size, ret;
+ int i = 0;
+
+ do {
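+ /* 15 is the largest multiple of the 5-byte KSV length that fits the 16-byte AUX FIFO */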
+ request_size = min_t(int, (int)len - i, 15);
+
+ ret = it6505_aux_do_transfer(it6505, CMD_AUX_GET_KSV_LIST,
+ DP_AUX_HDCP_KSV_FIFO,
+ buf + i, request_size, &reply);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "request_size = %d, ret =%d", request_size, ret);
+ if (ret < 0)
+ return ret;
+
+ i += request_size;
+ } while (i < len);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "ksv read cnt = %d down_stream_cnt=%d ", i, i / 5);
+
+ for (i = 0 ; i < len; i += 5) {
+ DRM_DEV_DEBUG_DRIVER(dev, "ksv[%d] = %02X%02X%02X%02X%02X",
+ i / 5, buf[i], buf[i + 1], buf[i + 2], buf[i + 3], buf[i + 4]);
+ }
+
+ return len;
+}
+
static void it6505_variable_config(struct it6505 *it6505)
{
it6505->link_rate_bw_code = HBR;
@@ -1959,7 +2174,7 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
struct device *dev = it6505->dev;
u8 binfo[2];
- int down_stream_count, i, err, msg_count = 0;
+ int down_stream_count, err, msg_count = 0;
err = it6505_get_dpcd(it6505, DP_AUX_HDCP_BINFO, binfo,
ARRAY_SIZE(binfo));
@@ -1984,18 +2199,11 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
down_stream_count);
return 0;
}
+ err = it6505_get_ksvlist(it6505, sha1_input, down_stream_count * 5);
+ if (err < 0)
+ return err;
- for (i = 0; i < down_stream_count; i++) {
- err = it6505_get_dpcd(it6505, DP_AUX_HDCP_KSV_FIFO +
- (i % 3) * DRM_HDCP_KSV_LEN,
- sha1_input + msg_count,
- DRM_HDCP_KSV_LEN);
-
- if (err < 0)
- return err;
-
- msg_count += 5;
- }
+ msg_count += down_stream_count * 5;
it6505->hdcp_down_stream_count = down_stream_count;
sha1_input[msg_count++] = binfo[0];
@@ -2023,7 +2231,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
u8 av[5][4], bv[5][4];
- int i, err;
+ int i, err, retry;
i = it6505_setup_sha1_input(it6505, it6505->sha1_input);
if (i <= 0) {
@@ -2032,22 +2240,28 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
}
it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+ /* HDCP 1B-05: retry reading V' up to 3 times */
+ for (retry = 0; retry < 3; retry++) {
+ err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
+ sizeof(bv));
- err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
- sizeof(bv));
+ if (err < 0) {
+ dev_err(dev, "Read V' value Fail %d", retry);
+ continue;
+ }
- if (err < 0) {
- dev_err(dev, "Read V' value Fail");
- return false;
- }
+ for (i = 0; i < 5; i++) {
+ if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
+ bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ break;
- for (i = 0; i < 5; i++)
- if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
- bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
- return false;
+ DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
+ return true;
+ }
+ }
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!");
- return true;
+ DRM_DEV_DEBUG_DRIVER(dev, "V' NOT match!! %d", retry);
+ return false;
}
static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
@@ -2055,12 +2269,13 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
struct it6505 *it6505 = container_of(work, struct it6505,
hdcp_wait_ksv_list);
struct device *dev = it6505->dev;
- unsigned int timeout = 5000;
- u8 bstatus = 0;
+ u8 bstatus;
bool ksv_list_check;
+ /* HDCP 1B-04: wait up to 5s for the KSV list to become ready */
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(5000) + 1;
- timeout /= 20;
- while (timeout > 0) {
+ for (;;) {
if (!it6505_get_sink_hpd_status(it6505))
return;
@@ -2069,27 +2284,23 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
if (bstatus & DP_BSTATUS_READY)
break;
- msleep(20);
- timeout--;
- }
+ if (time_after(jiffies, timeout)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "KSV list wait timeout");
+ goto timeout;
+ }
- if (timeout == 0) {
- DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed");
- goto timeout;
+ msleep(20);
}
ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505);
DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s",
ksv_list_check ? "pass" : "fail");
- if (ksv_list_check) {
- it6505_set_bits(it6505, REG_HDCP_TRIGGER,
- HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
+
+ if (ksv_list_check)
return;
- }
+
timeout:
- it6505_set_bits(it6505, REG_HDCP_TRIGGER,
- HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
- HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL);
+ it6505_start_hdcp(it6505);
}
static void it6505_hdcp_work(struct work_struct *work)
@@ -2312,14 +2523,20 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector);
if (dp_irq_vector & DP_CP_IRQ) {
- it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
- HDCP_TRIGGER_CPIRQ);
-
bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS);
if (bstatus < 0)
return bstatus;
DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus);
+
+ /* Check BSTATUS when a CP_IRQ is received */
+ if (bstatus & DP_BSTATUS_R0_PRIME_READY &&
+ it6505->hdcp_status == HDCP_AUTH_GOING)
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
+ HDCP_TRIGGER_CPIRQ);
+ else if (bstatus & (DP_BSTATUS_REAUTH_REQ | DP_BSTATUS_LINK_FAILURE) &&
+ it6505->hdcp_status == HDCP_AUTH_DONE)
+ it6505_start_hdcp(it6505);
}
ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
@@ -2456,7 +2673,11 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
- DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
+ DRM_DEV_DEBUG_DRIVER(dev, "HDCP repeater R0 event Interrupt");
+ /* HDCP 1B-01: encryption should start once R0' is ready */
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
+
schedule_work(&it6505->hdcp_wait_ksv_list);
}
@@ -3497,7 +3718,7 @@ static void it6505_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id it6505_id[] = {
- { "it6505", 0 },
+ { "it6505" },
{ }
};
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 35ae3f0e8f51..23edcde6b9a7 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1450,8 +1450,10 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
dev_dbg(dev, "No connector present, passing empty EDID data");
memset(buf, 0, len);
} else {
+ mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
+ mutex_unlock(&ctx->connector->eld_mutex);
}
mutex_unlock(&ctx->lock);
@@ -1464,7 +1466,6 @@ static const struct hdmi_codec_ops it66121_audio_codec_ops = {
.audio_shutdown = it66121_audio_shutdown,
.mute_stream = it66121_audio_mute,
.get_eld = it66121_audio_get_eld,
- .no_capture_mute = 1,
};
static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
@@ -1474,11 +1475,12 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
.i2s = 1, /* Only i2s support for now */
.spdif = 0,
.max_i2s_channels = 8,
+ .no_capture_mute = 1,
};
dev_dbg(dev, "%s\n", __func__);
- if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
+ if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_info(dev, "No \"#sound-dai-cells\", no audio\n");
return 0;
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index e265ab3c8c92..52da204f5740 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -815,8 +815,8 @@ static const struct of_device_id lt8912_dt_match[] = {
MODULE_DEVICE_TABLE(of, lt8912_dt_match);
static const struct i2c_device_id lt8912_id[] = {
- {"lt8912", 0},
- {},
+ { "lt8912" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, lt8912_id);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index c8881796fba4..999ddebb832d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -773,7 +773,7 @@ static void lt9211_remove(struct i2c_client *client)
drm_bridge_remove(&ctx->bridge);
}
-static struct i2c_device_id lt9211_id[] = {
+static const struct i2c_device_id lt9211_id[] = {
{ "lontium,lt9211" },
{},
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 1b31fdebe164..e650cd83fc8d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -45,7 +45,6 @@ struct lt9611 {
struct device_node *dsi1_node;
struct mipi_dsi_device *dsi0;
struct mipi_dsi_device *dsi1;
- struct platform_device *audio_pdev;
bool ac_mode;
@@ -757,7 +756,6 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- unsigned long long rate;
if (mode->hdisplay > 3840)
return MODE_BAD_HVALUE;
@@ -765,17 +763,7 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->hdisplay > 2000 && !lt9611->dsi1_node)
return MODE_PANEL;
- rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
- return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate);
-}
-
-static int lt9611_bridge_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return drm_atomic_helper_connector_hdmi_check(conn_state->connector,
- conn_state->state);
+ return MODE_OK;
}
static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
@@ -866,6 +854,10 @@ static int lt9611_hdmi_clear_infoframe(struct drm_bridge *bridge,
unsigned int mask;
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ mask = LT9611_INFOFRAME_AUDIO;
+ break;
+
case HDMI_INFOFRAME_TYPE_AVI:
mask = LT9611_INFOFRAME_AVI;
break;
@@ -899,6 +891,11 @@ static int lt9611_hdmi_write_infoframe(struct drm_bridge *bridge,
int i;
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ mask = LT9611_INFOFRAME_AUDIO;
+ addr = 0x84b2;
+ break;
+
case HDMI_INFOFRAME_TYPE_AVI:
mask = LT9611_INFOFRAME_AVI;
addr = 0x8440;
@@ -942,6 +939,55 @@ lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
return MODE_OK;
}
+static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
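+ /* vendor-defined register sequence enabling the audio input path, carried over from the former hdmi-codec startup hook */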
+ regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ regmap_write(lt9611->regmap, 0x82d7, 0x04);
+
+ regmap_write(lt9611->regmap, 0x8406, 0x08);
+ regmap_write(lt9611->regmap, 0x8407, 0x10);
+
+ regmap_write(lt9611->regmap, 0x8434, 0xd5);
+
+ return 0;
+}
+
+static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ if (hparms->sample_rate == 48000)
+ regmap_write(lt9611->regmap, 0x840f, 0x2b);
+ else if (hparms->sample_rate == 96000)
+ regmap_write(lt9611->regmap, 0x840f, 0xab);
+ else
+ return -EINVAL;
+
+ regmap_write(lt9611->regmap, 0x8435, 0x00);
+ regmap_write(lt9611->regmap, 0x8436, 0x18);
+ regmap_write(lt9611->regmap, 0x8437, 0x00);
+
+ return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &hparms->cea);
+}
+
+static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
+
+ regmap_write(lt9611->regmap, 0x8406, 0x00);
+ regmap_write(lt9611->regmap, 0x8407, 0x00);
+}
+
static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
@@ -949,7 +995,6 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.edid_read = lt9611_bridge_edid_read,
.hpd_enable = lt9611_bridge_hpd_enable,
- .atomic_check = lt9611_bridge_atomic_check,
.atomic_pre_enable = lt9611_bridge_atomic_pre_enable,
.atomic_enable = lt9611_bridge_atomic_enable,
.atomic_disable = lt9611_bridge_atomic_disable,
@@ -962,6 +1007,10 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.hdmi_tmds_char_rate_valid = lt9611_hdmi_tmds_char_rate_valid,
.hdmi_write_infoframe = lt9611_hdmi_write_infoframe,
.hdmi_clear_infoframe = lt9611_hdmi_clear_infoframe,
+
+ .hdmi_audio_startup = lt9611_hdmi_audio_startup,
+ .hdmi_audio_prepare = lt9611_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = lt9611_hdmi_audio_shutdown,
};
static int lt9611_parse_dt(struct device *dev,
@@ -1015,101 +1064,6 @@ static int lt9611_read_device_rev(struct lt9611 *lt9611)
return ret;
}
-static int lt9611_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
-{
- struct lt9611 *lt9611 = data;
-
- if (hparms->sample_rate == 48000)
- regmap_write(lt9611->regmap, 0x840f, 0x2b);
- else if (hparms->sample_rate == 96000)
- regmap_write(lt9611->regmap, 0x840f, 0xab);
- else
- return -EINVAL;
-
- regmap_write(lt9611->regmap, 0x8435, 0x00);
- regmap_write(lt9611->regmap, 0x8436, 0x18);
- regmap_write(lt9611->regmap, 0x8437, 0x00);
-
- return 0;
-}
-
-static int lt9611_audio_startup(struct device *dev, void *data)
-{
- struct lt9611 *lt9611 = data;
-
- regmap_write(lt9611->regmap, 0x82d6, 0x8c);
- regmap_write(lt9611->regmap, 0x82d7, 0x04);
-
- regmap_write(lt9611->regmap, 0x8406, 0x08);
- regmap_write(lt9611->regmap, 0x8407, 0x10);
-
- regmap_write(lt9611->regmap, 0x8434, 0xd5);
-
- return 0;
-}
-
-static void lt9611_audio_shutdown(struct device *dev, void *data)
-{
- struct lt9611 *lt9611 = data;
-
- regmap_write(lt9611->regmap, 0x8406, 0x00);
- regmap_write(lt9611->regmap, 0x8407, 0x00);
-}
-
-static int lt9611_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
- * HDMI sound should be located as reg = <2>
- * Then, it is sound port 0
- */
- if (of_ep.port == 2)
- return 0;
-
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops lt9611_codec_ops = {
- .hw_params = lt9611_hdmi_hw_params,
- .audio_shutdown = lt9611_audio_shutdown,
- .audio_startup = lt9611_audio_startup,
- .get_dai_id = lt9611_hdmi_i2s_get_dai_id,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &lt9611_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-static int lt9611_audio_init(struct device *dev, struct lt9611 *lt9611)
-{
- codec_data.data = lt9611;
- lt9611->audio_pdev =
- platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(lt9611->audio_pdev);
-}
-
-static void lt9611_audio_exit(struct lt9611 *lt9611)
-{
- if (lt9611->audio_pdev) {
- platform_device_unregister(lt9611->audio_pdev);
- lt9611->audio_pdev = NULL;
- }
-}
-
static int lt9611_probe(struct i2c_client *client)
{
struct lt9611 *lt9611;
@@ -1173,6 +1127,9 @@ static int lt9611_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt9611);
+ /* Disable the Audio InfoFrame; it is enabled by default */
+ regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
+
lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
@@ -1181,6 +1138,9 @@ static int lt9611_probe(struct i2c_client *client)
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
+ lt9611->bridge.hdmi_audio_dev = dev;
+ lt9611->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ lt9611->bridge.hdmi_audio_dai_port = 2;
drm_bridge_add(&lt9611->bridge);
@@ -1202,10 +1162,6 @@ static int lt9611_probe(struct i2c_client *client)
lt9611_enable_hpd_interrupts(lt9611);
- ret = lt9611_audio_init(dev, lt9611);
- if (ret)
- goto err_remove_bridge;
-
return 0;
err_remove_bridge:
@@ -1226,7 +1182,6 @@ static void lt9611_remove(struct i2c_client *client)
struct lt9611 *lt9611 = i2c_get_clientdata(client);
disable_irq(client->irq);
- lt9611_audio_exit(lt9611);
drm_bridge_remove(&lt9611->bridge);
regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);
@@ -1235,8 +1190,8 @@ static void lt9611_remove(struct i2c_client *client)
of_node_put(lt9611->dsi0_node);
}
-static struct i2c_device_id lt9611_id[] = {
- { "lontium,lt9611", 0 },
+static const struct i2c_device_id lt9611_id[] = {
+ { "lontium,lt9611" },
{}
};
MODULE_DEVICE_TABLE(i2c, lt9611_id);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 4d1d40e1f1b4..f4c3ff1fdc69 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -522,7 +522,8 @@ static void lt9611uxc_audio_shutdown(struct device *dev, void *data)
}
static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -913,8 +914,8 @@ static void lt9611uxc_remove(struct i2c_client *client)
of_node_put(lt9611uxc->dsi0_node);
}
-static struct i2c_device_id lt9611uxc_id[] = {
- { "lontium,lt9611uxc", 0 },
+static const struct i2c_device_id lt9611uxc_id[] = {
+ { "lontium,lt9611uxc" },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 37f1acf5c0f8..a3dcee62e7a5 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -318,8 +318,8 @@ static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
- {"stdp4028_ge_fw", 0},
- {},
+ { "stdp4028_ge_fw" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, stdp4028_ge_b850v3_fw_i2c_table);
@@ -365,8 +365,8 @@ static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
- {"stdp2690_ge_fw", 0},
- {},
+ { "stdp2690_ge_fw" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, stdp2690_ge_b850v3_fw_i2c_table);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index e77aab965fcf..44e36ae66db4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -319,8 +319,8 @@ static void ptn3460_remove(struct i2c_client *client)
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
- {"ptn3460", 0},
- {},
+ { "ptn3460" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 9be9cc5b9025..bf2d1632b020 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -815,7 +815,8 @@ static int sii902x_audio_get_eld(struct device *dev, void *data,
}
static int sii902x_audio_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -840,7 +841,6 @@ static const struct hdmi_codec_ops sii902x_audio_codec_ops = {
.mute_stream = sii902x_audio_mute,
.get_eld = sii902x_audio_get_eld,
.get_dai_id = sii902x_audio_get_dai_id,
- .no_capture_mute = 1,
};
static int sii902x_audio_codec_init(struct sii902x *sii902x,
@@ -863,11 +863,12 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
.i2s = 1, /* Only i2s support for now. */
.spdif = 0,
.max_i2s_channels = 0,
+ .no_capture_mute = 1,
};
u8 lanes[4];
int num_lanes, i;
- if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
+ if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_dbg(dev, "%s: No \"#sound-dai-cells\", no audio\n",
__func__);
return 0;
@@ -1239,8 +1240,8 @@ static const struct of_device_id sii902x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
static const struct i2c_device_id sii902x_i2c_ids[] = {
- { "sii9022", 0 },
- { },
+ { "sii9022" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 0c74cdc07032..cd7837c9a6e0 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -945,8 +945,8 @@ static const struct of_device_id sii9234_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii9234_dt_match);
static const struct i2c_device_id sii9234_id[] = {
- { "SII9234", 0 },
- { },
+ { "SII9234" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii9234_id);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 26b8d137bce0..28a2e1ee04b2 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2368,8 +2368,8 @@ static const struct of_device_id sii8620_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii8620_dt_match);
static const struct i2c_device_id sii8620_id[] = {
- { "sii8620", 0 },
- { },
+ { "sii8620" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii8620_id);
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index ca416dab156d..f3ab2f985f8c 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -59,3 +59,9 @@ config DRM_DW_MIPI_DSI
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
+
+config DRM_DW_MIPI_DSI2
+ tristate
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 9869d9651ed1..9dc376d220ad 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o
obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o
+obj-$(CONFIG_DRM_DW_MIPI_DSI2) += dw-mipi-dsi2.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index f1c5a8d0fa90..2c903c9fe805 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -148,7 +148,8 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
}
static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 181c5164b231..b281cabfe992 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -361,22 +361,6 @@ static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
return 0;
}
-static int dw_hdmi_qp_bridge_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct dw_hdmi_qp *hdmi = bridge->driver_private;
- int ret;
-
- ret = drm_atomic_helper_connector_hdmi_check(conn_state->connector,
- conn_state->state);
- if (ret)
- dev_dbg(hdmi->dev, "%s failed: %d\n", __func__, ret);
-
- return ret;
-}
-
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
@@ -442,16 +426,14 @@ dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge,
}
static enum drm_mode_status
-dw_hdmi_qp_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_info *info,
- const struct drm_display_mode *mode)
+dw_hdmi_qp_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long rate)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
- unsigned long long rate;
- rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
if (rate > HDMI14_MAX_TMDSCLK) {
- dev_dbg(hdmi->dev, "Unsupported mode clock: %d\n", mode->clock);
+ dev_dbg(hdmi->dev, "Unsupported TMDS char rate: %lld\n", rate);
return MODE_CLOCK_HIGH;
}
@@ -505,12 +487,11 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_check = dw_hdmi_qp_bridge_atomic_check,
.atomic_enable = dw_hdmi_qp_bridge_atomic_enable,
.atomic_disable = dw_hdmi_qp_bridge_atomic_disable,
.detect = dw_hdmi_qp_bridge_detect,
.edid_read = dw_hdmi_qp_bridge_edid_read,
- .mode_valid = dw_hdmi_qp_bridge_mode_valid,
+ .hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
.hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
};
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
index 2115b8ef0bd6..72987e6c4689 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Algea Cao <algea.cao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
new file mode 100644
index 000000000000..d7569bf2d9c3
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Modified by Heiko Stuebner <heiko.stuebner@cherry.de>
+ * This generic Synopsys DesignWare MIPI DSI2 host driver is based on the
+ * Rockchip version from rockchip/dw-mipi-dsi2.c converted to use bridge APIs.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/bridge/dw_mipi_dsi2.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+#define DSI2_PWR_UP 0x000c
+#define RESET 0
+#define POWER_UP BIT(0)
+#define DSI2_SOFT_RESET 0x0010
+#define SYS_RSTN BIT(2)
+#define PHY_RSTN BIT(1)
+#define IPI_RSTN BIT(0)
+#define INT_ST_MAIN 0x0014
+#define DSI2_MODE_CTRL 0x0018
+#define DSI2_MODE_STATUS 0x001c
+#define DSI2_CORE_STATUS 0x0020
+#define PRI_RD_DATA_AVAIL BIT(26)
+#define PRI_FIFOS_NOT_EMPTY BIT(25)
+#define PRI_BUSY BIT(24)
+#define CRI_RD_DATA_AVAIL BIT(18)
+#define CRT_FIFOS_NOT_EMPTY BIT(17)
+#define CRI_BUSY BIT(16)
+#define IPI_FIFOS_NOT_EMPTY BIT(9)
+#define IPI_BUSY BIT(8)
+#define CORE_FIFOS_NOT_EMPTY BIT(1)
+#define CORE_BUSY BIT(0)
+#define MANUAL_MODE_CFG 0x0024
+#define MANUAL_MODE_EN BIT(0)
+#define DSI2_TIMEOUT_HSTX_CFG 0x0048
+#define TO_HSTX(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_HSTXRDY_CFG 0x004c
+#define TO_HSTXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPRX_CFG 0x0050
+#define TO_LPRXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXRDY_CFG 0x0054
+#define TO_LPTXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXTRIG_CFG 0x0058
+#define TO_LPTXTRIG(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXULPS_CFG 0x005c
+#define TO_LPTXULPS(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_BTA_CFG 0x60
+#define TO_BTA(x) FIELD_PREP(GENMASK(15, 0), x)
+
+#define DSI2_PHY_MODE_CFG 0x0100
+#define PPI_WIDTH(x) FIELD_PREP(GENMASK(9, 8), x)
+#define PHY_LANES(x) FIELD_PREP(GENMASK(5, 4), (x) - 1)
+#define PHY_TYPE(x) FIELD_PREP(BIT(0), x)
+#define DSI2_PHY_CLK_CFG 0x0104
+#define PHY_LPTX_CLK_DIV(x) FIELD_PREP(GENMASK(12, 8), x)
+#define CLK_TYPE_MASK BIT(0)
+#define NON_CONTINUOUS_CLK BIT(0)
+#define CONTINUOUS_CLK 0
+#define DSI2_PHY_LP2HS_MAN_CFG 0x010c
+#define PHY_LP2HS_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_HS2LP_MAN_CFG 0x0114
+#define PHY_HS2LP_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_MAX_RD_T_MAN_CFG 0x011c
+#define PHY_MAX_RD_TIME(x) FIELD_PREP(GENMASK(26, 0), x)
+#define DSI2_PHY_ESC_CMD_T_MAN_CFG 0x0124
+#define PHY_ESC_CMD_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_ESC_BYTE_T_MAN_CFG 0x012c
+#define PHY_ESC_BYTE_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+
+#define DSI2_PHY_IPI_RATIO_MAN_CFG 0x0134
+#define PHY_IPI_RATIO(x) FIELD_PREP(GENMASK(21, 0), x)
+#define DSI2_PHY_SYS_RATIO_MAN_CFG 0x013c
+#define PHY_SYS_RATIO(x) FIELD_PREP(GENMASK(16, 0), x)
+
+#define DSI2_DSI_GENERAL_CFG 0x0200
+#define BTA_EN BIT(1)
+#define EOTP_TX_EN BIT(0)
+#define DSI2_DSI_VCID_CFG 0x0204
+#define TX_VCID(x) FIELD_PREP(GENMASK(1, 0), x)
+#define DSI2_DSI_SCRAMBLING_CFG 0x0208
+#define SCRAMBLING_SEED(x) FIELD_PREP(GENMASK(31, 16), x)
+#define SCRAMBLING_EN BIT(0)
+#define DSI2_DSI_VID_TX_CFG 0x020c
+#define LPDT_DISPLAY_CMD_EN BIT(20)
+#define BLK_VFP_HS_EN BIT(14)
+#define BLK_VBP_HS_EN BIT(13)
+#define BLK_VSA_HS_EN BIT(12)
+#define BLK_HFP_HS_EN BIT(6)
+#define BLK_HBP_HS_EN BIT(5)
+#define BLK_HSA_HS_EN BIT(4)
+#define VID_MODE_TYPE(x) FIELD_PREP(GENMASK(1, 0), x)
+#define DSI2_CRI_TX_HDR 0x02c0
+#define CMD_TX_MODE(x) FIELD_PREP(BIT(24), x)
+#define DSI2_CRI_TX_PLD 0x02c4
+#define DSI2_CRI_RX_HDR 0x02c8
+#define DSI2_CRI_RX_PLD 0x02cc
+
+#define DSI2_IPI_COLOR_MAN_CFG 0x0300
+#define IPI_DEPTH(x) FIELD_PREP(GENMASK(7, 4), x)
+#define IPI_DEPTH_5_6_5_BITS 0x02
+#define IPI_DEPTH_6_BITS 0x03
+#define IPI_DEPTH_8_BITS 0x05
+#define IPI_DEPTH_10_BITS 0x06
+#define IPI_FORMAT(x) FIELD_PREP(GENMASK(3, 0), x)
+#define IPI_FORMAT_RGB 0x0
+#define IPI_FORMAT_DSC 0x0b
+#define DSI2_IPI_VID_HSA_MAN_CFG 0x0304
+#define VID_HSA_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HBP_MAN_CFG 0x030c
+#define VID_HBP_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HACT_MAN_CFG 0x0314
+#define VID_HACT_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HLINE_MAN_CFG 0x031c
+#define VID_HLINE_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_VSA_MAN_CFG 0x0324
+#define VID_VSA_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_VID_VBP_MAN_CFG 0x032c
+#define VID_VBP_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_VID_VACT_MAN_CFG 0x0334
+#define VID_VACT_LINES(x) FIELD_PREP(GENMASK(13, 0), x)
+#define DSI2_IPI_VID_VFP_MAN_CFG 0x033c
+#define VID_VFP_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_PIX_PKT_CFG 0x0344
+#define MAX_PIX_PKT(x) FIELD_PREP(GENMASK(15, 0), x)
+
+#define DSI2_INT_ST_PHY 0x0400
+#define DSI2_INT_MASK_PHY 0x0404
+#define DSI2_INT_ST_TO 0x0410
+#define DSI2_INT_MASK_TO 0x0414
+#define DSI2_INT_ST_ACK 0x0420
+#define DSI2_INT_MASK_ACK 0x0424
+#define DSI2_INT_ST_IPI 0x0430
+#define DSI2_INT_MASK_IPI 0x0434
+#define DSI2_INT_ST_FIFO 0x0440
+#define DSI2_INT_MASK_FIFO 0x0444
+#define DSI2_INT_ST_PRI 0x0450
+#define DSI2_INT_MASK_PRI 0x0454
+#define DSI2_INT_ST_CRI 0x0460
+#define DSI2_INT_MASK_CRI 0x0464
+#define DSI2_INT_FORCE_CRI 0x0468
+#define DSI2_MAX_REGISGER DSI2_INT_FORCE_CRI
+
+#define MODE_STATUS_TIMEOUT_US 10000
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+enum vid_mode_type {
+ VID_MODE_TYPE_NON_BURST_SYNC_PULSES,
+ VID_MODE_TYPE_NON_BURST_SYNC_EVENTS,
+ VID_MODE_TYPE_BURST,
+};
+
+enum mode_ctrl {
+ IDLE_MODE,
+ AUTOCALC_MODE,
+ COMMAND_MODE,
+ VIDEO_MODE,
+ DATA_STREAM_MODE,
+ VIDEO_TEST_MODE,
+ DATA_STREAM_TEST_MODE,
+};
+
+enum ppi_width {
+ PPI_WIDTH_8_BITS,
+ PPI_WIDTH_16_BITS,
+ PPI_WIDTH_32_BITS,
+};
+
+struct cmd_header {
+ u8 cmd_type;
+ u8 delay;
+ u8 payload_length;
+};
+
+struct dw_mipi_dsi2 {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *pclk;
+ struct clk *sys_clk;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 channel;
+ u32 lanes;
+ u32 format;
+ unsigned long mode_flags;
+
+ struct drm_display_mode mode;
+ const struct dw_mipi_dsi2_plat_data *plat_data;
+};
+
+static inline struct dw_mipi_dsi2 *host_to_dsi2(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dw_mipi_dsi2, dsi_host);
+}
+
+static inline struct dw_mipi_dsi2 *bridge_to_dsi2(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_mipi_dsi2, bridge);
+}
+
+static int cri_fifos_wait_avail(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 sts, mask;
+ int ret;
+
+ mask = CRI_BUSY | CRT_FIFOS_NOT_EMPTY;
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_CORE_STATUS, sts,
+ !(sts & mask), 0, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi2->dev, "command interface is busy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_set_vid_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val = 0, mode;
+ int ret;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ val |= BLK_HFP_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ val |= BLK_HBP_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ val |= BLK_HSA_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ val |= VID_MODE_TYPE_BURST;
+ else if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
+ else
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
+
+ regmap_write(dsi2->regmap, DSI2_DSI_VID_TX_CFG, val);
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, VIDEO_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & VIDEO_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter video mode\n");
+}
+
+static void dw_mipi_dsi2_set_data_stream_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 mode;
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, DATA_STREAM_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & DATA_STREAM_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter data stream mode\n");
+}
+
+static void dw_mipi_dsi2_set_cmd_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 mode;
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, COMMAND_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & COMMAND_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter data stream mode\n");
+}
+
+static void dw_mipi_dsi2_host_softrst(struct dw_mipi_dsi2 *dsi2)
+{
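+ /* assert the active-low system/phy/IPI resets, then release them together */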
+ regmap_write(dsi2->regmap, DSI2_SOFT_RESET, 0x0);
+ usleep_range(50, 100);
+ regmap_write(dsi2->regmap, DSI2_SOFT_RESET,
+ SYS_RSTN | PHY_RSTN | IPI_RSTN);
+}
+
+static void dw_mipi_dsi2_phy_clk_mode_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 sys_clk, esc_clk_div;
+ u32 val = 0;
+
+ /*
+ * clk_type must be NON_CONTINUOUS_CLK until the initial
+ * deskew calibration has been sent.
+ */
+ val |= NON_CONTINUOUS_CLK;
+
+ /* The escape clock must not exceed 20 MHz; the divider is computed against twice that target */
+ sys_clk = clk_get_rate(dsi2->sys_clk) / USEC_PER_SEC;
+ esc_clk_div = DIV_ROUND_UP(sys_clk, 20 * 2);
+ val |= PHY_LPTX_CLK_DIV(esc_clk_div);
+
+ regmap_write(dsi2->regmap, DSI2_PHY_CLK_CFG, val);
+}
+
+static void dw_mipi_dsi2_phy_ratio_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ struct drm_display_mode *mode = &dsi2->mode;
+ u64 sys_clk = clk_get_rate(dsi2->sys_clk);
+ u64 pixel_clk, ipi_clk, phy_hsclk;
+ u64 tmp;
+
+ /*
+ * in DPHY mode, the phy_hstx_clk is exactly 1/16 the Lane high-speed
+ * data rate; In CPHY mode, the phy_hstx_clk is exactly 1/7 the trio
+ * high speed symbol rate.
+ */
+ phy_hsclk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ /* IPI_RATIO_MAN_CFG = PHY_HSTX_CLK / IPI_CLK */
+ pixel_clk = mode->crtc_clock * MSEC_PER_SEC;
+ ipi_clk = pixel_clk / 4;
+
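+ /* the ratio registers take a 16.16 fixed-point value, hence the << 16 */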
+ tmp = DIV_ROUND_CLOSEST_ULL(phy_hsclk << 16, ipi_clk);
+ regmap_write(dsi2->regmap, DSI2_PHY_IPI_RATIO_MAN_CFG,
+ PHY_IPI_RATIO(tmp));
+
+ /*
+ * SYS_RATIO_MAN_CFG = PHY_HSTX_CLK / SYS_CLK
+ */
+ tmp = DIV_ROUND_CLOSEST_ULL(phy_hsclk << 16, sys_clk);
+ regmap_write(dsi2->regmap, DSI2_PHY_SYS_RATIO_MAN_CFG,
+ PHY_SYS_RATIO(tmp));
+}
+
+static void dw_mipi_dsi2_lp2hs_or_hs2lp_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ struct dw_mipi_dsi2_phy_timing timing;
+ int ret;
+
+ ret = phy_ops->get_timing(dsi2->plat_data->priv_data,
+ dsi2->lane_mbps, &timing);
+ if (ret)
+ dev_err(dsi2->dev, "Retrieving phy timings failed\n");
+
+ regmap_write(dsi2->regmap, DSI2_PHY_LP2HS_MAN_CFG, PHY_LP2HS_TIME(timing.data_lp2hs));
+ regmap_write(dsi2->regmap, DSI2_PHY_HS2LP_MAN_CFG, PHY_HS2LP_TIME(timing.data_hs2lp));
+}
+
+static void dw_mipi_dsi2_phy_init(struct dw_mipi_dsi2 *dsi2)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ struct dw_mipi_dsi2_phy_iface iface;
+ u32 val = 0;
+
+ phy_ops->get_interface(dsi2->plat_data->priv_data, &iface);
+
+ switch (iface.ppi_width) {
+ case 8:
+ val |= PPI_WIDTH(PPI_WIDTH_8_BITS);
+ break;
+ case 16:
+ val |= PPI_WIDTH(PPI_WIDTH_16_BITS);
+ break;
+ case 32:
+ val |= PPI_WIDTH(PPI_WIDTH_32_BITS);
+ break;
+ default:
+ /* Caught in probe */
+ break;
+ }
+
+ val |= PHY_LANES(dsi2->lanes);
+ val |= PHY_TYPE(DW_MIPI_DSI2_DPHY);
+ regmap_write(dsi2->regmap, DSI2_PHY_MODE_CFG, val);
+
+ dw_mipi_dsi2_phy_clk_mode_cfg(dsi2);
+ dw_mipi_dsi2_phy_ratio_cfg(dsi2);
+ dw_mipi_dsi2_lp2hs_or_hs2lp_cfg(dsi2);
+
+ /* phy configuration 8 - 10 */
+}
+
+static void dw_mipi_dsi2_tx_option_set(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val;
+
+ val = BTA_EN | EOTP_TX_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ val &= ~EOTP_TX_EN;
+
+ regmap_write(dsi2->regmap, DSI2_DSI_GENERAL_CFG, val);
+ regmap_write(dsi2->regmap, DSI2_DSI_VCID_CFG, TX_VCID(dsi2->channel));
+}
+
+static void dw_mipi_dsi2_ipi_color_coding_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val, color_depth;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color_depth = IPI_DEPTH_6_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color_depth = IPI_DEPTH_5_6_5_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ color_depth = IPI_DEPTH_8_BITS;
+ break;
+ }
+
+ val = IPI_DEPTH(color_depth) |
+ IPI_FORMAT(IPI_FORMAT_RGB);
+ regmap_write(dsi2->regmap, DSI2_IPI_COLOR_MAN_CFG, val);
+}
+
+static void dw_mipi_dsi2_vertical_timing_config(struct dw_mipi_dsi2 *dsi2,
+ const struct drm_display_mode *mode)
+{
+ u32 vactive, vsa, vfp, vbp;
+
+ vactive = mode->vdisplay;
+ vsa = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VSA_MAN_CFG, VID_VSA_LINES(vsa));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VBP_MAN_CFG, VID_VBP_LINES(vbp));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VACT_MAN_CFG, VID_VACT_LINES(vactive));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VFP_MAN_CFG, VID_VFP_LINES(vfp));
+}
+
+static void dw_mipi_dsi2_ipi_set(struct dw_mipi_dsi2 *dsi2)
+{
+ struct drm_display_mode *mode = &dsi2->mode;
+ u32 hline, hsa, hbp, hact;
+ u64 hline_time, hsa_time, hbp_time, hact_time, tmp;
+ u64 pixel_clk, phy_hs_clk;
+ u16 val;
+
+ val = mode->hdisplay;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_PIX_PKT_CFG, MAX_PIX_PKT(val));
+
+ dw_mipi_dsi2_ipi_color_coding_cfg(dsi2);
+
+ /*
+ * if the controller is intended to operate in data stream mode,
+ * no more steps are required.
+ */
+ if (!(dsi2->mode_flags & MIPI_DSI_MODE_VIDEO))
+ return;
+
+ hact = mode->hdisplay;
+ hsa = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+ hline = mode->htotal;
+
+ pixel_clk = mode->crtc_clock * MSEC_PER_SEC;
+
+ phy_hs_clk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
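+ /* horizontal timings are programmed in phy_hs_clk cycles as 16.16 fixed-point values */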
+ tmp = hsa * phy_hs_clk;
+ hsa_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HSA_MAN_CFG, VID_HSA_TIME(hsa_time));
+
+ tmp = hbp * phy_hs_clk;
+ hbp_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HBP_MAN_CFG, VID_HBP_TIME(hbp_time));
+
+ tmp = hact * phy_hs_clk;
+ hact_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HACT_MAN_CFG, VID_HACT_TIME(hact_time));
+
+ tmp = hline * phy_hs_clk;
+ hline_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HLINE_MAN_CFG, VID_HLINE_TIME(hline_time));
+
+ dw_mipi_dsi2_vertical_timing_config(dsi2, mode);
+}
+
+static void
+dw_mipi_dsi2_work_mode(struct dw_mipi_dsi2 *dsi2, u32 mode)
+{
+ /*
+ * select controller work in Manual mode
+ * Manual: MANUAL_MODE_EN
+ * Automatic: 0
+ */
+ regmap_write(dsi2->regmap, MANUAL_MODE_CFG, mode);
+}
+
+static int dw_mipi_dsi2_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ struct drm_bridge *bridge;
+ int ret;
+
+ if (device->lanes > dsi2->plat_data->max_data_lanes) {
+ dev_err(dsi2->dev, "the number of data lanes(%u) is too many\n",
+ device->lanes);
+ return -EINVAL;
+ }
+
+ dsi2->lanes = device->lanes;
+ dsi2->channel = device->channel;
+ dsi2->format = device->format;
+ dsi2->mode_flags = device->mode_flags;
+
+ bridge = devm_drm_of_get_bridge(dsi2->dev, dsi2->dev->of_node, 1, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ bridge->pre_enable_prev_first = true;
+ dsi2->panel_bridge = bridge;
+
+ drm_bridge_add(&dsi2->bridge);
+
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ int ret;
+
+ if (pdata->host_ops && pdata->host_ops->detach) {
+ ret = pdata->host_ops->detach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
+ drm_bridge_remove(&dsi2->bridge);
+
+ drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_gen_pkt_hdr_write(struct dw_mipi_dsi2 *dsi2,
+ u32 hdr_val, bool lpm)
+{
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_HDR, hdr_val | CMD_TX_MODE(lpm));
+
+ ret = cri_fifos_wait_avail(dsi2);
+ if (ret) {
+ dev_err(dsi2->dev, "failed to write command header\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_write(struct dw_mipi_dsi2 *dsi2,
+ const struct mipi_dsi_packet *packet, bool lpm)
+{
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32);
+ __le32 word;
+
+ /* Send payload */
+ while (len) {
+ if (len < pld_data_bytes) {
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_PLD, le32_to_cpu(word));
+ len = 0;
+ } else {
+ memcpy(&word, tx_buf, pld_data_bytes);
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_PLD, le32_to_cpu(word));
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+ }
+
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return dw_mipi_dsi2_gen_pkt_hdr_write(dsi2, le32_to_cpu(word), lpm);
+}
+
+static int dw_mipi_dsi2_read(struct dw_mipi_dsi2 *dsi2,
+ const struct mipi_dsi_msg *msg)
+{
+ u8 *payload = msg->rx_buf;
+ int i, j, ret, len = msg->rx_len;
+ u8 data_type;
+ u16 wc;
+ u32 val;
+
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_CORE_STATUS,
+ val, val & CRI_RD_DATA_AVAIL,
+ 100, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(dsi2->dev, "CRI has no available read data\n");
+ return ret;
+ }
+
+ regmap_read(dsi2->regmap, DSI2_CRI_RX_HDR, &val);
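+ /* bits [5:0] of the RX header carry the DSI data type */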
+ data_type = val & 0x3f;
+
+ if (mipi_dsi_packet_format_is_short(data_type)) {
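+ /* short packet: up to two parameter bytes are carried in the header itself */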
+ for (i = 0; i < len && i < 2; i++)
+ payload[i] = (val >> (8 * (i + 1))) & 0xff;
+
+ return 0;
+ }
+
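+ /* long packet: the 16-bit word count occupies bits [23:8] of the header */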
+ wc = (val >> 8) & 0xffff;
+ /* Receive payload */
+ for (i = 0; i < len && i < wc; i += 4) {
+ regmap_read(dsi2->regmap, DSI2_CRI_RX_PLD, &val);
+ for (j = 0; j < 4 && j + i < len && j + i < wc; j++)
+ payload[i + j] = val >> (8 * j);
+ }
+
+ return 0;
+}
+
+static ssize_t dw_mipi_dsi2_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ regmap_update_bits(dsi2->regmap, DSI2_DSI_VID_TX_CFG,
+ LPDT_DISPLAY_CMD_EN,
+ lpm ? LPDT_DISPLAY_CMD_EN : 0);
+
+ /* create a packet following the DSI protocol */
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(dsi2->dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
+
+ ret = cri_fifos_wait_avail(dsi2);
+ if (ret)
+ return ret;
+
+ ret = dw_mipi_dsi2_write(dsi2, &packet, lpm);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = dw_mipi_dsi2_read(dsi2, msg);
+ if (ret < 0)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
+ }
+
+ return nb_bytes;
+}
+
+static const struct mipi_dsi_host_ops dw_mipi_dsi2_host_ops = {
+ .attach = dw_mipi_dsi2_host_attach,
+ .detach = dw_mipi_dsi2_host_detach,
+ .transfer = dw_mipi_dsi2_host_transfer,
+};
+
+static u32 *
+dw_mipi_dsi2_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ u32 *input_fmts;
+
+ if (pdata->get_input_bus_fmts)
+ return pdata->get_input_bus_fmts(pdata->priv_data,
+ bridge, bridge_state,
+ crtc_state, conn_state,
+ output_fmt, num_input_fmts);
+
+ /* Fall back to MEDIA_BUS_FMT_FIXED as the only input format. */
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+ input_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int dw_mipi_dsi2_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ bool ret;
+
+ bridge_state->input_bus_cfg.flags =
+ DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ if (pdata->mode_fixup) {
+ ret = pdata->mode_fixup(pdata->priv_data, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_DRIVER("failed to fixup mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->mode));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_bridge_post_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_PIX_PKT_CFG, 0);
+
+ /*
+ * Switch to command mode before panel-bridge post_disable &
+ * panel unprepare.
+ * Note: panel-bridge disable & panel disable has been called
+ * before by the drm framework.
+ */
+ dw_mipi_dsi2_set_cmd_mode(dsi2);
+
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, RESET);
+
+ if (phy_ops->power_off)
+ phy_ops->power_off(dsi2->plat_data->priv_data);
+
+ clk_disable_unprepare(dsi2->sys_clk);
+ clk_disable_unprepare(dsi2->pclk);
+ pm_runtime_put(dsi2->dev);
+}
+
+static unsigned int dw_mipi_dsi2_get_lanes(struct dw_mipi_dsi2 *dsi2)
+{
+ /* single-dsi, so no other instance to consider */
+ return dsi2->lanes;
+}
+
+static void dw_mipi_dsi2_mode_set(struct dw_mipi_dsi2 *dsi2,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ void *priv_data = dsi2->plat_data->priv_data;
+ u32 lanes = dw_mipi_dsi2_get_lanes(dsi2);
+ int ret;
+
+ clk_prepare_enable(dsi2->pclk);
+ clk_prepare_enable(dsi2->sys_clk);
+
+ ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi2->mode_flags,
+ lanes, dsi2->format, &dsi2->lane_mbps);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
+
+ pm_runtime_get_sync(dsi2->dev);
+
+ dw_mipi_dsi2_host_softrst(dsi2);
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, RESET);
+
+ dw_mipi_dsi2_work_mode(dsi2, MANUAL_MODE_EN);
+ dw_mipi_dsi2_phy_init(dsi2);
+
+ if (phy_ops->power_on)
+ phy_ops->power_on(dsi2->plat_data->priv_data);
+
+ dw_mipi_dsi2_tx_option_set(dsi2);
+
+ /*
+ * the initial deskew calibration is sent after phy_power_on;
+ * only then can clk_type be configured.
+ */
+
+ regmap_update_bits(dsi2->regmap, DSI2_PHY_CLK_CFG, CLK_TYPE_MASK,
+ dsi2->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS ? NON_CONTINUOUS_CLK :
+ CONTINUOUS_CLK);
+
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, POWER_UP);
+ dw_mipi_dsi2_set_cmd_mode(dsi2);
+
+ dw_mipi_dsi2_ipi_set(dsi2);
+}
+
+static void dw_mipi_dsi2_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Power up the dsi ctl into a command mode */
+ dw_mipi_dsi2_mode_set(dsi2, &dsi2->mode);
+}
+
+static void dw_mipi_dsi2_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Store the display mode for later use in pre_enable callback */
+ drm_mode_copy(&dsi2->mode, adjusted_mode);
+}
+
+static void dw_mipi_dsi2_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Switch to video mode for panel-bridge enable & panel enable */
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO)
+ dw_mipi_dsi2_set_vid_mode(dsi2);
+ else
+ dw_mipi_dsi2_set_data_stream_mode(dsi2);
+}
+
+static enum drm_mode_status
+dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ enum drm_mode_status mode_status = MODE_OK;
+
+ if (pdata->mode_valid)
+ mode_status = pdata->mode_valid(pdata->priv_data, mode,
+ dsi2->mode_flags,
+ dw_mipi_dsi2_get_lanes(dsi2),
+ dsi2->format);
+
+ return mode_status;
+}
+
+static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Set the encoder type as caller does not know it */
+ bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ flags);
+}
+
+static const struct drm_bridge_funcs dw_mipi_dsi2_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = dw_mipi_dsi2_bridge_atomic_get_input_bus_fmts,
+ .atomic_check = dw_mipi_dsi2_bridge_atomic_check,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = dw_mipi_dsi2_bridge_atomic_pre_enable,
+ .atomic_enable = dw_mipi_dsi2_bridge_atomic_enable,
+ .atomic_post_disable = dw_mipi_dsi2_bridge_post_atomic_disable,
+ .mode_set = dw_mipi_dsi2_bridge_mode_set,
+ .mode_valid = dw_mipi_dsi2_bridge_mode_valid,
+ .attach = dw_mipi_dsi2_bridge_attach,
+};
+
+static const struct regmap_config dw_mipi_dsi2_regmap_config = {
+ .name = "dsi2-host",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static struct dw_mipi_dsi2 *
+__dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *apb_rst;
+ struct dw_mipi_dsi2 *dsi2;
+ int ret;
+
+ dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
+ if (!dsi2)
+ return ERR_PTR(-ENOMEM);
+
+ dsi2->dev = dev;
+ dsi2->plat_data = plat_data;
+
+ if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps ||
+ !plat_data->phy_ops->get_timing)
+ return dev_err_ptr_probe(dev, -ENODEV, "Phy not properly configured\n");
+
+ if (!plat_data->regmap) {
+ void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
+
+ if (IS_ERR(base))
+ return dev_err_cast_probe(dev, base, "failed to map registers\n");
+
+ dsi2->regmap = devm_regmap_init_mmio(dev, base,
+ &dw_mipi_dsi2_regmap_config);
+ if (IS_ERR(dsi2->regmap))
+ return dev_err_cast_probe(dev, dsi2->regmap, "failed to init regmap\n");
+ } else {
+ dsi2->regmap = plat_data->regmap;
+ }
+
+ dsi2->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(dsi2->pclk))
+ return dev_err_cast_probe(dev, dsi2->pclk, "Unable to get pclk\n");
+
+ dsi2->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(dsi2->sys_clk))
+ return dev_err_cast_probe(dev, dsi2->sys_clk, "Unable to get sys_clk\n");
+
+ /*
+ * Note that the reset was not defined in the initial device tree, so
+ * we have to be prepared for it not being found.
+ */
+ apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb");
+ if (IS_ERR(apb_rst))
+ return dev_err_cast_probe(dev, apb_rst, "Unable to get reset control\n");
+
+ if (apb_rst) {
+ ret = clk_prepare_enable(dsi2->pclk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to enable pclk\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+ reset_control_assert(apb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(apb_rst);
+
+ clk_disable_unprepare(dsi2->pclk);
+ }
+
+ devm_pm_runtime_enable(dev);
+
+ dsi2->dsi_host.ops = &dw_mipi_dsi2_host_ops;
+ dsi2->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi2->dsi_host);
+ if (ret) {
+ dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ pm_runtime_disable(dev);
+ return ERR_PTR(ret);
+ }
+
+ dsi2->bridge.driver_private = dsi2;
+ dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
+ dsi2->bridge.of_node = pdev->dev.of_node;
+
+ return dsi2;
+}
+
+static void __dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2)
+{
+ mipi_dsi_host_unregister(&dsi2->dsi_host);
+}
+
+/*
+ * Probe/remove API, used to create the bridge instance.
+ */
+struct dw_mipi_dsi2 *
+dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data)
+{
+ return __dw_mipi_dsi2_probe(pdev, plat_data);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_probe);
+
+void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2)
+{
+ __dw_mipi_dsi2_remove(dsi2);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_remove);
+
+/*
+ * Bind/unbind API, used from platforms based on the component framework
+ * to attach the bridge to an encoder.
+ */
+int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder)
+{
+ return drm_bridge_attach(encoder, &dsi2->bridge, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_bind);
+
+void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2)
+{
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_unbind);
+
+MODULE_AUTHOR("Guochun Huang <hero.huang@rock-chips.com>");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@cherry.de>");
+MODULE_DESCRIPTION("DW MIPI DSI2 host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dw-mipi-dsi2");
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 7275e66faefc..4637bf6ea7a3 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2587,7 +2587,7 @@ static void tc_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
- { "tc358767", 0 },
+ { "tc358767" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 2cb748bbefcd..ec79b0dd0e2c 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1244,8 +1244,8 @@ static const struct regmap_config tc358768_regmap_config = {
};
static const struct i2c_device_id tc358768_i2c_ids[] = {
- { "tc358768", 0 },
- { "tc358778", 0 },
+ { "tc358768" },
+ { "tc358778" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index a0a1b5dd794e..eaec70fa42b6 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -389,7 +389,7 @@ static void dlpc3433_remove(struct i2c_client *client)
}
static const struct i2c_device_id dlpc3433_id[] = {
- { "ti,dlpc3433", 0 },
+ { "ti,dlpc3433" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, dlpc3433_id);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 57a7ed13f996..336380114eea 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -132,6 +132,16 @@
#define REG_IRQ_STAT_CHA_SOT_BIT_ERR BIT(2)
#define REG_IRQ_STAT_CHA_PLL_UNLOCK BIT(0)
+enum sn65dsi83_channel {
+ CHANNEL_A,
+ CHANNEL_B
+};
+
+enum sn65dsi83_lvds_term {
+ OHM_100,
+ OHM_200
+};
+
enum sn65dsi83_model {
MODEL_SN65DSI83,
MODEL_SN65DSI84,
@@ -147,6 +157,8 @@ struct sn65dsi83 {
struct regulator *vcc;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
+ int lvds_vod_swing_conf[2];
+ int lvds_term_conf[2];
};
static const struct regmap_range sn65dsi83_readable_ranges[] = {
@@ -237,6 +249,36 @@ static const struct regmap_config sn65dsi83_regmap_config = {
.max_register = REG_IRQ_STAT,
};
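+/*
+ * LVDS differential output voltage (VOD) swing ranges in microvolts,
+ * indexed by [near-end termination][LVDS_VOD_SWING setting][min/max],
+ * for the data lanes and the clock lane respectively.
+ */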
+static const int lvds_vod_swing_data_table[2][4][2] = {
+ { /* 100 Ohm */
+ { 180000, 313000 },
+ { 215000, 372000 },
+ { 250000, 430000 },
+ { 290000, 488000 },
+ },
+ { /* 200 Ohm */
+ { 150000, 261000 },
+ { 200000, 346000 },
+ { 250000, 428000 },
+ { 300000, 511000 },
+ },
+};
+
+static const int lvds_vod_swing_clock_table[2][4][2] = {
+ { /* 100 Ohm */
+ { 140000, 244000 },
+ { 168000, 290000 },
+ { 195000, 335000 },
+ { 226000, 381000 },
+ },
+ { /* 200 Ohm */
+ { 117000, 204000 },
+ { 156000, 270000 },
+ { 195000, 334000 },
+ { 234000, 399000 },
+ },
+};
+
static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
{
return container_of(bridge, struct sn65dsi83, bridge);
@@ -435,12 +477,16 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
val |= REG_LVDS_FMT_LVDS_LINK_CFG;
regmap_write(ctx->regmap, REG_LVDS_FMT, val);
- regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
+ regmap_write(ctx->regmap, REG_LVDS_VCOM,
+ REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_A]) |
+ REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_B]));
regmap_write(ctx->regmap, REG_LVDS_LANE,
(ctx->lvds_dual_link_even_odd_swap ?
REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
- REG_LVDS_LANE_CHA_LVDS_TERM |
- REG_LVDS_LANE_CHB_LVDS_TERM);
+ (ctx->lvds_term_conf[CHANNEL_A] ?
+ REG_LVDS_LANE_CHA_LVDS_TERM : 0) |
+ (ctx->lvds_term_conf[CHANNEL_B] ?
+ REG_LVDS_LANE_CHB_LVDS_TERM : 0));
regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
le16val = cpu_to_le16(mode->hdisplay);
@@ -576,10 +622,103 @@ static const struct drm_bridge_funcs sn65dsi83_funcs = {
.atomic_get_input_bus_fmts = sn65dsi83_atomic_get_input_bus_fmts,
};
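+/*
+ * Return the lowest LVDS_VOD_SWING setting whose data and clock swing
+ * ranges both fit within the requested microvolt windows, or -EINVAL
+ * if none does.
+ */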
+static int sn65dsi83_select_lvds_vod_swing(struct device *dev,
+ u32 lvds_vod_swing_data[2], u32 lvds_vod_swing_clk[2], u8 lvds_term)
+{
+ int i;
+
+ for (i = 0; i <= 3; i++) {
+ if (lvds_vod_swing_data_table[lvds_term][i][0] >= lvds_vod_swing_data[0] &&
+ lvds_vod_swing_data_table[lvds_term][i][1] <= lvds_vod_swing_data[1] &&
+ lvds_vod_swing_clock_table[lvds_term][i][0] >= lvds_vod_swing_clk[0] &&
+ lvds_vod_swing_clock_table[lvds_term][i][1] <= lvds_vod_swing_clk[1])
+ return i;
+ }
+
+ dev_err(dev, "failed to find appropriate LVDS_VOD_SWING configuration\n");
+ return -EINVAL;
+}
+
+static int sn65dsi83_parse_lvds_endpoint(struct sn65dsi83 *ctx, int channel)
+{
+ struct device *dev = ctx->dev;
+ struct device_node *endpoint;
+ int endpoint_reg;
+ /* Defaults span the full range so any table entry matches if the properties are not defined */
+ u32 lvds_vod_swing_data[2] = { 0, 1000000 };
+ u32 lvds_vod_swing_clk[2] = { 0, 1000000 };
+ /* Set default near-end termination to 200 Ohm */
+ u32 lvds_term = 200;
+ int lvds_vod_swing_conf;
+ int ret = 0;
+ int ret_data;
+ int ret_clock;
+
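+ /* LVDS channel A output is OF graph port 2, channel B is port 3 */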
+ if (channel == CHANNEL_A)
+ endpoint_reg = 2;
+ else
+ endpoint_reg = 3;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, endpoint_reg, -1);
+
+ of_property_read_u32(endpoint, "ti,lvds-termination-ohms", &lvds_term);
+ if (lvds_term == 100)
+ ctx->lvds_term_conf[channel] = OHM_100;
+ else if (lvds_term == 200)
+ ctx->lvds_term_conf[channel] = OHM_200;
+ else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret_data = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-data-microvolt",
+ lvds_vod_swing_data, ARRAY_SIZE(lvds_vod_swing_data));
+ if (ret_data != 0 && ret_data != -EINVAL) {
+ ret = ret_data;
+ goto exit;
+ }
+
+ ret_clock = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-clock-microvolt",
+ lvds_vod_swing_clk, ARRAY_SIZE(lvds_vod_swing_clk));
+ if (ret_clock != 0 && ret_clock != -EINVAL) {
+ ret = ret_clock;
+ goto exit;
+ }
+
+ /* Use default value if both properties are NOT defined. */
+ if (ret_data == -EINVAL && ret_clock == -EINVAL)
+ lvds_vod_swing_conf = 0x1;
+
+ /* Use lookup table if any of the two properties is defined. */
+ if (!ret_data || !ret_clock) {
+ lvds_vod_swing_conf = sn65dsi83_select_lvds_vod_swing(dev, lvds_vod_swing_data,
+ lvds_vod_swing_clk, ctx->lvds_term_conf[channel]);
+ if (lvds_vod_swing_conf < 0) {
+ ret = lvds_vod_swing_conf;
+ goto exit;
+ }
+ }
+
+ ctx->lvds_vod_swing_conf[channel] = lvds_vod_swing_conf;
+ ret = 0;
+exit:
+ of_node_put(endpoint);
+ return ret;
+}
+
static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
{
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
+ int ret;
+
+ ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_A);
+ if (ret < 0)
+ return ret;
+
+ ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_B);
+ if (ret < 0)
+ return ret;
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -606,7 +745,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0);
if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ return dev_err_probe(dev, PTR_ERR(panel_bridge), "Failed to get panel bridge\n");
ctx->panel_bridge = panel_bridge;
@@ -732,7 +871,7 @@ static void sn65dsi83_remove(struct i2c_client *client)
drm_bridge_remove(&ctx->bridge);
}
-static struct i2c_device_id sn65dsi83_id[] = {
+static const struct i2c_device_id sn65dsi83_id[] = {
{ "ti,sn65dsi83", MODEL_SN65DSI83 },
{ "ti,sn65dsi84", MODEL_SN65DSI84 },
{},
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9e31f750fd88..e4d9006b59f1 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1970,9 +1970,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return ti_sn65dsi86_add_aux_device(pdata, &pdata->aux_aux, "aux");
}
-static struct i2c_device_id ti_sn65dsi86_id[] = {
- { "ti,sn65dsi86", 0},
- {},
+static const struct i2c_device_id ti_sn65dsi86_id[] = {
+ { "ti,sn65dsi86" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ti_sn65dsi86_id);
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 107a2c0b96c9..79ab5da827e1 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -435,7 +435,7 @@ static void tfp410_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
- { "tfp410", 0 },
+ { "tfp410" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
diff --git a/drivers/gpu/drm/clients/Kconfig b/drivers/gpu/drm/clients/Kconfig
new file mode 100644
index 000000000000..6096c623d9d5
--- /dev/null
+++ b/drivers/gpu/drm/clients/Kconfig
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_CLIENT_LIB
+ tristate
+ depends on DRM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ help
+ This option enables the DRM client library and selects all
+ modules and components according to the enabled clients.
+
+config DRM_CLIENT_SELECTION
+ tristate
+ depends on DRM
+ select DRM_CLIENT_LIB if DRM_CLIENT_LOG
+ select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
+ help
+ Drivers that support in-kernel DRM clients have to select this
+ option.
+
+config DRM_CLIENT_SETUP
+ bool
+ depends on DRM_CLIENT_SELECTION
+ help
+ Enables the DRM client setup. DRM drivers that support the
+ default in-kernel clients should select DRM_CLIENT_SELECTION
+ instead.
+
+menu "Supported DRM clients"
+ depends on DRM_CLIENT_SELECTION
+
+config DRM_FBDEV_EMULATION
+ bool "Enable legacy fbdev support for your modesetting driver"
+ depends on DRM_CLIENT_SELECTION
+ select DRM_CLIENT
+ select DRM_CLIENT_SETUP
+ select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+ default FB
+ help
+ Choose this option if you have a need for the legacy fbdev
+ support. Note that this support also provides the linux console
+ support on top of your modesetting driver.
+
+ If in doubt, say "Y".
+
+config DRM_FBDEV_OVERALLOC
+ int "Overallocation of the fbdev buffer"
+ depends on DRM_FBDEV_EMULATION
+ default 100
+ help
+ Defines the fbdev buffer overallocation in percent. Default
+ is 100. Typical values for double buffering will be 200,
+ triple buffering 300.
+
+config DRM_FBDEV_LEAK_PHYS_SMEM
+ bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
+ depends on DRM_FBDEV_EMULATION && EXPERT
+ default n
+ help
+ In order to keep user-space compatibility, we want in certain
+ use-cases to keep leaking the fbdev physical address to the
+ user-space program handling the fbdev buffer.
+ This affects, among others, Amlogic, Allwinner and Rockchip
+ devices with ARM Mali GPUs using a userspace blob.
+ This option is not supported by upstream developers and should be
+ removed as soon as possible; it is considered broken and legacy
+ behaviour from a modern fbdev device driver's point of view.
+
+ Please send any bug reports when using this to your proprietary
+ software vendor that requires this.
+
+ If in doubt, say "N" or spread the word to your closed source
+ library vendor.
+
+config DRM_CLIENT_LOG
+ bool "Print the kernel boot message on the screen"
+ depends on DRM_CLIENT_SELECTION
+ select DRM_CLIENT
+ select DRM_CLIENT_SETUP
+ select DRM_DRAW
+ select FONT_SUPPORT
+ help
+ This enables a drm logger that will print the kernel messages to
+ the screen until userspace is ready to take over.
+
+ If you only need logs but no terminal, or if you prefer a
+ userspace terminal, say "Y".
+
+choice
+ prompt "Default DRM Client"
+ depends on DRM_CLIENT_SELECTION
+ depends on DRM_FBDEV_EMULATION || DRM_CLIENT_LOG
+ default DRM_CLIENT_DEFAULT_FBDEV
+ help
+ Selects the default drm client.
+
+ The selection made here can be overridden by using the kernel
+ command line 'drm_client_lib.active=fbdev' option.
+
+config DRM_CLIENT_DEFAULT_FBDEV
+ bool "fbdev"
+ depends on DRM_FBDEV_EMULATION
+ help
+ Use fbdev emulation as default drm client. This is needed to have
+ fbcon on top of a drm driver.
+
+config DRM_CLIENT_DEFAULT_LOG
+ bool "log"
+ depends on DRM_CLIENT_LOG
+ help
+ Use drm log as default drm client. This will display boot logs on the
+ screen, but doesn't implement a full terminal. For that you will need
+ a userspace terminal using drm/kms.
+
+endchoice
+
+config DRM_CLIENT_DEFAULT
+ string
+ depends on DRM_CLIENT
+ default "fbdev" if DRM_CLIENT_DEFAULT_FBDEV
+ default "log" if DRM_CLIENT_DEFAULT_LOG
+ default ""
+
+endmenu
diff --git a/drivers/gpu/drm/clients/Makefile b/drivers/gpu/drm/clients/Makefile
new file mode 100644
index 000000000000..c16addbc327f
--- /dev/null
+++ b/drivers/gpu/drm/clients/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+subdir-ccflags-y += -I$(src)/..
+
+drm_client_lib-y := drm_client_setup.o
+drm_client_lib-$(CONFIG_DRM_CLIENT_LOG) += drm_log.o
+drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
+obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
diff --git a/drivers/gpu/drm/clients/drm_client_internal.h b/drivers/gpu/drm/clients/drm_client_internal.h
new file mode 100644
index 000000000000..6dc078bf6503
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_client_internal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_CLIENT_INTERNAL_H
+#define DRM_CLIENT_INTERNAL_H
+
+struct drm_device;
+struct drm_format_info;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format);
+#else
+static inline int drm_fbdev_client_setup(struct drm_device *dev,
+ const struct drm_format_info *format)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DRM_CLIENT_LOG
+void drm_log_register(struct drm_device *dev);
+#else
+static inline void drm_log_register(struct drm_device *dev) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index c14221ca5a0d..e17265039ca8 100644
--- a/drivers/gpu/drm/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -1,11 +1,18 @@
// SPDX-License-Identifier: MIT
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
-#include <drm/drm_fbdev_client.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+#include "drm_client_internal.h"
+
+static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
+module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
+MODULE_PARM_DESC(active,
+ "Choose which drm client to start, default is"
+ CONFIG_DRM_CLIENT_DEFAULT "]");
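+/* Boot with e.g. drm_client_lib.active=log to override the default client */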
+
/**
* drm_client_setup() - Setup in-kernel DRM clients
* @dev: DRM device
@@ -24,11 +31,26 @@
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
- int ret;
- ret = drm_fbdev_client_setup(dev, format);
- if (ret)
- drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (!strcmp(drm_client_default, "fbdev")) {
+ int ret;
+
+ ret = drm_fbdev_client_setup(dev, format);
+ if (ret)
+ drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_DRM_CLIENT_LOG
+ if (!strcmp(drm_client_default, "log")) {
+ drm_log_register(dev);
+ return;
+ }
+#endif
+ if (strcmp(drm_client_default, ""))
+ drm_warn(dev, "Unknown DRM client %s\n", drm_client_default);
}
EXPORT_SYMBOL(drm_client_setup);
diff --git a/drivers/gpu/drm/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c
index 246fb63ab250..f894ba52bdb5 100644
--- a/drivers/gpu/drm/drm_fbdev_client.c
+++ b/drivers/gpu/drm/clients/drm_fbdev_client.c
@@ -3,11 +3,12 @@
#include <drm/drm_client.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_client.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+#include "drm_client_internal.h"
+
/*
* struct drm_client_funcs
*/
@@ -164,4 +165,3 @@ err_drm_client_init:
kfree(fb_helper);
return ret;
}
-EXPORT_SYMBOL(drm_fbdev_client_setup);
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
new file mode 100644
index 000000000000..379850c83e51
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright (c) 2024 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#include <linux/console.h>
+#include <linux/font.h>
+#include <linux/init.h>
+#include <linux/iosys-map.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
+
+#include "drm_client_internal.h"
+#include "drm_draw_internal.h"
+#include "drm_internal.h"
+
+MODULE_AUTHOR("Jocelyn Falempe");
+MODULE_DESCRIPTION("DRM boot logger");
+MODULE_LICENSE("GPL");
+
+static unsigned int scale = 1;
+module_param(scale, uint, 0444);
+MODULE_PARM_DESC(scale, "Integer scaling factor for drm_log, default is 1");
+
+/**
+ * DOC: overview
+ *
+ * This is a simple graphic logger that prints the kernel messages on screen
+ * until a userspace application is able to take over.
+ * It is intended for debugging purposes only.
+ */
+
+struct drm_log_scanout {
+ struct drm_client_buffer *buffer;
+ const struct font_desc *font;
+ u32 rows;
+ u32 columns;
+ u32 scaled_font_h;
+ u32 scaled_font_w;
+ u32 line;
+ u32 format;
+ u32 px_width;
+ u32 front_color;
+ u32 prefix_color;
+};
+
+struct drm_log {
+ struct mutex lock;
+ struct drm_client_dev client;
+ struct console con;
+ bool probed;
+ u32 n_scanout;
+ struct drm_log_scanout *scanout;
+};
+
+static struct drm_log *client_to_drm_log(struct drm_client_dev *client)
+{
+ return container_of(client, struct drm_log, client);
+}
+
+static struct drm_log *console_to_drm_log(struct console *con)
+{
+ return container_of(con, struct drm_log, con);
+}
+
+static void drm_log_blit(struct iosys_map *dst, unsigned int dst_pitch,
+ const u8 *src, unsigned int src_pitch,
+ u32 height, u32 width, u32 px_width, u32 color)
+{
+ switch (px_width) {
+ case 2:
+ drm_draw_blit16(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ case 3:
+ drm_draw_blit24(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ case 4:
+ drm_draw_blit32(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't blit with pixel width %d\n", px_width);
+ }
+}
+
+static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line)
+{
+ struct drm_framebuffer *fb = scanout->buffer->fb;
+ unsigned long height = scanout->scaled_font_h;
+ struct iosys_map map;
+ struct drm_rect r = DRM_RECT_INIT(0, line * height, fb->width, height);
+
+ if (drm_client_buffer_vmap_local(scanout->buffer, &map))
+ return;
+ iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]);
+ drm_client_buffer_vunmap_local(scanout->buffer);
+ drm_client_framebuffer_flush(scanout->buffer, &r);
+}
+
+static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
+ unsigned int len, unsigned int prefix_len)
+{
+ struct drm_framebuffer *fb = scanout->buffer->fb;
+ struct iosys_map map;
+ const struct font_desc *font = scanout->font;
+ size_t font_pitch = DIV_ROUND_UP(font->width, 8);
+ const u8 *src;
+ u32 px_width = fb->format->cpp[0];
+ struct drm_rect r = DRM_RECT_INIT(0, scanout->line * scanout->scaled_font_h,
+ fb->width, (scanout->line + 1) * scanout->scaled_font_h);
+ u32 i;
+
+ if (drm_client_buffer_vmap_local(scanout->buffer, &map))
+ return;
+
+ iosys_map_incr(&map, r.y1 * fb->pitches[0]);
+ for (i = 0; i < len && i < scanout->columns; i++) {
+ u32 color = (i < prefix_len) ? scanout->prefix_color : scanout->front_color;
+ src = drm_draw_get_char_bitmap(font, s[i], font_pitch);
+ drm_log_blit(&map, fb->pitches[0], src, font_pitch,
+ scanout->scaled_font_h, scanout->scaled_font_w,
+ px_width, color);
+ iosys_map_incr(&map, scanout->scaled_font_w * px_width);
+ }
+
+ scanout->line++;
+ if (scanout->line >= scanout->rows)
+ scanout->line = 0;
+ drm_client_buffer_vunmap_local(scanout->buffer);
+ drm_client_framebuffer_flush(scanout->buffer, &r);
+}
+
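+/*
+ * The framebuffer is used as a circular buffer of text lines: instead
+ * of scrolling, drawing wraps back to the top row, and the lines just
+ * ahead of the current one are cleared to mark the wrap point.
+ */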
+static void drm_log_draw_new_line(struct drm_log_scanout *scanout,
+ const char *s, unsigned int len, unsigned int prefix_len)
+{
+ if (scanout->line == 0) {
+ drm_log_clear_line(scanout, 0);
+ drm_log_clear_line(scanout, 1);
+ drm_log_clear_line(scanout, 2);
+ } else if (scanout->line + 2 < scanout->rows)
+ drm_log_clear_line(scanout, scanout->line + 2);
+
+ drm_log_draw_line(scanout, s, len, prefix_len);
+}
+
+/*
+ * Depends on print_time() in printk.c
+ * Timestamp is written with "[%5lu.%06lu]"
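+ * e.g. "[    5.123456]": '[' at index 0, '.' at index 6, ']' at index 13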
+ */
+#define TS_PREFIX_LEN 13
+
+static void drm_log_draw_kmsg_record(struct drm_log_scanout *scanout,
+ const char *s, unsigned int len)
+{
+ u32 prefix_len = 0;
+
+ if (len > TS_PREFIX_LEN && s[0] == '[' && s[6] == '.' && s[TS_PREFIX_LEN] == ']')
+ prefix_len = TS_PREFIX_LEN + 1;
+
+ /* do not print the ending \n character */
+ if (s[len - 1] == '\n')
+ len--;
+
+ while (len > scanout->columns) {
+ drm_log_draw_new_line(scanout, s, scanout->columns, prefix_len);
+ s += scanout->columns;
+ len -= scanout->columns;
+ prefix_len = 0;
+ }
+ if (len)
+ drm_log_draw_new_line(scanout, s, len, prefix_len);
+}
+
+static u32 drm_log_find_usable_format(struct drm_plane *plane)
+{
+ int i;
+
+ for (i = 0; i < plane->format_count; i++)
+ if (drm_draw_color_from_xrgb8888(0xffffff, plane->format_types[i]) != 0)
+ return plane->format_types[i];
+ return DRM_FORMAT_INVALID;
+}
+
+static int drm_log_setup_modeset(struct drm_client_dev *client,
+ struct drm_mode_set *mode_set,
+ struct drm_log_scanout *scanout)
+{
+ struct drm_crtc *crtc = mode_set->crtc;
+ u32 width = mode_set->mode->hdisplay;
+ u32 height = mode_set->mode->vdisplay;
+ u32 format;
+
+ scanout->font = get_default_font(width, height, NULL, NULL);
+ if (!scanout->font)
+ return -ENOENT;
+
+ format = drm_log_find_usable_format(crtc->primary);
+ if (format == DRM_FORMAT_INVALID)
+ return -EINVAL;
+
+ scanout->buffer = drm_client_framebuffer_create(client, width, height, format);
+ if (IS_ERR(scanout->buffer)) {
+ drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n",
+ width, height, &format);
+ return -ENOMEM;
+ }
+ mode_set->fb = scanout->buffer->fb;
+ scanout->scaled_font_h = scanout->font->height * scale;
+ scanout->scaled_font_w = scanout->font->width * scale;
+ scanout->rows = height / scanout->scaled_font_h;
+ scanout->columns = width / scanout->scaled_font_w;
+ scanout->front_color = drm_draw_color_from_xrgb8888(0xffffff, format);
+ scanout->prefix_color = drm_draw_color_from_xrgb8888(0x4e9a06, format);
+ return 0;
+}
+
+static int drm_log_count_modeset(struct drm_client_dev *client)
+{
+ struct drm_mode_set *mode_set;
+ int count = 0;
+
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client)
+ count++;
+ mutex_unlock(&client->modeset_mutex);
+ return count;
+}
+
+static void drm_log_init_client(struct drm_log *dlog)
+{
+ struct drm_client_dev *client = &dlog->client;
+ struct drm_mode_set *mode_set;
+ int i, max_modeset;
+ int n_modeset = 0;
+
+ dlog->probed = true;
+
+ if (drm_client_modeset_probe(client, 0, 0))
+ return;
+
+ max_modeset = drm_log_count_modeset(client);
+ if (!max_modeset)
+ return;
+
+ dlog->scanout = kcalloc(max_modeset, sizeof(*dlog->scanout), GFP_KERNEL);
+ if (!dlog->scanout)
+ return;
+
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client) {
+ if (!mode_set->mode)
+ continue;
+ if (drm_log_setup_modeset(client, mode_set, &dlog->scanout[n_modeset]))
+ continue;
+ n_modeset++;
+ }
+ mutex_unlock(&client->modeset_mutex);
+ if (n_modeset == 0)
+ goto err_nomodeset;
+
+ if (drm_client_modeset_commit(client))
+ goto err_failed_commit;
+
+ dlog->n_scanout = n_modeset;
+ return;
+
+err_failed_commit:
+ for (i = 0; i < n_modeset; i++)
+ drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+
+err_nomodeset:
+ kfree(dlog->scanout);
+ dlog->scanout = NULL;
+}
+
+static void drm_log_free_scanout(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+ int i;
+
+ if (dlog->n_scanout) {
+ for (i = 0; i < dlog->n_scanout; i++)
+ drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+ dlog->n_scanout = 0;
+ kfree(dlog->scanout);
+ dlog->scanout = NULL;
+ }
+}
+
+static void drm_log_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+ struct drm_device *dev = client->dev;
+
+ unregister_console(&dlog->con);
+
+ mutex_lock(&dlog->lock);
+ drm_log_free_scanout(client);
+ drm_client_release(client);
+ mutex_unlock(&dlog->lock);
+ kfree(dlog);
+ drm_dbg(dev, "Unregistered with drm log\n");
+}
+
+static int drm_log_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ mutex_lock(&dlog->lock);
+ drm_log_free_scanout(client);
+ dlog->probed = false;
+ mutex_unlock(&dlog->lock);
+ return 0;
+}
+
+static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ console_stop(&dlog->con);
+
+ return 0;
+}
+
+static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ console_start(&dlog->con);
+
+ return 0;
+}
+
+static const struct drm_client_funcs drm_log_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_log_client_unregister,
+ .hotplug = drm_log_client_hotplug,
+ .suspend = drm_log_client_suspend,
+ .resume = drm_log_client_resume,
+};
+
+static void drm_log_write_thread(struct console *con, struct nbcon_write_context *wctxt)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+ int i;
+
+ if (!dlog->probed)
+ drm_log_init_client(dlog);
+
+ /* Check that we are still the master before drawing */
+ if (drm_master_internal_acquire(dlog->client.dev)) {
+ drm_master_internal_release(dlog->client.dev);
+
+ for (i = 0; i < dlog->n_scanout; i++)
+ drm_log_draw_kmsg_record(&dlog->scanout[i], wctxt->outbuf, wctxt->len);
+ }
+}
+
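+/*
+ * nbcon device_lock/device_unlock callbacks: take the same mutex that
+ * protects the scanout setup, so console writes are serialized against
+ * client hotplug and unregister.
+ */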
+static void drm_log_lock(struct console *con, unsigned long *flags)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+
+ mutex_lock(&dlog->lock);
+ migrate_disable();
+}
+
+static void drm_log_unlock(struct console *con, unsigned long flags)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+
+ migrate_enable();
+ mutex_unlock(&dlog->lock);
+}
+
+static void drm_log_register_console(struct console *con)
+{
+ strscpy(con->name, "drm_log");
+ con->write_thread = drm_log_write_thread;
+ con->device_lock = drm_log_lock;
+ con->device_unlock = drm_log_unlock;
+ con->flags = CON_PRINTBUFFER | CON_NBCON;
+ con->index = -1;
+
+ register_console(con);
+}
+
+/**
+ * drm_log_register() - Register a drm device to drm_log
+ * @dev: the drm device to register.
+ */
+void drm_log_register(struct drm_device *dev)
+{
+ struct drm_log *new;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto err_warn;
+
+ mutex_init(&new->lock);
+ if (drm_client_init(dev, &new->client, "drm_log", &drm_log_client_funcs))
+ goto err_free;
+
+ drm_client_register(&new->client);
+
+ drm_log_register_console(&new->con);
+
+ drm_dbg(dev, "Registered with drm log as %s\n", new->con.name);
+ return;
+
+err_free:
+ kfree(new);
+err_warn:
+ drm_warn(dev, "Failed to register with drm log\n");
+}
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 6a4e892afcf8..8d22b7627d41 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -15,6 +15,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -75,6 +76,12 @@ config DRM_DISPLAY_HDCP_HELPER
help
DRM display helpers for HDCP.
+config DRM_DISPLAY_HDMI_AUDIO_HELPER
+ bool
+ help
+ DRM display helpers for HDMI Audio functionality (generic HDMI Codec
+ implementation).
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
@@ -82,6 +89,7 @@ config DRM_DISPLAY_HDMI_HELPER
config DRM_DISPLAY_HDMI_STATE_HELPER
bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_HELPER
help
DRM KMS state helpers for HDMI.
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 629c834c3192..b17879b957d5 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -14,6 +14,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
+ drm_hdmi_audio_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 320c297008aa..56f977bbe62d 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -17,7 +17,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
/**
@@ -179,11 +182,15 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *detect = bridge_connector->bridge_detect;
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
enum drm_connector_status status;
if (detect) {
status = detect->funcs->detect(detect);
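+ /* The HDMI helper refreshes the EDID as part of detect; see get_modes() */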
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
drm_bridge_connector_hpd_notify(connector, status);
} else {
switch (connector->connector_type) {
@@ -202,6 +209,16 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
return status;
}
+static void drm_bridge_connector_force(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
+
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_force(connector);
+}
+
static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
struct dentry *root)
{
@@ -230,6 +247,7 @@ static void drm_bridge_connector_reset(struct drm_connector *connector)
static const struct drm_connector_funcs drm_bridge_connector_funcs = {
.reset = drm_bridge_connector_reset,
.detect = drm_bridge_connector_detect,
+ .force = drm_bridge_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -276,6 +294,14 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
struct drm_bridge *bridge;
/*
+ * If there is a HDMI bridge, EDID has been updated as a part of
+ * the .detect(). Just update the modes here.
+ */
+ bridge = bridge_connector->bridge_hdmi;
+ if (bridge)
+ return drm_edid_connector_add_modes(connector);
+
+ /*
* If display exposes EDID, then we parse that in the normal way to
* build table of supported modes.
*/
@@ -299,11 +325,37 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
return 0;
}
+static enum drm_mode_status
+drm_bridge_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_hdmi_connector_mode_valid(connector, mode);
+
+ return MODE_OK;
+}
+
+static int drm_bridge_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_atomic_helper_connector_hdmi_check(connector, state);
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
- /* No need for .mode_valid(), the bridges are checked by the core. */
+ .mode_valid = drm_bridge_connector_mode_valid,
.enable_hpd = drm_bridge_connector_enable_hpd,
.disable_hpd = drm_bridge_connector_disable_hpd,
+ .atomic_check = drm_bridge_connector_atomic_check,
};
static enum drm_mode_status
@@ -354,10 +406,94 @@ static int drm_bridge_connector_write_infoframe(struct drm_connector *connector,
return bridge->funcs->hdmi_write_infoframe(bridge, type, buffer, len);
}
+static const struct drm_edid *
+drm_bridge_connector_read_edid(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_edid;
+ if (!bridge)
+ return NULL;
+
+ return drm_bridge_edid_read(bridge, connector);
+}
+
static const struct drm_connector_hdmi_funcs drm_bridge_connector_hdmi_funcs = {
.tmds_char_rate_valid = drm_bridge_connector_tmds_char_rate_valid,
.clear_infoframe = drm_bridge_connector_clear_infoframe,
.write_infoframe = drm_bridge_connector_write_infoframe,
+ .read_edid = drm_bridge_connector_read_edid,
+};
+
+static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+}
+
+static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return;
+
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
+ bool enable, int direction)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (bridge->funcs->hdmi_audio_mute_stream)
+ return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ enable, direction);
+ else
+ return -ENOTSUPP;
+}
+
+static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
+ .startup = drm_bridge_connector_audio_startup,
+ .prepare = drm_bridge_connector_audio_prepare,
+ .shutdown = drm_bridge_connector_audio_shutdown,
+ .mute_stream = drm_bridge_connector_audio_mute_stream,
};
/* -----------------------------------------------------------------------------
@@ -459,7 +595,12 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- if (bridge_connector->bridge_hdmi)
+ if (bridge_connector->bridge_hdmi) {
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
+
+ bridge = bridge_connector->bridge_hdmi;
+
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -468,12 +609,31 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc,
supported_formats,
max_bpc);
- else
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (bridge->hdmi_audio_max_i2s_playback_channels ||
+ bridge->hdmi_audio_spdif_playback) {
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ ret = drm_connector_hdmi_audio_init(connector,
+ bridge->hdmi_audio_dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ } else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
- if (ret)
- return ERR_PTR(ret);
+ if (ret)
+ return ERR_PTR(ret);
+ }
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 6ee51003de3c..da3c8521a7fa 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -22,15 +22,16 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/dynamic_debug.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
-#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -779,6 +780,128 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
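+/*
+ * Poll helper: returns the DP_PAYLOAD_TABLE_UPDATE_STATUS byte, or a
+ * negative error code if the AUX read failed, so readx_poll_timeout()
+ * callers can test both status bits and read errors in one condition.
+ */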
+static int read_payload_update_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+/**
+ * drm_dp_dpcd_write_payload() - Write Virtual Channel information to payload table
+ * @aux: DisplayPort AUX channel
+ * @vcpid: Virtual Channel Payload ID
+ * @start_time_slot: Starting time slot
+ * @time_slot_count: Time slot count
+ *
+ * Write the Virtual Channel payload allocation table, checking the payload
+ * update status and retrying as necessary.
+ *
+ * Returns:
+ * 0 on success, negative error otherwise
+ */
+int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
+ int vcpid, u8 start_time_slot, u8 time_slot_count)
+{
+ u8 payload_alloc[3], status;
+ int ret;
+ int retries = 0;
+
+ drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = vcpid;
+ payload_alloc[1] = start_time_slot;
+ payload_alloc[2] = time_slot_count;
+
+ ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+ drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
+ goto fail;
+ }
+
+retry:
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+ retries++;
+ if (retries < 20) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ drm_dbg_kms(aux->drm_dev, "status not set after read payload table status %d\n",
+ status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_write_payload);
+
+/**
+ * drm_dp_dpcd_clear_payload() - Clear the entire VC Payload ID table
+ * @aux: DisplayPort AUX channel
+ *
+ * Clear the entire VC Payload ID table.
+ *
+ * Returns: 0 on success, negative error code on errors.
+ */
+int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux)
+{
+ return drm_dp_dpcd_write_payload(aux, 0, 0, 0x3f);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_clear_payload);
+
+/**
+ * drm_dp_dpcd_poll_act_handled() - Poll for ACT handled status
+ * @aux: DisplayPort AUX channel
+ * @timeout_ms: Timeout in ms
+ *
+ * Try waiting for the sink to finish updating its payload table by polling for
+ * the ACT handled bit of DP_PAYLOAD_TABLE_UPDATE_STATUS for up to @timeout_ms
+ * milliseconds, defaulting to 3000 ms if 0.
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
+ */
+int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms)
+{
+ int ret, status;
+
+ /* default to 3 seconds, this is arbitrary */
+ timeout_ms = timeout_ms ?: 3000;
+
+ ret = readx_poll_timeout(read_payload_update_status, aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ drm_err(aux->drm_dev, "Failed to get ACT after %d ms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ /*
+ * Failure here isn't unexpected - the hub may have
+ * just been unplugged
+ */
+ drm_dbg_kms(aux->drm_dev, "Failed to read payload table status: %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_poll_act_handled);
+
static bool is_edid_digital_input_dp(const struct drm_edid *drm_edid)
{
/* FIXME: get rid of drm_edid_raw() */
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index dc4446d589e7..06c91c5b7f7c 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -29,7 +29,6 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -68,9 +67,6 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots);
-
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
@@ -2285,7 +2281,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
- drm_connector_register(port->connector);
+ drm_connector_dynamic_register(port->connector);
return;
error:
@@ -3267,7 +3263,7 @@ EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
- return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+ return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
payload->time_slots);
}
@@ -3298,7 +3294,7 @@ static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_
}
if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
- drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
+ drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
}
/**
@@ -3576,8 +3572,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
}
/**
- * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
- * @mgr: The &drm_dp_mst_topology_mgr to use
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
@@ -3588,17 +3583,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
*
* Returns the BW / timeslot value in 20.12 fixed point format.
*/
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
fixed20_12 ret;
- if (link_rate == 0 || link_lane_count == 0)
- drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
- link_rate, link_lane_count);
-
/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
ch_coding_efficiency),
@@ -3686,7 +3676,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
/* Write reset payload */
- drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
+ drm_dp_dpcd_clear_payload(mgr->aux);
drm_dp_mst_queue_probe_work(mgr);
@@ -4747,61 +4737,6 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_
}
EXPORT_SYMBOL(drm_dp_mst_update_slots);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots)
-{
- u8 payload_alloc[3], status;
- int ret;
- int retries = 0;
-
- drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
-
- payload_alloc[0] = id;
- payload_alloc[1] = start_slot;
- payload_alloc[2] = num_slots;
-
- ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
- drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
- goto fail;
- }
-
-retry:
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0) {
- drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
- retries++;
- if (retries < 20) {
- usleep_range(10000, 20000);
- goto retry;
- }
- drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
- status);
- ret = -EINVAL;
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
-
-static int do_get_act_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
/**
* drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
@@ -4819,28 +4754,9 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
* There doesn't seem to be any recommended retry count or timeout in
* the MST specification. Since some hubs have been observed to take
* over 1 second to update their payload allocations under certain
- * conditions, we use a rather large timeout value.
+ * conditions, we use a rather large timeout value of 3 seconds.
*/
- const int timeout_ms = 3000;
- int ret, status;
-
- ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
- status & DP_PAYLOAD_ACT_HANDLED || status < 0,
- 200, timeout_ms * USEC_PER_MSEC);
- if (ret < 0 && status >= 0) {
- drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
- timeout_ms, status);
- return -EINVAL;
- } else if (status < 0) {
- /*
- * Failure here isn't unexpected - the hub may have
- * just been unplugged
- */
- drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
- return status;
- }
-
- return 0;
+ return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);
}
EXPORT_SYMBOL(drm_dp_check_act_status);
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
new file mode 100644
index 000000000000..05afc9f0bdd6
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+
+#include <sound/hdmi-codec.h>
+
+static int drm_connector_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->startup)
+ return funcs->startup(connector);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_prepare(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->prepare(connector, fmt, hparms);
+}
+
+static void drm_connector_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->shutdown(connector);
+}
+
+static int drm_connector_hdmi_audio_mute_stream(struct device *dev, void *data,
+ bool enable, int direction)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->mute_stream)
+ return funcs->mute_stream(connector, enable, direction);
+
+ return -ENOTSUPP;
+}
+
+static int drm_connector_hdmi_audio_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint,
+ void *data)
+{
+ struct drm_connector *connector = data;
+ struct of_endpoint of_ep;
+ int ret;
+
+ if (connector->hdmi_audio.dai_port < 0)
+ return -ENOTSUPP;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ if (of_ep.port == connector->hdmi_audio.dai_port)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int drm_connector_hdmi_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->eld_mutex);
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_hook_plugged_cb(struct device *dev,
+ void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.plugged_cb = fn;
+ connector->hdmi_audio.plugged_cb_dev = codec_dev;
+
+ fn(codec_dev, connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+
+ return 0;
+}
+
+void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
+ bool plugged)
+{
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.last_state = plugged;
+
+ if (connector->hdmi_audio.plugged_cb &&
+ connector->hdmi_audio.plugged_cb_dev)
+ connector->hdmi_audio.plugged_cb(connector->hdmi_audio.plugged_cb_dev,
+ connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_plugged_notify);
+
+static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
+ .audio_startup = drm_connector_hdmi_audio_startup,
+ .prepare = drm_connector_hdmi_audio_prepare,
+ .audio_shutdown = drm_connector_hdmi_audio_shutdown,
+ .mute_stream = drm_connector_hdmi_audio_mute_stream,
+ .get_eld = drm_connector_hdmi_audio_get_eld,
+ .get_dai_id = drm_connector_hdmi_audio_get_dai_id,
+ .hook_plugged_cb = drm_connector_hdmi_audio_hook_plugged_cb,
+};
+
+/**
+ * drm_connector_hdmi_audio_init - Initialize HDMI Codec device for the DRM connector
+ * @connector: A pointer to the connector to allocate codec for
+ * @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
+ * @funcs: callbacks for this HDMI Codec
+ * @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @spdif_playback: set if HDMI codec has S/PDIF playback port
+ * @dai_port: sound DAI port, -1 if it is not enabled
+ *
+ * Create a HDMI codec device to be used with the specified connector.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_hdmi_audio_init(struct drm_connector *connector,
+ struct device *hdmi_codec_dev,
+ const struct drm_connector_hdmi_audio_funcs *funcs,
+ unsigned int max_i2s_playback_channels,
+ bool spdif_playback,
+ int dai_port)
+{
+ struct hdmi_codec_pdata codec_pdata = {
+ .ops = &drm_connector_hdmi_audio_ops,
+ .max_i2s_channels = max_i2s_playback_channels,
+ .i2s = !!max_i2s_playback_channels,
+ .spdif = spdif_playback,
+ .no_i2s_capture = true,
+ .no_spdif_capture = true,
+ .data = connector,
+ };
+ struct platform_device *pdev;
+
+ if (!funcs ||
+ !funcs->prepare ||
+ !funcs->shutdown)
+ return -EINVAL;
+
+ connector->hdmi_audio.funcs = funcs;
+ connector->hdmi_audio.dai_port = dai_port;
+
+ pdev = platform_device_register_data(hdmi_codec_dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_pdata, sizeof(codec_pdata));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ connector->hdmi_audio.codec_pdev = pdev;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_init);
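
A minimal usage sketch for the new helper, assuming a hypothetical foo driver whose foo_device embeds the connector; only .prepare and .shutdown are mandatory, as enforced by the check in drm_connector_hdmi_audio_init() above:

static int foo_hdmi_audio_prepare(struct drm_connector *connector,
				  struct hdmi_codec_daifmt *fmt,
				  struct hdmi_codec_params *hparms)
{
	/* program the audio infoframe and audio clocks here */
	return 0;
}

static void foo_hdmi_audio_shutdown(struct drm_connector *connector)
{
	/* stop the stream and clear the audio infoframe here */
}

static const struct drm_connector_hdmi_audio_funcs foo_hdmi_audio_funcs = {
	.prepare = foo_hdmi_audio_prepare,
	.shutdown = foo_hdmi_audio_shutdown,
};

static int foo_connector_create(struct foo_device *foo)
{
	/* 8-channel I2S playback, no S/PDIF, sound DAI on OF graph port 1 */
	return drm_connector_hdmi_audio_init(&foo->connector, foo->dev,
					     &foo_hdmi_audio_funcs,
					     8, false, 1);
}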
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index feb7a3a75981..9b2ee2385634 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -5,6 +5,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -347,6 +348,8 @@ static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
is_limited_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL;
int ret;
+ infoframe->set = false;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
if (ret)
return ret;
@@ -376,6 +379,8 @@ static int hdmi_generate_spd_infoframe(const struct drm_connector *connector,
&infoframe->data.spd;
int ret;
+ infoframe->set = false;
+
ret = hdmi_spd_infoframe_init(frame,
connector->hdmi.vendor,
connector->hdmi.product);
@@ -398,6 +403,8 @@ static int hdmi_generate_hdr_infoframe(const struct drm_connector *connector,
&infoframe->data.drm;
int ret;
+ infoframe->set = false;
+
if (connector->max_bpc < 10)
return 0;
@@ -425,6 +432,8 @@ static int hdmi_generate_hdmi_vendor_infoframe(const struct drm_connector *conne
&infoframe->data.vendor.hdmi;
int ret;
+ infoframe->set = false;
+
if (!info->has_hdmi_infoframe)
return 0;
@@ -494,6 +503,9 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
connector_state_get_mode(new_conn_state);
int ret;
+ if (!new_conn_state->crtc || !new_conn_state->best_encoder)
+ return 0;
+
new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
ret = hdmi_compute_config(connector, new_conn_state, mode);
@@ -521,6 +533,27 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
+/**
+ * drm_hdmi_connector_mode_valid() - Check if mode is valid for HDMI connector
+ * @connector: DRM connector to validate the mode
+ * @mode: Display mode to validate
+ *
+ * Generic .mode_valid implementation for HDMI connectors.
+ *
+ * Returns:
+ * MODE_OK if the mode is usable, or the relevant &enum drm_mode_status
+ * rejection code otherwise.
+ */
+enum drm_mode_status
+drm_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ unsigned long long clock;
+
+ clock = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
+ if (!clock)
+ return MODE_ERROR;
+
+ return hdmi_clock_valid(connector, mode, clock);
+}
+EXPORT_SYMBOL(drm_hdmi_connector_mode_valid);
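
Because the helper relies only on the connector's HDMI configuration, drivers can use it directly in their connector helper vtable; pairing it with drm_connector_helper_get_modes() as below is one plausible setup, not a requirement:

static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
	.get_modes = drm_connector_helper_get_modes,
	.mode_valid = drm_hdmi_connector_mode_valid,
};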
+
static int clear_device_infoframe(struct drm_connector *connector,
enum hdmi_infoframe_type type)
{
@@ -748,3 +781,61 @@ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *con
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_clear_audio_infoframe);
+
+static void
+drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ const struct drm_edid *drm_edid;
+
+ if (status == connector_status_disconnected) {
+ // TODO: also handle CEC and scrambler, HDMI sink disconnected.
+ drm_connector_hdmi_audio_plugged_notify(connector, false);
+ drm_edid_connector_update(connector, NULL);
+ return;
+ }
+
+ if (connector->hdmi.funcs->read_edid)
+ drm_edid = connector->hdmi.funcs->read_edid(connector);
+ else
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ drm_edid_free(drm_edid);
+
+ if (status == connector_status_connected) {
+ // TODO: also handle CEC and scrambler, HDMI sink is now connected.
+ drm_connector_hdmi_audio_plugged_notify(connector, true);
+ }
+}
+
+/**
+ * drm_atomic_helper_connector_hdmi_hotplug - Handle the hotplug event for the HDMI connector
+ * @connector: A pointer to the HDMI connector
+ * @status: Connection status
+ *
+ * This function should be called as a part of the .detect() / .detect_ctx()
+ * callbacks, updating the connector's HDMI-specific data.
+ */
+void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_hotplug);
+
+/**
+ * drm_atomic_helper_connector_hdmi_force - HDMI Connector implementation of the force callback
+ * @connector: A pointer to the HDMI connector
+ *
+ * This function implements the .force() callback for the HDMI connectors. It
+ * can either be used directly as the callback or should be called from within
+ * the .force() callback implementation to keep the connector's
+ * HDMI-specific data up to date.
+ */
+void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, connector->status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_force);
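
A sketch of a .detect_ctx() implementation built on these helpers; foo_hw_sense() stands in for the driver's actual hotplug sensing and is an assumption:

static int foo_connector_detect_ctx(struct drm_connector *connector,
				    struct drm_modeset_acquire_ctx *ctx,
				    bool force)
{
	enum drm_connector_status status;

	status = foo_hw_sense(connector) ? connector_status_connected :
					   connector_status_disconnected;

	/* refresh the EDID/ELD and notify the audio codec */
	drm_atomic_helper_connector_hdmi_hotplug(connector, status);

	return status;
}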
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c6af46dd02bf..241a384ebce3 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -207,6 +207,10 @@ void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
+ BIT(HDMI_COLORSPACE_YUV420));
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
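
This mirrors the ycbcr_420_allowed consistency check added to drmm_connector_hdmi_init() further down: for HDMI bridges the flag is now derived from the format mask, so a hypothetical bridge advertising YUV420 output only needs to declare it in supported_formats (foo_bridge_funcs is an assumption):

static struct drm_bridge foo_bridge = {
	.funcs = &foo_bridge_funcs,
	.ops = DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_EDID,
	.supported_formats = BIT(HDMI_COLORSPACE_RGB) |
			     BIT(HDMI_COLORSPACE_YUV420),
};

/* drm_bridge_add(&foo_bridge) now sets foo_bridge.ycbcr_420_allowed */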
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index fc35f47e2849..5f24d6b41cc6 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -218,11 +219,11 @@ void drm_connector_free_work_fn(struct work_struct *work)
}
}
-static int __drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type,
- struct i2c_adapter *ddc)
+static int drm_connector_init_only(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -273,12 +274,15 @@ static int __drm_connector_init(struct drm_device *dev,
/* provide ddc symlink in sysfs */
connector->ddc = ddc;
+ INIT_LIST_HEAD(&connector->head);
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
+ mutex_init(&connector->hdmi_audio.lock);
connector->edid_blob_ptr = NULL;
connector->epoch_counter = 0;
connector->tile_blob_ptr = NULL;
@@ -288,14 +292,6 @@ static int __drm_connector_init(struct drm_device *dev,
drm_connector_get_cmdline_mode(connector);
- /* We should add connectors at the end to avoid upsetting the connector
- * index too much.
- */
- spin_lock_irq(&config->connector_list_lock);
- list_add_tail(&connector->head, &config->connector_list);
- config->num_connector++;
- spin_unlock_irq(&config->connector_list_lock);
-
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_connector_attach_edid_property(connector);
@@ -332,6 +328,54 @@ out_put:
return ret;
}
+static void drm_connector_add(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (drm_WARN_ON(dev, !list_empty(&connector->head)))
+ return;
+
+ spin_lock_irq(&config->connector_list_lock);
+ list_add_tail(&connector->head, &config->connector_list);
+ config->num_connector++;
+ spin_unlock_irq(&config->connector_list_lock);
+}
+
+static void drm_connector_remove(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ /*
+ * For dynamic connectors drm_connector_cleanup() can call this function
+ * before the connector is registered and added to the list.
+ */
+ if (list_empty(&connector->head))
+ return;
+
+ spin_lock_irq(&dev->mode_config.connector_list_lock);
+ list_del_init(&connector->head);
+ dev->mode_config.num_connector--;
+ spin_unlock_irq(&dev->mode_config.connector_list_lock);
+}
+
+static int drm_connector_init_and_add(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ int ret;
+
+ ret = drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
+ if (ret)
+ return ret;
+
+ drm_connector_add(connector);
+
+ return 0;
+}
+
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@@ -361,11 +405,52 @@ int drm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
- return __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+ return drm_connector_init_and_add(dev, connector, funcs, connector_type, NULL);
}
EXPORT_SYMBOL(drm_connector_init);
/**
+ * drm_connector_dynamic_init - Init a preallocated dynamic connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ * @ddc: pointer to the associated ddc adapter
+ *
+ * Initialises a preallocated dynamic connector. Connectors should be
+ * subclassed as part of driver connector objects. The connector
+ * structure should not be allocated with devm_kzalloc().
+ *
+ * Drivers should call this for dynamic connectors which can be hotplugged
+ * after drm_dev_register() has been called already, e.g. DP MST connectors.
+ * For all other - static - connectors, drivers should call one of the
+ * drm_connector_init*()/drmm_connector_init*() functions.
+ *
+ * After calling this function the drivers must call
+ * drm_connector_dynamic_register().
+ *
+ * To remove the connector the driver must call drm_connector_unregister()
+ * followed by drm_connector_put(). Putting the last reference will call the
+ * driver's &drm_connector_funcs.destroy hook, which in turn must call
+ * drm_connector_cleanup() and free the connector structure.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_dynamic_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
+ return -EINVAL;
+
+ return drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
+}
+EXPORT_SYMBOL(drm_connector_dynamic_init);
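
Putting the documented lifecycle together, a hedged sketch of the dynamic-connector path; struct foo_connector (embedding struct drm_connector as base) and foo_connector_funcs are assumptions:

static struct drm_connector *
foo_add_dynamic_connector(struct drm_device *dev, struct i2c_adapter *ddc)
{
	struct foo_connector *fc;
	int ret;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);	/* not devm_kzalloc() */
	if (!fc)
		return ERR_PTR(-ENOMEM);

	ret = drm_connector_dynamic_init(dev, &fc->base, &foo_connector_funcs,
					 DRM_MODE_CONNECTOR_DisplayPort, ddc);
	if (ret) {
		kfree(fc);
		return ERR_PTR(ret);
	}

	/* ... attach the encoder and connector properties ... */

	ret = drm_connector_dynamic_register(&fc->base);
	if (ret) {
		drm_connector_put(&fc->base);	/* .destroy frees fc */
		return ERR_PTR(ret);
	}

	return &fc->base;
}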
+
+/**
* drm_connector_init_with_ddc - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
@@ -398,7 +483,7 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
- return __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+ return drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
}
EXPORT_SYMBOL(drm_connector_init_with_ddc);
@@ -442,7 +527,7 @@ int drmm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
- ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+ ret = drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
if (ret)
return ret;
@@ -507,6 +592,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;
+ if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
+ return -EINVAL;
+
if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
@@ -631,6 +719,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
DRM_CONNECTOR_REGISTERED))
drm_connector_unregister(connector);
+ platform_device_unregister(connector->hdmi_audio.codec_pdev);
+
if (connector->privacy_screen) {
drm_privacy_screen_put(connector->privacy_screen);
connector->privacy_screen = NULL;
@@ -659,16 +749,15 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->name = NULL;
fwnode_handle_put(connector->fwnode);
connector->fwnode = NULL;
- spin_lock_irq(&dev->mode_config.connector_list_lock);
- list_del(&connector->head);
- dev->mode_config.num_connector--;
- spin_unlock_irq(&dev->mode_config.connector_list_lock);
+
+ drm_connector_remove(connector);
WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
if (connector->state && connector->funcs->atomic_destroy_state)
connector->funcs->atomic_destroy_state(connector,
connector->state);
+ mutex_destroy(&connector->hdmi_audio.lock);
mutex_destroy(&connector->hdmi.infoframes.lock);
mutex_destroy(&connector->mutex);
@@ -683,14 +772,17 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
- * Register userspace interfaces for a connector. Only call this for connectors
- * which can be hotplugged after drm_dev_register() has been called already,
- * e.g. DP MST connectors. All other connectors will be registered automatically
- * when calling drm_dev_register().
+ * Register userspace interfaces for a connector. Drivers shouldn't call this
+ * function. Static connectors will be registered automatically by DRM core
+ * from drm_dev_register(), dynamic connectors (MST) should be registered by
+ * drivers calling drm_connector_dynamic_register().
*
* When the connector is no longer available, callers must call
* drm_connector_unregister().
*
+ * Note: Existing uses of this function in drivers should be a nop already and
+ * are scheduled to be removed.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -750,12 +842,43 @@ unlock:
EXPORT_SYMBOL(drm_connector_register);
/**
+ * drm_connector_dynamic_register - register a dynamic connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector. Only call this for connectors
+ * initialized by calling drm_connector_dynamic_init(). All other connectors
+ * will be registered automatically when calling drm_dev_register().
+ *
+ * When the connector is no longer available the driver must call
+ * drm_connector_unregister().
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_dynamic_register(struct drm_connector *connector)
+{
+ /* Was the connector initialized already? */
+ if (WARN_ON(!(connector->funcs && connector->funcs->destroy)))
+ return -EINVAL;
+
+ drm_connector_add(connector);
+
+ return drm_connector_register(connector);
+}
+EXPORT_SYMBOL(drm_connector_dynamic_register);
+
+/**
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
- * Unregister userspace interfaces for a connector. Only call this for
- * connectors which have been registered explicitly by calling
- * drm_connector_register().
+ * Unregister userspace interfaces for a connector. Drivers should call this
+ * for dynamic connectors (MST) only, which were registered explicitly by
+ * calling drm_connector_dynamic_register(). All other - static - connectors
+ * will be unregistered automatically by DRM core and drivers shouldn't call
+ * this function for those.
+ *
+ * Note: Existing uses of this function in drivers for static connectors
+ * should be a nop already and are scheduled to be removed.
*/
void drm_connector_unregister(struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
new file mode 100644
index 000000000000..cb2ad12bce57
--- /dev/null
+++ b/drivers/gpu/drm/drm_draw.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/iosys-map.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "drm_draw_internal.h"
+
+/*
+ * Conversions from xrgb8888
+ */
+
+static u16 convert_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+}
+
+static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u32 convert_xrgb8888_to_argb8888(u32 pix)
+{
+ return pix | GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ ((pix & 0xff000000) >> 24) << 24;
+}
+
+static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00FF0000) >> 14) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x000000FF) << 22);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
+/**
+ * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+ * @color: input color, in xrgb8888 format
+ * @format: output format
+ *
+ * Returns:
+ * Color in the specified format, cast to u32.
+ * Or 0 if the format is not supported.
+ */
+u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ return convert_xrgb8888_to_rgb565(color);
+ case DRM_FORMAT_RGBA5551:
+ return convert_xrgb8888_to_rgba5551(color);
+ case DRM_FORMAT_XRGB1555:
+ return convert_xrgb8888_to_xrgb1555(color);
+ case DRM_FORMAT_ARGB1555:
+ return convert_xrgb8888_to_argb1555(color);
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ return color;
+ case DRM_FORMAT_ARGB8888:
+ return convert_xrgb8888_to_argb8888(color);
+ case DRM_FORMAT_XBGR8888:
+ return convert_xrgb8888_to_xbgr8888(color);
+ case DRM_FORMAT_ABGR8888:
+ return convert_xrgb8888_to_abgr8888(color);
+ case DRM_FORMAT_XRGB2101010:
+ return convert_xrgb8888_to_xrgb2101010(color);
+ case DRM_FORMAT_ARGB2101010:
+ return convert_xrgb8888_to_argb2101010(color);
+ case DRM_FORMAT_ABGR2101010:
+ return convert_xrgb8888_to_abgr2101010(color);
+ default:
+ WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_draw_color_from_xrgb8888);
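
A worked example of the arithmetic: for pure red only the top five bits of the 8-bit channel survive in RGB565, so the call below returns 0xF800.

u32 red565 = drm_draw_color_from_xrgb8888(0x00ff0000, DRM_FORMAT_RGB565);
/* (0x00ff0000 & 0x00F80000) >> 8 == 0xF800; green and blue terms are 0 */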
+
+/*
+ * Blit functions
+ */
+void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u16 fg16)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
+}
+EXPORT_SYMBOL(drm_draw_blit16);
+
+void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ u32 off = y * dpitch + x * 3;
+
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+ /* write blue-green-red to output in little endianness */
+ iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(drm_draw_blit24);
+
+void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
+}
+EXPORT_SYMBOL(drm_draw_blit32);
+
+/*
+ * Fill functions
+ */
+void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
+}
+EXPORT_SYMBOL(drm_draw_fill16);
+
+void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ unsigned int off = y * dpitch + x * 3;
+
+ /* write blue-green-red to output in little endianness */
+ iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_draw_fill24);
+
+void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
+}
+EXPORT_SYMBOL(drm_draw_fill32);
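
These helpers are internal to the DRM core (drm_panic below becomes the first user). A sketch of how a caller might combine them on an XRGB8888 mapping; the map, pitch and glyph arguments are assumed to come from the scanout buffer:

static void foo_draw_glyph(struct iosys_map *map, unsigned int pitch,
			   const u8 *glyph, unsigned int glyph_pitch)
{
	u32 bg = drm_draw_color_from_xrgb8888(0x000000, DRM_FORMAT_XRGB8888);
	u32 fg = drm_draw_color_from_xrgb8888(0xffffff, DRM_FORMAT_XRGB8888);

	/* clear a 64x64 area, then blit a 1-bpp glyph scaled up 4x */
	drm_draw_fill32(map, pitch, 64, 64, bg);
	drm_draw_blit32(map, pitch, glyph, glyph_pitch, 64, 64, 4, fg);
}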
diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
new file mode 100644
index 000000000000..f121ee7339dc
--- /dev/null
+++ b/drivers/gpu/drm/drm_draw_internal.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#ifndef __DRM_DRAW_INTERNAL_H__
+#define __DRM_DRAW_INTERNAL_H__
+
+#include <linux/font.h>
+#include <linux/types.h>
+
+struct iosys_map;
+
+/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
+static inline bool drm_draw_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
+{
+ return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
+}
+
+static inline const u8 *drm_draw_get_char_bitmap(const struct font_desc *font,
+ char c, size_t font_pitch)
+{
+ return font->data + (c * font->height) * font_pitch;
+}
+
+u32 drm_draw_color_from_xrgb8888(u32 color, u32 format);
+
+void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u16 fg16);
+
+void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32);
+
+void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32);
+
+void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color);
+
+void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color);
+
+void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color);
+
+#endif /* __DRM_DRAW_INTERNAL_H__ */
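
The 1-bpp source layout used by drm_draw_is_pixel_fg() is MSB-first: x / 8 selects the byte within the row, 0x80 >> (x % 8) the bit, so bit 7 of each byte is the leftmost pixel. For example, with a 2-byte pitch (16 pixels per row):

static bool foo_sample_pixel(const u8 *sbuf8)
{
	/* pixel (x=10, y=1): byte 1 * 2 + 10 / 8 = 3, bit 0x80 >> 2 = 0x20 */
	return drm_draw_is_pixel_fg(sbuf8, 2, 10, 1);
}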
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c2c172eb25df..3cf440eee8a2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -26,6 +26,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
@@ -820,6 +821,37 @@ void drm_dev_put(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_put);
+static void drmm_cg_unregister_region(struct drm_device *dev, void *arg)
+{
+ dmem_cgroup_unregister_region(arg);
+}
+
+/**
+ * drmm_cgroup_register_region - Register a region of a DRM device to cgroups
+ * @dev: device for region
+ * @region_name: Region name for registering
+ * @size: Size of region in bytes
+ *
+ * Registers a dmem cgroup region named "drm/<dev unique name>/<region_name>"
+ * for @dev and unregisters it automatically when the device is released.
+ *
+ * Returns:
+ * The registered region on success, NULL if dmem cgroup accounting is
+ * disabled, or an ERR_PTR() on failure.
+ */
+struct dmem_cgroup_region *drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size)
+{
+ struct dmem_cgroup_region *region;
+ int ret;
+
+ region = dmem_cgroup_register_region(size, "drm/%s/%s", dev->unique, region_name);
+ if (IS_ERR_OR_NULL(region))
+ return region;
+
+ ret = drmm_add_action_or_reset(dev, drmm_cg_unregister_region, region);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return region;
+}
+EXPORT_SYMBOL_GPL(drmm_cgroup_register_region);
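
A sketch of a driver registering its VRAM from probe; the foo_device layout, the region name and the SZ_256M size are assumptions. Note that a NULL return (as opposed to an ERR_PTR()) means dmem cgroup accounting is simply disabled:

static int foo_register_vram_region(struct foo_device *foo)
{
	struct dmem_cgroup_region *region;

	region = drmm_cgroup_register_region(&foo->drm, "vram0", SZ_256M);
	if (IS_ERR(region))
		return PTR_ERR(region);

	/* later, charge buffer allocations against foo->vram_region */
	foo->vram_region = region;
	return 0;
}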
+
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 855beafb76ff..13bc4c290b17 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5605,7 +5605,9 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name);
static void clear_eld(struct drm_connector *connector)
{
+ mutex_lock(&connector->eld_mutex);
memset(connector->eld, 0, sizeof(connector->eld));
+ mutex_unlock(&connector->eld_mutex);
connector->latency_present[0] = false;
connector->latency_present[1] = false;
@@ -5657,6 +5659,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
if (!drm_edid)
return;
+ mutex_lock(&connector->eld_mutex);
+
mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
connector->base.id, connector->name,
@@ -5717,6 +5721,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
connector->base.id, connector->name,
drm_eld_size(eld), total_sad_count);
+
+ mutex_unlock(&connector->eld_mutex);
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index cb5f22f5bbb6..2289e71e2fa2 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -845,6 +845,16 @@ static void print_size(struct drm_printer *p, const char *stat,
drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
}
+int drm_memory_stats_is_zero(const struct drm_memory_stats *stats)
+{
+ return (stats->shared == 0 &&
+ stats->private == 0 &&
+ stats->resident == 0 &&
+ stats->purgeable == 0 &&
+ stats->active == 0);
+}
+EXPORT_SYMBOL(drm_memory_stats_is_zero);
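
Together with the DRM_GEM_OBJECT_ACTIVE accounting below, this lets a driver's fdinfo hook skip regions that would only print zeroes; a minimal sketch:

static void foo_print_region_stats(struct drm_printer *p, const char *region,
				   const struct drm_memory_stats *stats,
				   enum drm_gem_object_status supported)
{
	if (drm_memory_stats_is_zero(stats))
		return;

	drm_print_memory_stats(p, stats, supported, region);
}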
+
/**
* drm_print_memory_stats - A helper to print memory stats
* @p: The printer to print output to
@@ -860,7 +870,9 @@ void drm_print_memory_stats(struct drm_printer *p,
{
print_size(p, "total", region, stats->private + stats->shared);
print_size(p, "shared", region, stats->shared);
- print_size(p, "active", region, stats->active);
+
+ if (supported_status & DRM_GEM_OBJECT_ACTIVE)
+ print_size(p, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
print_size(p, "resident", region, stats->resident);
@@ -893,15 +905,13 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (obj->funcs && obj->funcs->status) {
s = obj->funcs->status(obj);
- supported_status = DRM_GEM_OBJECT_RESIDENT |
- DRM_GEM_OBJECT_PURGEABLE;
+ supported_status |= s;
}
- if (drm_gem_object_is_shared_for_memory_stats(obj)) {
+ if (drm_gem_object_is_shared_for_memory_stats(obj))
status.shared += obj->size;
- } else {
+ else
status.private += obj->size;
- }
if (s & DRM_GEM_OBJECT_RESIDENT) {
status.resident += add_size;
@@ -914,6 +924,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
status.active += add_size;
+ supported_status |= DRM_GEM_OBJECT_ACTIVE;
/* If still active, don't count as purgeable: */
s &= ~DRM_GEM_OBJECT_PURGEABLE;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 37d2e0a4ef4b..8642a2fb25a9 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -150,6 +150,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
drm_connector_list_iter_begin(dev, &conn_iter);
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
+ /*
+ * FIXME: the connectors on the list may not be fully initialized yet,
+ * if the ioctl is called before the connectors are registered. (See
+ * drm_dev_register()->drm_modeset_register_all() for static and
+ * drm_connector_dynamic_register() for dynamic connectors.)
+ * The driver should only get registered after static connectors are
+ * fully initialized and dynamic connectors should be added to the
+ * connector list only after fully initializing them.
+ */
drm_for_each_connector_iter(connector, &conn_iter) {
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 71573b85d924..e72f855fc495 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1282,8 +1282,7 @@ EXPORT_SYMBOL(drm_mode_set_name);
* @mode: mode
*
* Returns:
- * @modes's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
+ * @mode's vrefresh rate in Hz, rounded to the nearest integer.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 5c2abc9eca9c..5530919e0ba0 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -564,6 +564,8 @@ EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
* Gets parent DSI bus for a DSI device controlled through a bus other
* than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
*
+ * This function assumes that the device's port@0 is the DSI input.
+ *
* Returns pointer to mipi_dsi_host if successful, -EINVAL if the
* request is unsupported, -EPROBE_DEFER if the DSI host is found but
* not available, or -ENODEV otherwise.
@@ -576,7 +578,7 @@ struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
/*
* Get first endpoint child from device.
*/
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!endpoint)
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 19ab0a794add..9940e96d35e3 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,6 +24,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -413,7 +414,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_read_bool(dev->of_node, "panel");
+ return of_property_present(dev->of_node, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
new file mode 100644
index 000000000000..c477d98ade2b
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_utils.h>
+
+struct drm_panel_min_backlight_quirk {
+ struct {
+ enum dmi_field field;
+ const char * const value;
+ } dmi_match;
+ struct drm_edid_ident ident;
+ u8 min_brightness;
+};
+
+static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
+ /* 13 inch matte panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
+ .ident.name = "NE135FBM-N41",
+ .min_brightness = 0,
+ },
+ /* 13 inch glossy panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
+ .ident.name = "NE135FBM-N41",
+ .min_brightness = 0,
+ },
+ /* 13 inch 2.8k panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
+ .ident.name = "NE135A1M-NY1",
+ .min_brightness = 0,
+ },
+};
+
+static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
+ const struct drm_edid *edid)
+{
+ if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ return false;
+
+ if (!drm_edid_match(edid, &quirk->ident))
+ return false;
+
+ return true;
+}
+
+/**
+ * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
+ * @edid: EDID of the panel to check
+ *
+ * This function checks for platform-specific (e.g. DMI-based) quirks
+ * providing info on the minimum backlight brightness for systems where this
+ * cannot be probed correctly from the hardware or firmware.
+ *
+ * Returns:
+ * A negative error value, or an override value in the range [0, 255]
+ * (representing 0-100%) to be scaled to the driver's target range.
+ */
+int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
+{
+ const struct drm_panel_min_backlight_quirk *quirk;
+ size_t i;
+
+ if (!IS_ENABLED(CONFIG_DMI))
+ return -ENODATA;
+
+ if (!edid)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
+ quirk = &drm_panel_min_backlight_quirks[i];
+
+ if (drm_panel_min_backlight_quirk_matches(quirk, edid))
+ return quirk->min_brightness;
+ }
+
+ return -ENODATA;
+}
+EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
+
+MODULE_DESCRIPTION("Quirks for panel backlight overrides");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 0a9ecc1380d2..f128d345b16d 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -31,6 +31,7 @@
#include <drm/drm_rect.h>
#include "drm_crtc_internal.h"
+#include "drm_draw_internal.h"
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM panic handler");
@@ -139,181 +140,8 @@ device_initcall(drm_panic_setup_logo);
#endif
/*
- * Color conversion
+ * Blit & Fill functions
*/
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-/*
- * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
- * @color: input color, in xrgb8888 format
- * @format: output format
- *
- * Returns:
- * Color in the format specified, casted to u32.
- * Or 0 if the format is not supported.
- */
-static u32 convert_from_xrgb8888(u32 color, u32 format)
-{
- switch (format) {
- case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
- case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
- case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
- case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_XRGB8888:
- return color;
- case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
- case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
- case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
- case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
- case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
- case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
- default:
- WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
- return 0;
- }
-}
-
-/*
- * Blit & Fill
- */
-/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
-static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
-{
- return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
-}
-
-static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u16 fg16)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
-}
-
-static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u32 fg32)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- u32 off = y * dpitch + x * 3;
-
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
- /* write blue-green-red to output in little endianness */
- iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
- iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
- iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
- }
- }
- }
-}
-
-static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u32 fg32)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
-}
-
static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
const u8 *sbuf8, unsigned int spitch, unsigned int scale,
u32 fg_color)
@@ -322,7 +150,7 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
for (y = 0; y < drm_rect_height(clip); y++)
for (x = 0; x < drm_rect_width(clip); x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
@@ -354,62 +182,22 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
- drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit16(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 3:
- drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit24(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 4:
- drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit32(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
default:
WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
}
}
-static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u16 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
-}
-
-static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u32 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- unsigned int off = y * dpitch + x * 3;
-
- /* write blue-green-red to output in little endianness */
- iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
- iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
- iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
- }
- }
-}
-
-static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u32 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
-}
-
static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
struct drm_rect *clip,
u32 color)
@@ -442,27 +230,22 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
- drm_panic_fill16(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill16(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
case 3:
- drm_panic_fill24(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill24(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
case 4:
- drm_panic_fill32(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill32(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
default:
WARN_ONCE(1, "Can't fill with pixel width %d\n", sb->format->cpp[0]);
}
}
-static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
-{
- return font->data + (c * font->height) * font_pitch;
-}
-
static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
{
int i;
@@ -501,7 +284,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
rec.x1 += (drm_rect_width(clip) - (line_len * font->width)) / 2;
for (j = 0; j < line_len; j++) {
- src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
+ src = drm_draw_get_char_bitmap(font, msg[i].txt[j], font_pitch);
rec.x2 = rec.x1 + font->width;
drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
rec.x1 += font->width;
@@ -533,8 +316,10 @@ static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *
static void draw_panic_static_user(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg;
unsigned int msg_width, msg_height;
@@ -600,8 +385,10 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
*/
static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
struct kmsg_dump_iter iter;
@@ -791,8 +578,10 @@ static int drm_panic_get_qr_code(u8 **qr_image)
*/
static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
unsigned int max_qr_size, scale;
@@ -878,7 +667,7 @@ static bool drm_panic_is_format_supported(const struct drm_format_info *format)
{
if (format->num_planes != 1)
return false;
- return convert_from_xrgb8888(0xffffff, format->format) != 0;
+ return drm_draw_color_from_xrgb8888(0xffffff, format->format) != 0;
}
static void draw_panic_dispatch(struct drm_scanout_buffer *sb)
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index ef2d490965ba..bcf248f69252 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -931,7 +931,7 @@ impl QrImage<'_> {
/// They must remain valid for the duration of the function call.
#[no_mangle]
pub unsafe extern "C" fn drm_panic_qr_generate(
- url: *const i8,
+ url: *const kernel::ffi::c_char,
data: *mut u8,
data_len: usize,
data_size: usize,
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 08cfea04e22b..79517bd4418f 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -390,3 +390,26 @@ void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
}
}
EXPORT_SYMBOL(drm_print_regset32);
+
+/**
+ * drm_print_hex_dump - print a hex dump to a &drm_printer stream
+ * @p: The &drm_printer
+ * @prefix: Prefix for each line, may be NULL for no prefix
+ * @buf: Buffer to dump
+ * @len: Length of buffer
+ *
+ * Print hex dump to &drm_printer, with 16 space-separated hex bytes per line,
+ * optionally with a prefix on each line. No separator is added after prefix.
+ */
+void drm_print_hex_dump(struct drm_printer *p, const char *prefix,
+ const u8 *buf, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i += 16) {
+ int bytes_per_line = min_t(size_t, 16, len - i);
+
+ drm_printf(p, "%s%*ph\n", prefix ?: "", bytes_per_line, buf + i);
+ }
+}
+EXPORT_SYMBOL(drm_print_hex_dump);
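
For instance, a driver could route a received buffer through a debug printer; foo_dump_reply() and its arguments are hypothetical:

static void foo_dump_reply(struct drm_device *drm,
			   const u8 *rxbuf, size_t rxlen)
{
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_KMS, __func__);

	drm_print_hex_dump(&p, "reply: ", rxbuf, rxlen);
}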
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 1752ffb44e1d..9cc71120246f 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -277,7 +277,7 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
INIT_LIST_HEAD(&vblank->pending_work);
init_waitqueue_head(&vblank->work_wait_queue);
- worker = kthread_create_worker(0, "card%d-crtc%d",
+ worker = kthread_run_worker(0, "card%d-crtc%d",
vblank->dev->primary->index,
vblank->pipe);
if (IS_ERR(worker))
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 7aa5f14d0c87..3a221923f15d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -9,7 +9,6 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_perfmon.h"
#define SUBALLOC_SIZE SZ_512K
#define SUBALLOC_GRANULE SZ_4K
@@ -100,7 +99,7 @@ retry:
mutex_unlock(&suballoc->lock);
ret = wait_event_interruptible_timeout(suballoc->free_event,
suballoc->free_space,
- msecs_to_jiffies(10 * 1000));
+ secs_to_jiffies(10));
if (!ret) {
dev_err(suballoc->dev,
"Timeout waiting for cmdbuf space\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a46f9e4ac09a..3e91747ed339 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -488,7 +488,16 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_FOPS(fops);
+static void etnaviv_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ drm_show_memory_stats(p, file);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = drm_show_fdinfo,
+};
static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
@@ -498,12 +507,12 @@ static const struct drm_driver etnaviv_drm_driver = {
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
#endif
+ .show_fdinfo = etnaviv_show_fdinfo,
.ioctls = etnaviv_ioctls,
.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
.fops = &fops,
.name = "etnaviv",
.desc = "etnaviv DRM",
- .date = "20151214",
.major = 1,
.minor = 4,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 16473c371444..2f844e82bc46 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
struct page **pages;
+ pgprot_t prot;
lockdep_assert_held(&obj->lock);
@@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
if (IS_ERR(pages))
return NULL;
- return vmap(pages, obj->base.size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ switch (obj->flags & ETNA_BO_CACHE_MASK) {
+ case ETNA_BO_CACHED:
+ prot = PAGE_KERNEL;
+ break;
+ case ETNA_BO_UNCACHED:
+ prot = pgprot_noncached(PAGE_KERNEL);
+ break;
+ case ETNA_BO_WC:
+ default:
+ prot = pgprot_writecombine(PAGE_KERNEL);
+ }
+
+ return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
}
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
@@ -528,6 +540,17 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_unlock(&priv->gem_lock);
}
+static enum drm_gem_object_status etnaviv_gem_status(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (etnaviv_obj->pages)
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct vm_operations_struct vm_ops = {
.fault = etnaviv_gem_fault,
.open = drm_gem_vm_open,
@@ -541,6 +564,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.get_sg_table = etnaviv_gem_prime_get_sg_table,
.vmap = etnaviv_gem_prime_vmap,
.mmap = etnaviv_gem_mmap,
+ .status = etnaviv_gem_status,
.vm_ops = &vm_ops,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 687555aae807..e5ee82a0674c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -44,9 +44,7 @@ struct etnaviv_gem_object {
u32 flags;
struct list_head gem_node;
- struct etnaviv_gpu *gpu; /* non-null if active */
atomic_t gpu_active;
- u32 access;
struct page **pages;
struct sg_table *sgt;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 2d4c112ce033..cf0d9049bcf1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
@@ -172,6 +173,29 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
return 0;
}
+static int etnaviv_gpu_reset_deassert(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ /*
+ * 32 core clock cycles (slowest clock) required before deassertion
+ * 1 microsecond might match all implementations without computation
+ */
+ usleep_range(1, 2);
+
+ ret = reset_control_deassert(gpu->rst);
+ if (ret)
+ return ret;
+
+ /*
+ * At least 128 cycles of the slowest core clock are required before any
+ * activity on the AHB; again, sleeping 1 microsecond should cover all
+ * implementations without computing the actual clock period.
+ */
+ usleep_range(1, 2);
+
+ return 0;
+}
+
static inline bool etnaviv_is_model_rev(struct etnaviv_gpu *gpu, u32 model, u32 revision)
{
return gpu->identity.model == model &&
@@ -799,6 +823,12 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto pm_put;
}
+ ret = etnaviv_gpu_reset_deassert(gpu);
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset deassert failed\n");
+ goto fail;
+ }
+
etnaviv_hw_identify(gpu);
if (gpu->identity.model == 0) {
@@ -1860,6 +1890,17 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
if (IS_ERR(gpu->mmio))
return PTR_ERR(gpu->mmio);
+
+ /* Get Reset: */
+ gpu->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(gpu->rst))
+ return dev_err_probe(dev, PTR_ERR(gpu->rst),
+ "failed to get reset\n");
+
+ err = reset_control_assert(gpu->rst);
+ if (err)
+ return dev_err_probe(dev, err, "failed to assert reset\n");
+
/* Get Interrupt: */
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 4d8a7d48ade3..5cb46c84e03a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -93,6 +93,7 @@ struct etnaviv_event {
struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;
+struct reset_control;
#define ETNA_NR_EVENTS 30
@@ -158,6 +159,7 @@ struct etnaviv_gpu {
struct clk *clk_reg;
struct clk *clk_core;
struct clk *clk_shader;
+ struct reset_control *rst;
unsigned int freq_scale;
unsigned int fe_waitcycles;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 7e065b3723cf..df5192083b20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -19,12 +19,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
size_t unmapped_page, unmapped = 0;
size_t pgsize = SZ_4K;
- if (!IS_ALIGNED(iova | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
- iova, size, pgsize);
- return;
- }
-
while (unmapped < size) {
unmapped_page = context->global->ops->unmap(context, iova,
pgsize);
@@ -45,12 +39,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
size_t orig_size = size;
int ret = 0;
- if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
- iova, &paddr, size, pgsize);
- return -EINVAL;
- }
-
while (size) {
ret = context->global->ops->map(context, iova, paddr, pgsize,
prot);
@@ -82,11 +70,19 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
return -EINVAL;
for_each_sgtable_dma_sg(sgt, sg, i) {
- phys_addr_t pa = sg_dma_address(sg) - sg->offset;
- unsigned int da_len = sg_dma_len(sg) + sg->offset;
+ phys_addr_t pa = sg_dma_address(sg);
+ unsigned int da_len = sg_dma_len(sg);
unsigned int bytes = min_t(unsigned int, da_len, va_len);
- VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
+ VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
+
+ if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+ dev_err(context->global->dev,
+ "unaligned: iova 0x%x pa %pa size 0x%x\n",
+ iova, &pa, bytes);
+ ret = -EINVAL;
+ goto fail;
+ }
ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
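The relocated alignment check above is stricter than the ones it replaces: each DMA segment, not just the overall iova and size, must now be 4K-aligned, which is consistent with dropping the sg->offset adjustment a few lines earlier, and an unaligned entry now fails the whole mapping with -EINVAL instead of merely logging an error.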
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1c44f85c5f54..f313ae7bc3a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -13,9 +13,9 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
@@ -35,7 +35,6 @@
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
-#define DRIVER_DATE "20180330"
/*
* Interface history:
@@ -118,7 +117,6 @@ static const struct drm_driver exynos_drm_driver = {
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 466a9e514aa1..176fd8871759 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1648,7 +1648,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_connector *connector = &hdata->connector;
+ mutex_lock(&connector->eld_mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
return 0;
}
@@ -1658,7 +1660,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
- .no_capture_mute = 1,
};
static int hdmi_register_audio_device(struct hdmi_context *hdata)
@@ -1667,6 +1668,7 @@ static int hdmi_register_audio_device(struct hdmi_context *hdata)
.ops = &audio_codec_ops,
.max_i2s_channels = 6,
.i2s = 1,
+ .no_capture_mute = 1,
};
hdata->audio.pdev = platform_device_register_data(
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index be1ab673e49e..03b076db9381 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -18,8 +18,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -174,7 +174,6 @@ static const struct drm_driver fsl_dcu_drm_driver = {
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
- .date = "20160425",
.major = 1,
.minor = 1,
};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 2c2b92324a2e..c418e8496bdf 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -6,6 +6,7 @@
*/
#include <linux/backlight.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index c419ebbc49ec..85d3557c2eb9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -19,8 +19,8 @@
#include <acpi/video.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -513,7 +513,6 @@ static const struct drm_driver driver = {
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index de62cbfcdc72..7f77cb2b2751 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -26,7 +26,6 @@
#define DRIVER_NAME "gma500"
#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
-#define DRIVER_DATE "20140314"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 09ccdc1dc1a2..cb405771d6e2 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -13,9 +13,9 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -381,7 +381,6 @@ static const struct drm_driver gud_drm_driver = {
.name = "gud",
.desc = "Generic USB Display",
- .date = "20200422",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 80253d39664a..93b8d32e3be1 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on DRM && PCI
depends on MMU
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index d25c75e60d3d..95a4ed599d98 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
new file mode 100644
index 000000000000..0a903cce1fa9
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/minmax.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_reg.h"
+
+#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
+#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
+#define HIBMC_AUX_CMD_I2C_ADDR_ONLY BIT(28)
+#define HIBMC_BYTES_IN_U32 4
+#define HIBMC_AUX_I2C_WRITE_SUCCESS 0x1
+#define HIBMC_DP_MIN_PULSE_NUM 0x9
+#define BITS_IN_U8 8
+
+static inline void hibmc_dp_aux_reset(struct hibmc_dp_dev *dp)
+{
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_RST_CTRL, HIBMC_DP_CFG_AUX_RST_N, 0x0);
+ usleep_range(10, 15);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_RST_CTRL, HIBMC_DP_CFG_AUX_RST_N, 0x1);
+}
+
+static void hibmc_dp_aux_read_data(struct hibmc_dp_dev *dp, u8 *buf, u8 size)
+{
+ u32 reg_num;
+ u32 value;
+ u32 num;
+ u8 i, j;
+
+ reg_num = DIV_ROUND_UP(size, HIBMC_BYTES_IN_U32);
+ for (i = 0; i < reg_num; i++) {
+ /* number of bytes read from a single register */
+ num = min(size - i * HIBMC_BYTES_IN_U32, HIBMC_BYTES_IN_U32);
+ value = readl(dp->base + HIBMC_DP_AUX_RD_DATA0 + i * HIBMC_BYTES_IN_U32);
+ /* unpack the 32-bit register value into the byte buffer */
+ for (j = 0; j < num; j++)
+ buf[i * HIBMC_BYTES_IN_U32 + j] = value >> (j * BITS_IN_U8);
+ }
+}
+
+static void hibmc_dp_aux_write_data(struct hibmc_dp_dev *dp, u8 *buf, u8 size)
+{
+ u32 reg_num;
+ u32 value;
+ u32 num;
+ u8 i, j;
+
+ reg_num = DIV_ROUND_UP(size, HIBMC_BYTES_IN_U32);
+ for (i = 0; i < reg_num; i++) {
+ /* number of bytes written to a single register */
+ num = min_t(u8, size - i * HIBMC_BYTES_IN_U32, HIBMC_BYTES_IN_U32);
+ value = 0;
+ /* assemble the 32-bit value to be written to a single register */
+ for (j = 0; j < num; j++)
+ value |= buf[i * HIBMC_BYTES_IN_U32 + j] << (j * BITS_IN_U8);
+ /* write the assembled value to the register */
+ writel(value, dp->base + HIBMC_DP_AUX_WR_DATA0 + i * HIBMC_BYTES_IN_U32);
+ }
+}
+
+static u32 hibmc_dp_aux_build_cmd(const struct drm_dp_aux_msg *msg)
+{
+ u32 aux_cmd = msg->request;
+
+ if (msg->size)
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_REQ_LEN, (msg->size - 1));
+ else
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_I2C_ADDR_ONLY, 1);
+
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_ADDR, msg->address);
+
+ return aux_cmd;
+}
+
+/* returns the transfer size on success (>= 0) or a negative error code */
+static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_msg *msg)
+{
+ u32 buf_data_cnt;
+ u32 aux_status;
+
+ aux_status = readl(dp->base + HIBMC_DP_AUX_STATUS);
+ msg->reply = FIELD_GET(HIBMC_DP_CFG_AUX_STATUS, aux_status);
+
+ if (aux_status & HIBMC_DP_CFG_AUX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ /* only address */
+ if (!msg->size)
+ return 0;
+
+ if (msg->reply != DP_AUX_NATIVE_REPLY_ACK)
+ return -EIO;
+
+ buf_data_cnt = FIELD_GET(HIBMC_DP_CFG_AUX_READY_DATA_BYTE, aux_status);
+
+ switch (msg->request) {
+ case DP_AUX_NATIVE_WRITE:
+ return msg->size;
+ case DP_AUX_I2C_WRITE | DP_AUX_I2C_MOT:
+ if (buf_data_cnt == HIBMC_AUX_I2C_WRITE_SUCCESS)
+ return msg->size;
+ else
+ return FIELD_GET(HIBMC_DP_CFG_AUX, aux_status);
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ | DP_AUX_I2C_MOT:
+ buf_data_cnt--;
+ if (buf_data_cnt != msg->size) {
+ /* short read: only part of the data arrived, let the caller retry */
+ return -EBUSY;
+ }
+
+ /* all data is successfully read */
+ hibmc_dp_aux_read_data(dp, msg->buffer, msg->size);
+ return msg->size;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* returns the transfer size on success (>= 0) or a negative error code */
+static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ u32 aux_cmd;
+ int ret;
+ u32 val; /* assigned by readl_poll_timeout() before first use */
+
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA0);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA1);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA2);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA3);
+
+ hibmc_dp_aux_write_data(dp, msg->buffer, msg->size);
+
+ aux_cmd = hibmc_dp_aux_build_cmd(msg);
+ writel(aux_cmd, dp->base + HIBMC_DP_AUX_CMD_ADDR);
+
+ /* enable aux transfer */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_REQ, 0x1);
+ ret = readl_poll_timeout(dp->base + HIBMC_DP_AUX_REQ, val,
+ !(val & HIBMC_DP_CFG_AUX_REQ), 50, 5000);
+ if (ret) {
+ hibmc_dp_aux_reset(dp);
+ return ret;
+ }
+
+ return hibmc_dp_aux_parse_xfer(dp, msg);
+}
+
+void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+{
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ HIBMC_DP_MIN_PULSE_NUM);
+
+ dp->aux.transfer = hibmc_dp_aux_xfer;
+ dp->aux.is_remote = 0;
+ drm_dp_aux_init(&dp->aux);
+}
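With the .transfer hook installed and drm_dp_aux_init() called, the generic DRM DP helpers route every DPCD and I2C-over-AUX access through hibmc_dp_aux_xfer(). A minimal usage sketch, reading the sink's DPCD revision with the stock helper (error handling elided):

    #include <drm/display/drm_dp_helper.h>

    u8 rev;

    /* drm_dp_dpcd_readb() returns 1 (bytes transferred) on success */
    if (drm_dp_dpcd_readb(&dp->aux, DP_DPCD_REV, &rev) == 1)
        drm_dbg_dp(dp->dev, "sink DPCD rev %x.%x\n", rev >> 4, rev & 0xf);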
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
new file mode 100644
index 000000000000..2c52a4476c4d
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_COMM_H
+#define DP_COMM_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <drm/display/drm_dp_helper.h>
+
+#define HIBMC_DP_LANE_NUM_MAX 2
+
+struct hibmc_link_status {
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+struct hibmc_link_cap {
+ u8 link_rate;
+ u8 lanes;
+};
+
+struct hibmc_dp_link {
+ struct hibmc_link_status status;
+ u8 train_set[HIBMC_DP_LANE_NUM_MAX];
+ struct hibmc_link_cap cap;
+};
+
+struct hibmc_dp_dev {
+ struct drm_dp_aux aux;
+ struct drm_device *dev;
+ void __iomem *base;
+ struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
+ struct hibmc_dp_link link;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+};
+
+#define dp_field_modify(reg_value, mask, val) \
+ do { \
+ (reg_value) &= ~(mask); \
+ (reg_value) |= FIELD_PREP(mask, val); \
+ } while (0) \
+
+#define hibmc_dp_reg_write_field(dp, offset, mask, val) \
+ do { \
+ typeof(dp) _dp = dp; \
+ typeof(_dp->base) addr = (_dp->base + (offset)); \
+ mutex_lock(&_dp->lock); \
+ u32 reg_value = readl(addr); \
+ dp_field_modify(reg_value, mask, val); \
+ writel(reg_value, addr); \
+ mutex_unlock(&_dp->lock); \
+ } while (0)
+
+void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+
+#endif
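hibmc_dp_reg_write_field() above is a locked read-modify-write: it loads the register, replaces only the bits covered by mask, and writes the result back under dp->lock so concurrent updates to different fields of the same register cannot lose each other's bits. It has to stay a macro because FIELD_PREP() requires a compile-time-constant mask; a plain-function sketch of the same operation, using a runtime shift instead, might look like:

    static void dp_write_field(struct hibmc_dp_dev *dp, u32 offset, u32 mask, u32 val)
    {
        u32 reg;

        mutex_lock(&dp->lock);
        reg = readl(dp->base + offset);
        /* __ffs(mask) gives the field's lowest set bit position (mask != 0) */
        reg = (reg & ~mask) | ((val << __ffs(mask)) & mask);
        writel(reg, dp->base + offset);
        mutex_unlock(&dp->lock);
    }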
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
new file mode 100644
index 000000000000..74dd9956144e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_CONFIG_H
+#define DP_CONFIG_H
+
+#define HIBMC_DP_BPP 24
+#define HIBMC_DP_SYMBOL_PER_FCLK 4
+#define HIBMC_DP_MSA1 0x20
+#define HIBMC_DP_MSA2 0x845c00
+#define HIBMC_DP_OFFSET 0x1e0000
+#define HIBMC_DP_HDCP 0x2
+#define HIBMC_DP_INT_RST 0xffff
+#define HIBMC_DP_DPTX_RST 0x3ff
+#define HIBMC_DP_CLK_EN 0x7
+#define HIBMC_DP_SYNC_EN_MASK 0x3
+#define HIBMC_DP_LINK_RATE_CAL 27
+
+#endif
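HIBMC_DP_LINK_RATE_CAL appears to convert a DPCD link-bandwidth code into the link symbol clock in MHz: the DPCD code expresses the bit rate in units of 0.27 Gbps, and 8b/10b encoding makes the symbol clock one tenth of the bit rate, so code x 27 = symbol clock in MHz. For example, DP_LINK_BW_1_62 (0x06) gives 6 x 27 = 162 MHz, and DP_LINK_BW_2_7 (0x0a) gives 10 x 27 = 270 MHz.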
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
new file mode 100644
index 000000000000..a8d543881c09
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "dp_config.h"
+#include "dp_comm.h"
+#include "dp_reg.h"
+#include "dp_hw.h"
+
+static void hibmc_dp_set_tu(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 tu_symbol_frac_size;
+ u32 tu_symbol_size;
+ u32 rate_ks;
+ u8 lane_num;
+ u32 value;
+ u32 bpp;
+
+ lane_num = dp->link.cap.lanes;
+ if (lane_num == 0) {
+ drm_err(dp->dev, "set tu failed, lane num cannot be 0!\n");
+ return;
+ }
+
+ bpp = HIBMC_DP_BPP;
+ rate_ks = dp->link.cap.link_rate * HIBMC_DP_LINK_RATE_CAL;
+ value = (mode->clock * bpp * 5) / (61 * lane_num * rate_ks);
+
+ if (value % 10 == 9) { /* fractional digit 9: carry into the integer part */
+ tu_symbol_size = value / 10 + 1;
+ tu_symbol_frac_size = 0;
+ } else {
+ tu_symbol_size = value / 10;
+ tu_symbol_frac_size = value % 10 + 1;
+ }
+
+ drm_dbg_dp(dp->dev, "tu value: %u.%u value: %u\n",
+ tu_symbol_size, tu_symbol_frac_size, value);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE, tu_symbol_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE, tu_symbol_frac_size);
+}
+
+static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 hblank_size;
+ u32 htotal_size;
+ u32 htotal_int;
+ u32 hblank_int;
+ u32 fclk; /* flink_clock */
+
+ fclk = dp->link.cap.link_rate * HIBMC_DP_LINK_RATE_CAL;
+
+ /* Spread spectrum may make the value deviate slightly;
+ * the coefficient (0.9947) compensates for that deviation.
+ */
+ htotal_int = mode->htotal * 9947 / 10000;
+ htotal_size = htotal_int * fclk / (HIBMC_DP_SYMBOL_PER_FCLK * (mode->clock / 1000));
+
+ hblank_int = mode->htotal - mode->hdisplay - mode->hdisplay * 53 / 10000;
+ hblank_size = hblank_int * fclk * 9947 /
+ (mode->clock * 10 * HIBMC_DP_SYMBOL_PER_FCLK);
+
+ drm_dbg_dp(dp->dev, "h_active %u v_active %u htotal_size %u hblank_size %u",
+ mode->hdisplay, mode->vdisplay, htotal_size, hblank_size);
+ drm_dbg_dp(dp->dev, "flink_clock %u pixel_clock %d", fclk, mode->clock / 1000);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
+ HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
+ HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+}
+
+static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 timing_delay;
+ u32 vblank;
+ u32 hstart;
+ u32 vstart;
+
+ vblank = mode->vtotal - mode->vdisplay;
+ timing_delay = mode->htotal - mode->hsync_start;
+ hstart = mode->htotal - mode->hsync_start;
+ vstart = mode->vtotal - mode->vsync_start;
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG0,
+ HIBMC_DP_CFG_TIMING_GEN0_HBLANK, mode->htotal - mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG0,
+ HIBMC_DP_CFG_TIMING_GEN0_HACTIVE, mode->hdisplay);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG2,
+ HIBMC_DP_CFG_TIMING_GEN0_VBLANK, vblank);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG2,
+ HIBMC_DP_CFG_TIMING_GEN0_VACTIVE, mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG3,
+ HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH,
+ mode->vsync_start - mode->vdisplay);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG0,
+ HIBMC_DP_CFG_STREAM_HACTIVE, mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG0,
+ HIBMC_DP_CFG_STREAM_HBLANK, mode->htotal - mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG2,
+ HIBMC_DP_CFG_STREAM_HSYNC_WIDTH,
+ mode->hsync_end - mode->hsync_start);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG1,
+ HIBMC_DP_CFG_STREAM_VACTIVE, mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG1,
+ HIBMC_DP_CFG_STREAM_VBLANK, vblank);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG3,
+ HIBMC_DP_CFG_STREAM_VFRONT_PORCH,
+ mode->vsync_start - mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG3,
+ HIBMC_DP_CFG_STREAM_VSYNC_WIDTH,
+ mode->vsync_end - mode->vsync_start);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_MSA0,
+ HIBMC_DP_CFG_STREAM_VSTART, vstart);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_MSA0,
+ HIBMC_DP_CFG_STREAM_HSTART, hstart);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_VSYNC_POLARITY,
+ mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 : 0);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_HSYNC_POLARITY,
+ mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 : 0);
+
+ /* MSA misc 0 and 1 */
+ writel(HIBMC_DP_MSA1, dp->base + HIBMC_DP_VIDEO_MSA1);
+ writel(HIBMC_DP_MSA2, dp->base + HIBMC_DP_VIDEO_MSA2);
+
+ hibmc_dp_set_tu(dp, mode);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_RGB_ENABLE, 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_VIDEO_MAPPING, 0);
+
+ /* round timing_delay up to an even value */
+ if (timing_delay % 2)
+ timing_delay++;
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_MODEL_CTRL,
+ HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1, timing_delay);
+
+ hibmc_dp_set_sst(dp, mode);
+}
+
+int hibmc_dp_hw_init(struct hibmc_dp *dp)
+{
+ struct drm_device *drm_dev = dp->drm_dev;
+ struct hibmc_dp_dev *dp_dev;
+
+ dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
+ if (!dp_dev)
+ return -ENOMEM;
+
+ mutex_init(&dp_dev->lock);
+
+ dp->dp_dev = dp_dev;
+
+ dp_dev->dev = drm_dev;
+ dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
+
+ hibmc_dp_aux_init(dp_dev);
+
+ dp_dev->link.cap.lanes = 0x2;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+
+ /* hdcp data */
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ /* int init */
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ /* rst */
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ /* clock enable */
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+
+ return 0;
+}
+
+void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ if (enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_VIDEO_CTRL, BIT(0), 0x1);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_DPTX_GCTL0, BIT(10), 0x1);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ } else {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_DPTX_GCTL0, BIT(10), 0);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_VIDEO_CTRL, BIT(0), 0);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ }
+
+ msleep(50);
+}
+
+int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ int ret;
+
+ if (!dp_dev->link.status.channel_equalized) {
+ ret = hibmc_dp_link_training(dp_dev);
+ if (ret) {
+ drm_err(dp->drm_dev, "dp link training failed, ret: %d\n", ret);
+ return ret;
+ }
+ }
+
+ hibmc_dp_display_en(dp, false);
+ hibmc_dp_link_cfg(dp_dev, mode);
+
+ return 0;
+}
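For reference, hibmc_dp_set_tu() computes the average transfer-unit occupancy in tenths of a symbol and splits it across two register fields. Two worked examples of the rounding: value = 382 yields tu_symbol_size = 38 and tu_symbol_frac_size = 3 (the +1 bias presumably matching the hardware's fractional encoding), while value = 389 carries into the integer part, yielding 39 and 0.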
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
new file mode 100644
index 000000000000..4dc13b3d9875
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_KAPI_H
+#define DP_KAPI_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_print.h>
+
+struct hibmc_dp_dev;
+
+struct hibmc_dp {
+ struct hibmc_dp_dev *dp_dev;
+ struct drm_device *drm_dev;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ void __iomem *mmio;
+};
+
+int hibmc_dp_hw_init(struct hibmc_dp *dp);
+int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
+void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
new file mode 100644
index 000000000000..f6355c16cc0a
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_reg.h"
+
+#define HIBMC_EQ_MAX_RETRY 5
+
+static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
+{
+ u8 buf[2];
+ int ret;
+
+ /* DP 2 lane */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_LANE_DATA_EN,
+ dp->link.cap.lanes == 0x2 ? 0x3 : 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_GCTL0, HIBMC_DP_CFG_PHY_LANE_NUM,
+ dp->link.cap.lanes == 0x2 ? 0x1 : 0);
+
+ /* enhanced frame */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_FRAME_MODE, 0x1);
+
+ /* set rate and lane count */
+ buf[0] = dp->link.cap.link_rate;
+ buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ /* set 8b/10b and downspread */
+ buf[0] = DP_SPREAD_AMP_0_5;
+ buf[1] = DP_SET_ANSI_8B10B;
+ ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ return ret;
+}
+
+static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
+{
+ int ret;
+ u8 val;
+ u8 buf;
+
+ buf = (u8)pattern;
+ if (pattern != DP_TRAINING_PATTERN_DISABLE && pattern != DP_TRAINING_PATTERN_4) {
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_SCRAMBLE_EN, 0x1);
+ } else {
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_SCRAMBLE_EN, 0);
+ }
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ val = 0;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ val = 1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ val = 2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ val = 3;
+ break;
+ case DP_TRAINING_PATTERN_4:
+ val = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
+{
+ u8 *train_set = dp->link.train_set;
+ int ret;
+ u8 i;
+
+ ret = hibmc_dp_link_training_configure(dp);
+ if (ret)
+ return ret;
+
+ ret = hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dp->link.cap.lanes; i++)
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
+ u8 lane_status[DP_LINK_STATUS_SIZE])
+{
+ u8 train_set[HIBMC_DP_LANE_NUM_MAX] = {0};
+ u8 lane;
+
+ for (lane = 0; lane < dp->link.cap.lanes; lane++)
+ train_set[lane] = drm_dp_get_adjust_request_voltage(lane_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(lane_status, lane);
+
+ if (memcmp(dp->link.train_set, train_set, HIBMC_DP_LANE_NUM_MAX)) {
+ memcpy(dp->link.train_set, train_set, HIBMC_DP_LANE_NUM_MAX);
+ return true;
+ }
+
+ return false;
+}
+
+static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_2_7:
+ dp->link.cap.link_rate = DP_LINK_BW_1_62;
+ return 0;
+ case DP_LINK_BW_5_4:
+ dp->link.cap.link_rate = DP_LINK_BW_2_7;
+ return 0;
+ case DP_LINK_BW_8_1:
+ dp->link.cap.link_rate = DP_LINK_BW_5_4;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.lanes) {
+ case 0x2:
+ dp->link.cap.lanes--;
+ break;
+ case 0x1:
+ drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
+{
+ u8 lane_status[DP_LINK_STATUS_SIZE] = {0};
+ bool level_changed;
+ u32 voltage_tries;
+ u32 cr_tries;
+ int ret;
+
+ /*
+ * The DP 1.4 spec defines a maximum of 10 retries; for pre-DP 1.4 sinks use a limit of 80
+ * (4 voltage levels x 4 preemphasis levels x 5 identical voltage retries)
+ */
+
+ voltage_tries = 1;
+ for (cr_tries = 0; cr_tries < 80; cr_tries++) {
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
+ if (ret != DP_LINK_STATUS_SIZE) {
+ drm_err(dp->dev, "Get lane status failed\n");
+ return ret;
+ }
+
+ if (drm_dp_clock_recovery_ok(lane_status, dp->link.cap.lanes)) {
+ drm_dbg_dp(dp->dev, "dp link training cr done\n");
+ dp->link.status.clock_recovered = true;
+ return 0;
+ }
+
+ if (voltage_tries == 5) {
+ drm_dbg_dp(dp->dev, "same voltage tries 5 times\n");
+ dp->link.status.clock_recovered = false;
+ return 0;
+ }
+
+ level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+ dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "Update link training failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ voltage_tries = level_changed ? 1 : voltage_tries + 1;
+ }
+
+ drm_err(dp->dev, "dp link training clock recovery 80 times failed\n");
+ dp->link.status.clock_recovered = false;
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
+{
+ u8 lane_status[DP_LINK_STATUS_SIZE] = {0};
+ u8 eq_tries;
+ int ret;
+
+ ret = hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_2);
+ if (ret)
+ return ret;
+
+ for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
+ drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
+ if (ret != DP_LINK_STATUS_SIZE) {
+ drm_err(dp->dev, "get lane status failed\n");
+ break;
+ }
+
+ if (!drm_dp_clock_recovery_ok(lane_status, dp->link.cap.lanes)) {
+ drm_dbg_dp(dp->dev, "clock recovery check failed\n");
+ drm_dbg_dp(dp->dev, "cannot continue channel equalization\n");
+ dp->link.status.clock_recovered = false;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(lane_status, dp->link.cap.lanes)) {
+ dp->link.status.channel_equalized = true;
+ drm_dbg_dp(dp->dev, "dp link training eq done\n");
+ break;
+ }
+
+ hibmc_dp_link_get_adjust_train(dp, lane_status);
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+ dp->link.train_set, dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "Update link training failed\n");
+ ret = (ret >= 0) ? -EIO : ret;
+ break;
+ }
+ }
+
+ if (eq_tries == HIBMC_EQ_MAX_RETRY)
+ drm_err(dp->dev, "channel equalization failed %u times\n", eq_tries);
+
+ hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int hibmc_dp_link_downgrade_training_cr(struct hibmc_dp_dev *dp)
+{
+ if (hibmc_dp_link_reduce_rate(dp))
+ return hibmc_dp_link_reduce_lane(dp);
+
+ return 0;
+}
+
+static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
+{
+ if (dp->link.status.clock_recovered && !dp->link.status.channel_equalized) {
+ if (!hibmc_dp_link_reduce_lane(dp))
+ return 0;
+ }
+
+ return hibmc_dp_link_reduce_rate(dp);
+}
+
+int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
+{
+ struct hibmc_dp_link *link = &dp->link;
+ int ret;
+
+ while (true) {
+ ret = hibmc_dp_link_training_cr_pre(dp);
+ if (ret)
+ goto err;
+
+ ret = hibmc_dp_link_training_cr(dp);
+ if (ret)
+ goto err;
+
+ if (!link->status.clock_recovered) {
+ ret = hibmc_dp_link_downgrade_training_cr(dp);
+ if (ret)
+ goto err;
+ continue;
+ }
+
+ ret = hibmc_dp_link_training_channel_eq(dp);
+ if (ret)
+ goto err;
+
+ if (!link->status.channel_equalized) {
+ ret = hibmc_dp_link_downgrade_training_eq(dp);
+ if (ret)
+ goto err;
+ continue;
+ }
+
+ return 0;
+ }
+
+err:
+ hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+
+ return ret;
+}
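hibmc_dp_link_training() keeps retrying the full CR/EQ sequence and downgrades between attempts: the link rate drops first (8.1 -> 5.4 -> 2.7 -> 1.62 Gbps), and the lane count (2 -> 1) only once the minimum rate is reached; after an EQ-only failure the lane count is reduced first. For example, a link that starts at 2 lanes @ 2.7 Gbps and never achieves clock recovery is retried at 2 lanes @ 1.62 Gbps, then 1 lane @ 1.62 Gbps, before training finally fails with -EIO.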
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
new file mode 100644
index 000000000000..4a515c726d52
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_REG_H
+#define DP_REG_H
+
+#define HIBMC_DP_AUX_CMD_ADDR 0x50
+#define HIBMC_DP_AUX_WR_DATA0 0x54
+#define HIBMC_DP_AUX_WR_DATA1 0x58
+#define HIBMC_DP_AUX_WR_DATA2 0x5c
+#define HIBMC_DP_AUX_WR_DATA3 0x60
+#define HIBMC_DP_AUX_RD_DATA0 0x64
+#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_VIDEO_MSA1 0x11c
+#define HIBMC_DP_VIDEO_MSA2 0x120
+#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0x124
+#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_HDCP_CFG 0x600
+#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_INTR_ENABLE 0x720
+#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_TIMING_SYNC_CTRL 0xff0
+
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
new file mode 100644
index 000000000000..603d6b198a54
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+#include "dp/dp_hw.h"
+
+static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, 1024, 768); /* temporary implementation */
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
+ .get_modes = hibmc_dp_connector_get_modes,
+};
+
+static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
+{
+ int ret;
+
+ hibmc_dp_display_en(dp, false);
+
+ ret = hibmc_dp_mode_set(dp, mode);
+ if (ret)
+ drm_err(dp->drm_dev, "hibmc dp mode set failed: %d\n", ret);
+
+ return ret;
+}
+
+static void hibmc_dp_encoder_enable(struct drm_encoder *drm_encoder,
+ struct drm_atomic_state *state)
+{
+ struct hibmc_dp *dp = container_of(drm_encoder, struct hibmc_dp, encoder);
+ struct drm_display_mode *mode = &drm_encoder->crtc->state->mode;
+
+ if (hibmc_dp_prepare(dp, mode))
+ return;
+
+ hibmc_dp_display_en(dp, true);
+}
+
+static void hibmc_dp_encoder_disable(struct drm_encoder *drm_encoder,
+ struct drm_atomic_state *state)
+{
+ struct hibmc_dp *dp = container_of(drm_encoder, struct hibmc_dp, encoder);
+
+ hibmc_dp_display_en(dp, false);
+}
+
+static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
+ .atomic_enable = hibmc_dp_encoder_enable,
+ .atomic_disable = hibmc_dp_encoder_disable,
+};
+
+int hibmc_dp_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = &priv->dev;
+ struct drm_crtc *crtc = &priv->crtc;
+ struct hibmc_dp *dp = &priv->dp;
+ struct drm_connector *connector = &dp->connector;
+ struct drm_encoder *encoder = &dp->encoder;
+ int ret;
+
+ dp->mmio = priv->mmio;
+ dp->drm_dev = dev;
+
+ ret = hibmc_dp_hw_init(&priv->dp);
+ if (ret) {
+ drm_err(dev, "hibmc dp hw init failed: %d\n", ret);
+ return ret;
+ }
+
+ hibmc_dp_display_en(&priv->dp, false);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ drm_err(dev, "init dp encoder failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
+
+ ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ drm_err(dev, "init dp connector failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &hibmc_dp_conn_helper_funcs);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 8c488c98ac97..e6de6d5edf6b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -15,8 +15,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -28,6 +28,10 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
+#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+
DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_interrupt(int irq, void *arg)
@@ -57,7 +61,6 @@ static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
.name = "hibmc",
- .date = "20160828",
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
@@ -118,6 +121,14 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
+ /* if a DP interface is present, initialize it */
+ if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
+ HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+ ret = hibmc_dp_init(priv);
+ if (ret)
+ drm_err(dev, "failed to init dp: %d\n", ret);
+ }
+
ret = hibmc_vdac_init(priv);
if (ret) {
drm_err(dev, "failed to init vdac: %d\n", ret);
@@ -328,6 +339,8 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
goto err_return;
}
+ pci_set_master(pdev);
+
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 6b566f3aeecb..d982f1e4b958 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -20,9 +20,12 @@
#include <drm/drm_framebuffer.h>
-struct hibmc_connector {
- struct drm_connector base;
+#include "dp/dp_hw.h"
+struct hibmc_vdac {
+ struct drm_device *dev;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
struct i2c_adapter adapter;
struct i2c_algo_bit_data bit_data;
};
@@ -35,13 +38,13 @@ struct hibmc_drm_private {
struct drm_device dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
- struct drm_encoder encoder;
- struct hibmc_connector connector;
+ struct hibmc_vdac vdac;
+ struct hibmc_dp dp;
};
-static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
{
- return container_of(connector, struct hibmc_connector, base);
+ return container_of(connector, struct hibmc_vdac, connector);
}
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
@@ -57,6 +60,8 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac);
+
+int hibmc_dp_init(struct hibmc_drm_private *priv);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index e6e48651c15c..99b3b77b5445 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -25,8 +25,8 @@
static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
{
- struct hibmc_connector *hibmc_connector = data;
- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ struct hibmc_vdac *vdac = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if (value) {
@@ -45,8 +45,8 @@ static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
static int hibmc_get_i2c_signal(void *data, u32 mask)
{
- struct hibmc_connector *hibmc_connector = data;
- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ struct hibmc_vdac *vdac = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if ((tmp_dir & mask) != mask) {
@@ -77,22 +77,21 @@ static int hibmc_ddc_getscl(void *data)
return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
}
-int hibmc_ddc_create(struct drm_device *drm_dev,
- struct hibmc_connector *connector)
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
{
- connector->adapter.owner = THIS_MODULE;
- snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
- connector->adapter.dev.parent = drm_dev->dev;
- i2c_set_adapdata(&connector->adapter, connector);
- connector->adapter.algo_data = &connector->bit_data;
-
- connector->bit_data.udelay = 20;
- connector->bit_data.timeout = usecs_to_jiffies(2000);
- connector->bit_data.data = connector;
- connector->bit_data.setsda = hibmc_ddc_setsda;
- connector->bit_data.setscl = hibmc_ddc_setscl;
- connector->bit_data.getsda = hibmc_ddc_getsda;
- connector->bit_data.getscl = hibmc_ddc_getscl;
-
- return i2c_bit_add_bus(&connector->adapter);
+ vdac->adapter.owner = THIS_MODULE;
+ snprintf(vdac->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+ vdac->adapter.dev.parent = drm_dev->dev;
+ i2c_set_adapdata(&vdac->adapter, vdac);
+ vdac->adapter.algo_data = &vdac->bit_data;
+
+ vdac->bit_data.udelay = 20;
+ vdac->bit_data.timeout = usecs_to_jiffies(2000);
+ vdac->bit_data.data = vdac;
+ vdac->bit_data.setsda = hibmc_ddc_setsda;
+ vdac->bit_data.setscl = hibmc_ddc_setscl;
+ vdac->bit_data.getsda = hibmc_ddc_getsda;
+ vdac->bit_data.getscl = hibmc_ddc_getscl;
+
+ return i2c_bit_add_bus(&vdac->adapter);
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 409c551c92af..05e19ea4c9f9 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -24,11 +24,11 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
const struct drm_edid *drm_edid;
int count;
- drm_edid = drm_edid_read_ddc(connector, &hibmc_connector->adapter);
+ drm_edid = drm_edid_read_ddc(connector, &vdac->adapter);
drm_edid_connector_update(connector, drm_edid);
@@ -51,9 +51,9 @@ out:
static void hibmc_connector_destroy(struct drm_connector *connector)
{
- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
- i2c_del_adapter(&hibmc_connector->adapter);
+ i2c_del_adapter(&vdac->adapter);
drm_connector_cleanup(connector);
}
@@ -93,20 +93,20 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
- struct hibmc_connector *hibmc_connector = &priv->connector;
- struct drm_encoder *encoder = &priv->encoder;
+ struct hibmc_vdac *vdac = &priv->vdac;
+ struct drm_encoder *encoder = &vdac->encoder;
struct drm_crtc *crtc = &priv->crtc;
- struct drm_connector *connector = &hibmc_connector->base;
+ struct drm_connector *connector = &vdac->connector;
int ret;
- ret = hibmc_ddc_create(dev, hibmc_connector);
+ ret = hibmc_ddc_create(dev, vdac);
if (ret) {
drm_err(dev, "failed to create ddc: %d\n", ret);
return ret;
}
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
@@ -117,7 +117,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drm_connector_init_with_ddc(dev, connector,
&hibmc_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
- &hibmc_connector->adapter);
+ &vdac->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 5616c3917c03..2eb49177ac42 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -929,7 +929,6 @@ static const struct drm_driver ade_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
- .date = "20150718",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index b3ab944652a6..1e1c87be1204 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -17,8 +17,8 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index e0953777a206..f59abfa7622a 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -9,8 +9,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -20,7 +20,6 @@
#define DRIVER_NAME "hyperv_drm"
#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
-#define DRIVER_DATE "2020"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -31,7 +30,6 @@ static struct drm_driver hyperv_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 131512a5f3bd..fcb0fcd6c897 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -486,7 +486,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id ch7006_ids[] = {
- { "ch7006", 0 },
+ { "ch7006" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index ff23422727fc..c17afa025d9d 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -413,7 +413,7 @@ sil164_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id sil164_ids[] = {
- { "sil164", 0 },
+ { "sil164" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sil164_ids);
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 82d618c40dce..cbff851e0c85 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -486,8 +486,8 @@ static void tda9950_remove(struct i2c_client *client)
}
static struct i2c_device_id tda9950_ids[] = {
- { "tda9950", 0 },
- { },
+ { "tda9950" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, tda9950_ids);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 2160f05bbd16..82d4a4e206a5 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1165,7 +1165,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = tda998x_audio_shutdown,
.mute_stream = tda998x_audio_mute_stream,
.get_eld = tda998x_audio_get_eld,
- .no_capture_mute = 1,
};
static int tda998x_audio_codec_init(struct tda998x_priv *priv,
@@ -1176,6 +1175,7 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
.max_i2s_channels = 2,
.no_i2s_capture = 1,
.no_spdif_capture = 1,
+ .no_capture_mute = 1,
};
if (priv->audio_port_enable[AUDIO_ROUTE_I2S])
@@ -2094,7 +2094,7 @@ MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
#endif
static const struct i2c_device_id tda998x_ids[] = {
- { "tda998x", 0 },
+ { "tda998x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda998x_ids);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 31710d98cad5..3dda9f0eda82 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -30,11 +30,11 @@ i915-y += \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
- i915_suspend.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
intel_clock_gating.o \
+ intel_cpu_info.o \
intel_device_info.o \
intel_memory_region.o \
intel_pcode.o \
@@ -43,6 +43,7 @@ i915-y += \
intel_sbi.o \
intel_step.o \
intel_uncore.o \
+ intel_uncore_trace.o \
intel_wakeref.o \
vlv_sideband.o \
vlv_suspend.o
@@ -220,6 +221,7 @@ i915-$(CONFIG_HWMON) += \
i915-y += \
display/hsw_ips.o \
display/i9xx_plane.o \
+ display/i9xx_display_sr.o \
display/i9xx_wm.o \
display/intel_alpm.o \
display/intel_atomic.o \
@@ -236,6 +238,7 @@ i915-y += \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
display/intel_display.o \
+ display/intel_display_conversion.o \
display/intel_display_driver.o \
display/intel_display_irq.o \
display/intel_display_params.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 9d47f8a93e94..686393dfbbf5 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,7 +26,6 @@
*
*/
-#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 4fbec065d53e..56353377466c 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -8,6 +8,7 @@
#include <linux/string_helpers.h>
#include "g4x_dp.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_audio.h"
#include "intel_backlight.h"
@@ -55,8 +56,8 @@ const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0];
}
-void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void g4x_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
@@ -1223,6 +1224,25 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(display, DEISR) & bit;
}
+static int g4x_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int ret;
+
+ if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ crtc_state->has_pch_encoder = true;
+
+ ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ g4x_dp_set_clock(encoder, crtc_state);
+
+ return 0;
+}
+
static void g4x_dp_suspend_complete(struct intel_encoder *encoder)
{
/*
@@ -1307,7 +1327,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder_link_check_init(intel_encoder, intel_dp_link_check);
intel_encoder->hotplug = intel_dp_hotplug;
- intel_encoder->compute_config = intel_dp_compute_config;
+ intel_encoder->compute_config = g4x_dp_compute_config;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->sync_state = intel_dp_sync_state;
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h
index c75e64ae79b7..839a251dc069 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.h
+++ b/drivers/gpu/drm/i915/display/g4x_dp.h
@@ -19,8 +19,6 @@ struct intel_encoder;
#ifdef I915
const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
-void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
@@ -31,10 +29,6 @@ static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
{
return NULL;
}
-static inline void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
-}
static inline bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, int port,
enum pipe *pipe)
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index d1a7d0d57c6b..98e6a931042f 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -6,6 +6,7 @@
*/
#include "g4x_hdmi.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -682,7 +683,7 @@ static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port
"Platform does not support HDMI %c\n", port_name(port));
}
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
struct intel_display *display = &dev_priv->display;
@@ -692,10 +693,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
struct intel_connector *intel_connector;
if (!assert_port_valid(dev_priv, port))
- return;
+ return false;
if (!assert_hdmi_port_valid(dev_priv, port))
- return;
+ return false;
devdata = intel_bios_encoder_data_lookup(display, port);
@@ -706,15 +707,13 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
- return;
+ return false;
dig_port->aux_ch = AUX_CH_NONE;
intel_connector = intel_connector_alloc();
- if (!intel_connector) {
- kfree(dig_port);
- return;
- }
+ if (!intel_connector)
+ goto err_connector_alloc;
intel_encoder = &dig_port->base;
@@ -722,9 +721,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
mutex_init(&dig_port->hdcp_mutex);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
- "HDMI %c", port_name(port));
+ if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port)))
+ goto err_encoder_init;
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
@@ -787,5 +787,17 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- intel_hdmi_init_connector(dig_port, intel_connector);
+ if (!intel_hdmi_init_connector(dig_port, intel_connector))
+ goto err_init_connector;
+
+ return true;
+
+err_init_connector:
+ drm_encoder_cleanup(&intel_encoder->base);
+err_encoder_init:
+ kfree(intel_connector);
+err_connector_alloc:
+ kfree(dig_port);
+
+ return false;
}
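The conversion above turns a void initializer into one that reports failure, unwinding through goto labels so each label releases exactly what was acquired before the failing step, in reverse acquisition order. A minimal standalone sketch of the same ladder, assuming nothing from i915 (names illustrative):

#include <stdbool.h>
#include <stdlib.h>

struct part { int id; };

/* Acquire two resources; on failure, unwind only what was taken. */
static bool pair_init(struct part **out_a, struct part **out_b)
{
	struct part *a, *b;

	a = malloc(sizeof(*a));
	if (!a)
		return false;		/* nothing to unwind yet */

	b = malloc(sizeof(*b));
	if (!b)
		goto err_free_a;	/* release a, and only a */

	*out_a = a;			/* ownership handed off on success */
	*out_b = b;
	return true;

err_free_a:
	free(a);
	return false;
}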
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
index 817f55c7a3a1..a52e8986ec7a 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -16,14 +16,15 @@ struct drm_connector;
struct drm_i915_private;
#ifdef I915
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port);
int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state);
#else
-static inline void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+static inline bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, int port)
{
+ return false;
}
static inline int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 34c5d28fc866..d02c328bf902 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -185,10 +185,12 @@ void hsw_ips_post_update(struct intel_atomic_state *state,
/* IPS only exists on ULT machines and is tied to pipe A. */
bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
- return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
+ struct intel_display *display = to_intel_display(crtc);
+
+ return HAS_IPS(display) && crtc->pipe == PIPE_A;
}
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -218,6 +220,20 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
return true;
}
+int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!IS_BROADWELL(i915))
+ return 0;
+
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return 0;
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+}
+
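hsw_ips_min_cdclk() inverts the 95% guardband into a 100/95 multiplier with round-up. A quick standalone arithmetic check (pixel rate illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int pixel_rate = 533250;	/* kHz, illustrative */

	/* pixel rate <= 95% of cdclk  =>  cdclk >= pixel_rate * 100 / 95 */
	printf("min cdclk = %d kHz\n",
	       DIV_ROUND_UP(pixel_rate * 100, 95));	/* 561316 */
	return 0;
}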
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
index 35364228e1c1..7af12f88a8ce 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.h
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -19,7 +19,7 @@ bool hsw_ips_pre_update(struct intel_atomic_state *state,
void hsw_ips_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool hsw_crtc_supports_ips(struct intel_crtc *crtc);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
@@ -42,9 +42,9 @@ static inline bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
return false;
}
-static inline bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+static inline int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- return false;
+ return 0;
}
static inline int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.c b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
new file mode 100644
index 000000000000..32abe9743014
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+
+#include "i915_reg.h"
+#include "i9xx_display_sr.h"
+#include "i9xx_wm_regs.h"
+#include "intel_de.h"
+#include "intel_gmbus.h"
+#include "intel_pci_config.h"
+
+static void i9xx_display_save_swf(struct intel_display *display)
+{
+ int i;
+
+ /* Scratch space */
+ if (DISPLAY_VER(display) == 2 && display->platform.mobile) {
+ for (i = 0; i < 7; i++) {
+ display->restore.saveSWF0[i] = intel_de_read(display, SWF0(display, i));
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ }
+ for (i = 0; i < 3; i++)
+ display->restore.saveSWF3[i] = intel_de_read(display, SWF3(display, i));
+ } else if (DISPLAY_VER(display) == 2) {
+ for (i = 0; i < 7; i++)
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ } else if (HAS_GMCH(display)) {
+ for (i = 0; i < 16; i++) {
+ display->restore.saveSWF0[i] = intel_de_read(display, SWF0(display, i));
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ }
+ for (i = 0; i < 3; i++)
+ display->restore.saveSWF3[i] = intel_de_read(display, SWF3(display, i));
+ }
+}
+
+static void i9xx_display_restore_swf(struct intel_display *display)
+{
+ int i;
+
+ /* Scratch space */
+ if (DISPLAY_VER(display) == 2 && display->platform.mobile) {
+ for (i = 0; i < 7; i++) {
+ intel_de_write(display, SWF0(display, i), display->restore.saveSWF0[i]);
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ intel_de_write(display, SWF3(display, i), display->restore.saveSWF3[i]);
+ } else if (DISPLAY_VER(display) == 2) {
+ for (i = 0; i < 7; i++)
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ } else if (HAS_GMCH(display)) {
+ for (i = 0; i < 16; i++) {
+ intel_de_write(display, SWF0(display, i), display->restore.saveSWF0[i]);
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ intel_de_write(display, SWF3(display, i), display->restore.saveSWF3[i]);
+ }
+}
+
+void i9xx_display_sr_save(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ /* Display arbitration control */
+ if (DISPLAY_VER(display) <= 4)
+ display->restore.saveDSPARB = intel_de_read(display, DSPARB(display));
+
+ if (DISPLAY_VER(display) == 4)
+ pci_read_config_word(pdev, GCDGMBUS, &display->restore.saveGCDGMBUS);
+
+ i9xx_display_save_swf(display);
+}
+
+void i9xx_display_sr_restore(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ i9xx_display_restore_swf(display);
+
+ if (DISPLAY_VER(display) == 4)
+ pci_write_config_word(pdev, GCDGMBUS, display->restore.saveGCDGMBUS);
+
+ /* Display arbitration */
+ if (DISPLAY_VER(display) <= 4)
+ intel_de_write(display, DSPARB(display), display->restore.saveDSPARB);
+}
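i9xx_display_sr_restore() undoes i9xx_display_sr_save() in reverse order: SWF scratch space first, then GCDGMBUS, then DSPARB last. A minimal sketch of that mirroring, illustrative only:

static void save_arb(void)	{ /* read DSPARB */ }
static void save_gmbus(void)	{ /* read GCDGMBUS config word */ }
static void save_swf(void)	{ /* read SWF scratch registers */ }
static void restore_arb(void)	{ /* write DSPARB */ }
static void restore_gmbus(void)	{ /* write GCDGMBUS config word */ }
static void restore_swf(void)	{ /* write SWF scratch registers */ }

static void state_save(void)
{
	save_arb();
	save_gmbus();
	save_swf();
}

static void state_restore(void)
{
	/* strict reverse of state_save() */
	restore_swf();
	restore_gmbus();
	restore_arb();
}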
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.h b/drivers/gpu/drm/i915/display/i9xx_display_sr.h
new file mode 100644
index 000000000000..39b8c18fe738
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __I9XX_DISPLAY_SR_H__
+#define __I9XX_DISPLAY_SR_H__
+
+struct intel_display;
+
+void i9xx_display_sr_save(struct intel_display *display);
+void i9xx_display_sr_restore(struct intel_display *display);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 17a1e3801a85..48e657a80a16 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index e3b13886177a..db78c1e6b0a3 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
+#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm_regs.h b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
new file mode 100644
index 000000000000..d68d22235cf2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __I9XX_WM_REGS_H__
+#define __I9XX_WM_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define DSPARB(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
+#define DSPARB_CSTART_MASK (0x7f << 7)
+#define DSPARB_CSTART_SHIFT 7
+#define DSPARB_BSTART_MASK (0x7f)
+#define DSPARB_BSTART_SHIFT 0
+#define DSPARB_BEND_SHIFT 9 /* on 855 */
+#define DSPARB_AEND_SHIFT 0
+#define DSPARB_SPRITEA_SHIFT_VLV 0
+#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEB_SHIFT_VLV 8
+#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
+#define DSPARB_SPRITEC_SHIFT_VLV 16
+#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
+#define DSPARB_SPRITED_SHIFT_VLV 24
+#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
+#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
+#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
+#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
+#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
+#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
+#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
+#define DSPARB_SPRITED_HI_SHIFT_VLV 12
+#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
+#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
+#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
+#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
+#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
+#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define DSPARB_SPRITEE_SHIFT_VLV 0
+#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEF_SHIFT_VLV 8
+#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
+
+/* pnv/gen4/g4x/vlv/chv */
+#define DSPFW1(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff << 23)
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f << 16)
+#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
+#define DSPFW_PLANEA_SHIFT 0
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW2(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
+#define DSPFW_FBC_SR_SHIFT 28
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
+#define DSPFW_FBC_HPLL_SR_SHIFT 24
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
+#define DSPFW_SPRITEB_SHIFT (16)
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_CURSORA_MASK (0x3f << 8)
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
+#define DSPFW_SPRITEA_SHIFT 0
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW3(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
+#define DSPFW_CURSOR_SR_SHIFT 24
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
+#define DSPFW_HPLL_SR_SHIFT 0
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
+
+/* vlv/chv */
+#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
+#define DSPFW_SPRITEB_WM1_SHIFT 16
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORA_WM1_SHIFT 8
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
+#define DSPFW_SPRITEA_WM1_SHIFT 0
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
+#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
+#define DSPFW_PLANEB_WM1_SHIFT 24
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEA_WM1_SHIFT 16
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORB_WM1_SHIFT 8
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
+#define DSPFW_CURSOR_SR_WM1_SHIFT 0
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
+#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
+#define DSPFW_SR_WM1_SHIFT 0
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
+#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
+#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
+#define DSPFW_SPRITED_WM1_SHIFT 24
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITED_SHIFT 16
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEC_WM1_SHIFT 8
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEC_SHIFT 0
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
+#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
+#define DSPFW_SPRITEF_WM1_SHIFT 24
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITEF_SHIFT 16
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEE_WM1_SHIFT 8
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEE_SHIFT 0
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
+#define DSPFW_PLANEC_WM1_SHIFT 24
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEC_SHIFT 16
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
+#define DSPFW_CURSORC_WM1_SHIFT 8
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 8)
+#define DSPFW_CURSORC_SHIFT 0
+#define DSPFW_CURSORC_MASK (0x3f << 0)
+
+/* vlv/chv high order bits */
+#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
+#define DSPFW_SR_HI_SHIFT 24
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_HI_SHIFT 23
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_HI_SHIFT 22
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_HI_SHIFT 21
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_HI_SHIFT 20
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_HI_SHIFT 16
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_HI_SHIFT 12
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_HI_SHIFT 8
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_HI_SHIFT 4
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_HI_SHIFT 0
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
+#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
+#define DSPFW_SR_WM1_HI_SHIFT 24
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_WM1_HI_SHIFT 21
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_WM1_HI_SHIFT 20
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_WM1_HI_SHIFT 12
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_WM1_HI_SHIFT 0
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
+
+/* drain latency register values */
+#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
+#define DDL_CURSOR_SHIFT 24
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
+#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
+#define DRAIN_LATENCY_MASK 0x7f
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE 64
+#define I915_FIFO_LINE_SIZE 64
+#define I830_FIFO_LINE_SIZE 32
+
+#define VALLEYVIEW_FIFO_SIZE 255
+#define G4X_FIFO_SIZE 127
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
+#define I915_FIFO_SIZE 95
+#define I855GM_FIFO_SIZE 127 /* In cachelines */
+#define I830_FIFO_SIZE 95
+
+#define VALLEYVIEW_MAX_WM 0xff
+#define G4X_MAX_WM 0x3f
+#define I915_MAX_WM 0x3f
+
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
+
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
+
+/* define the Watermark register on Ironlake */
+#define _WM0_PIPEA_ILK 0x45100
+#define _WM0_PIPEB_ILK 0x45104
+#define _WM0_PIPEC_IVB 0x45200
+#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
+ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
+#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
+#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
+#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
+#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
+#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
+#define WM1_LP_ILK _MMIO(0x45108)
+#define WM2_LP_ILK _MMIO(0x4510c)
+#define WM3_LP_ILK _MMIO(0x45110)
+#define WM_LP_ENABLE REG_BIT(31)
+#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
+#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
+#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
+#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
+#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
+#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
+#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
+#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
+#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
+#define WM1S_LP_ILK _MMIO(0x45120)
+#define WM2S_LP_IVB _MMIO(0x45124)
+#define WM3S_LP_IVB _MMIO(0x45128)
+#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
+#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
+#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
+
+#define WM_MISC _MMIO(0x45260)
+#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
+
+#define WM_DBG _MMIO(0x45280)
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
+#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
+#define WM_DBG_DISALLOW_SPRITE (1 << 2)
+
+#endif /* __I9XX_WM_REGS_H__ */
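The legacy watermark fields above keep explicit SHIFT/MASK pairs, while the Ironlake-era definitions at the bottom use REG_GENMASK()/REG_FIELD_PREP(). A standalone sketch of the equivalence, with the macros simplified to plain C (relies on the GCC __builtin_ctz builtin):

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define DSPFW_SR_SHIFT	23
#define DSPFW_SR_MASK	(0x1ffu << 23)

int main(void)
{
	/* legacy style: shift the value, bound it by the mask */
	uint32_t legacy = ((uint32_t)0x1ff << DSPFW_SR_SHIFT) & DSPFW_SR_MASK;
	/* modern style: derive the shift from the mask itself */
	uint32_t modern = FIELD_PREP(GENMASK(31, 23), 0x1ff);

	assert(legacy == modern);	/* both 0xff800000 */
	return 0;
}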
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 8a49f499e3fb..c977b74f82f0 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -31,6 +31,7 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_probe_helper.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
@@ -1602,7 +1603,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: split only when necessary */
if (crtc_state->dsc.slice_count > 1)
- crtc_state->dsc.dsc_split = true;
+ crtc_state->dsc.num_streams = 2;
+ else
+ crtc_state->dsc.num_streams = 1;
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index c3b29a331d72..bbf8c5a8fdbd 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -9,8 +9,9 @@
#include <linux/acpi.h>
#include <acpi/video.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_acpi.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index d89630b2d5c1..612e9b0ec14a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -40,6 +40,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
+#include "i915_drv.h"
#include "i915_config.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic_plane.h"
@@ -207,17 +208,6 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
fb->format->cpp[color_plane];
}
-static bool
-use_min_ddb(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
static unsigned int
intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
@@ -225,8 +215,8 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width, height;
unsigned int rel_data_rate;
+ int width, height;
if (plane->id == PLANE_CURSOR)
return 0;
@@ -235,14 +225,6 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
return 0;
/*
- * We calculate extra ddb based on ratio plane rate/total data rate
- * in case, in some cases we should not allocate extra ddb for the plane,
- * so do not count its data rate, if this is the case.
- */
- if (use_min_ddb(crtc_state, plane))
- return 0;
-
- /*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
@@ -256,7 +238,11 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height /= 2;
}
- rel_data_rate = width * height * fb->format->cpp[color_plane];
+ rel_data_rate =
+ skl_plane_relative_data_rate(crtc_state, plane, width, height,
+ fb->format->cpp[color_plane]);
+ if (!rel_data_rate)
+ return 0;
return intel_adjusted_rate(&plane_state->uapi.src,
&plane_state->uapi.dst,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 32aa9ec1a204..ce8a4319a63c 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -681,12 +681,11 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder trans = crtc_state->cpu_transcoder;
- if (HAS_DP20(i915))
- intel_de_rmw(i915, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ if (HAS_DP20(display))
+ intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
}
@@ -699,10 +698,12 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ mutex_lock(&connector->eld_mutex);
if (!connector->eld[0]) {
drm_dbg_kms(&i915->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ mutex_unlock(&connector->eld_mutex);
return false;
}
@@ -710,6 +711,7 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld));
crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ mutex_unlock(&connector->eld_mutex);
return true;
}
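The hunk above brackets every access to connector->eld with connector->eld_mutex, dropping the lock on both the bogus-ELD early return and the success path, so the copy into crtc_state->eld cannot tear against a concurrent update. The same lock/validate/copy/unlock shape in a standalone pthread sketch (illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

static pthread_mutex_t eld_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char eld[128];

static bool copy_eld(unsigned char out[128])
{
	bool ok = false;

	pthread_mutex_lock(&eld_lock);
	if (eld[0]) {			/* reject a bogus/empty ELD */
		memcpy(out, eld, 128);
		ok = true;
	}
	pthread_mutex_unlock(&eld_lock);	/* released on every path */
	return ok;
}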
@@ -978,6 +980,53 @@ retry:
drm_modeset_acquire_fini(&ctx);
}
+int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ int min_cdclk = 0;
+
+ if (!crtc_state->has_audio)
+ return 0;
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+ * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+ * there may be audio corruption or screen corruption." This cdclk
+ * restriction for GLK is 316.8 MHz.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4) {
+ if (DISPLAY_VER(display) == 10) {
+ /* Display WA #1145: glk */
+ min_cdclk = max(min_cdclk, 316800);
+ } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
+ /* Display WA #1144: skl,bxt */
+ min_cdclk = max(min_cdclk, 432000);
+ }
+ }
+
+ /*
+ * According to BSpec, "The CD clock frequency must be at least twice
+ * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+ */
+ if (DISPLAY_VER(display) >= 9)
+ min_cdclk = max(min_cdclk, 2 * 96000);
+
+ /*
+ * "For DP audio configuration, cdclk frequency shall be set to
+ * meet the following requirements:
+ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ * 270 | 320 or higher
+ * 162 | 200 or higher"
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_dp_encoder(crtc_state))
+ min_cdclk = max(min_cdclk, crtc_state->port_clock);
+
+ return min_cdclk;
+}
+
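intel_audio_min_cdclk() collects the audio-related floors that used to live inline in intel_crtc_compute_min_cdclk(). A standalone numeric check of the two floors that stack on a SKL-class part driving DP HBR2 x4 with audio (illustrative):

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int min_cdclk = 0;

	min_cdclk = max(min_cdclk, 432000);	/* Display WA #1144: skl,bxt */
	min_cdclk = max(min_cdclk, 2 * 96000);	/* 2x Azalia BCLK = 192000 */

	printf("audio min cdclk = %d kHz\n", min_cdclk);	/* 432000 wins */
	return 0;
}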
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 576c061d72a4..1bafc155434a 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -27,6 +27,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 3f81a726cc7d..fc1e517e074a 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -10,6 +10,7 @@
#include <acpi/video.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a4cdd82c4a75..e0e4e9b62d8d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1402,12 +1402,21 @@ parse_power_conservation_features(struct intel_display *display,
panel_type);
}
+static void vbt_edp_to_pps_delays(struct intel_pps_delays *pps,
+ const struct edp_power_seq *edp_pps)
+{
+ pps->power_up = edp_pps->t1_t3;
+ pps->backlight_on = edp_pps->t8;
+ pps->backlight_off = edp_pps->t9;
+ pps->power_down = edp_pps->t10;
+ pps->power_cycle = edp_pps->t11_t12;
+}
+
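vbt_edp_to_pps_delays() decouples the __packed VBT wire format from the driver's own delay struct, instead of storing the raw edp_power_seq in panel->vbt.edp.pps. A sketch of the two sides; the intel_pps_delays field types are assumed here, only the names come from the conversion above:

typedef unsigned short u16;			/* shims for a standalone build */
#define __packed __attribute__((packed))

struct edp_power_seq {				/* VBT layout, as removed below */
	u16 t1_t3;
	u16 t8;
	u16 t9;
	u16 t10;
	u16 t11_t12;
} __packed;

struct intel_pps_delays {			/* driver-side; types assumed */
	int power_up;				/* from t1_t3 */
	int backlight_on;			/* from t8 */
	int backlight_off;			/* from t9 */
	int power_down;				/* from t10 */
	int power_cycle;			/* from t11_t12 */
};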
static void
parse_edp(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_edp *edp;
- const struct edp_power_seq *edp_pps;
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
@@ -1428,10 +1437,10 @@ parse_edp(struct intel_display *display,
}
/* Get the eDP sequencing and link info */
- edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->fast_link_params[panel_type];
- panel->vbt.edp.pps = *edp_pps;
+ vbt_edp_to_pps_delays(&panel->vbt.edp.pps,
+ &edp->power_seqs[panel_type]);
if (display->vbt.version >= 224) {
panel->vbt.edp.rate =
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 8b703f6cfe17..f9841f0498c6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -50,14 +50,6 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
-struct edp_power_seq {
- u16 t1_t3;
- u16 t8;
- u16 t9;
- u16 t10;
- u16 t11_t12;
-} __packed;
-
/*
* MIPI Sequence Block definitions
*
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index a52b0ae68b96..23edc81741de 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -1256,7 +1256,7 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
for_each_pipe(i915, pipe)
- min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
return min_cdclk;
}
@@ -1447,13 +1447,14 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
int intel_bw_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.bw.obj,
+ intel_atomic_global_obj_init(display, &display->bw.obj,
&state->base, &intel_bw_funcs);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 03c4eef3f92a..c7a603589412 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -29,6 +29,7 @@
#include "soc/intel_dram.h"
#include "hsw_ips.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -37,7 +38,6 @@
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
@@ -46,6 +46,7 @@
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+#include "vlv_dsi.h"
#include "vlv_sideband.h"
/**
@@ -2761,154 +2762,62 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
"Post changing CDCLK to");
}
-static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+/* pixels per CDCLK */
+static int intel_cdclk_ppc(struct intel_display *display, bool double_wide)
+{
+ return DISPLAY_VER(display) >= 10 || double_wide ? 2 : 1;
+}
+
+/* max pixel rate as % of CDCLK (not accounting for PPC) */
+static int intel_cdclk_guardband(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(display->drm);
- int pixel_rate = crtc_state->pixel_rate;
- if (DISPLAY_VER(display) >= 10)
- return DIV_ROUND_UP(pixel_rate, 2);
- else if (DISPLAY_VER(display) == 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return pixel_rate;
+ if (DISPLAY_VER(display) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return 100;
else if (IS_CHERRYVIEW(dev_priv))
- return DIV_ROUND_UP(pixel_rate * 100, 95);
- else if (crtc_state->double_wide)
- return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
+ return 95;
else
- return DIV_ROUND_UP(pixel_rate * 100, 90);
+ return 90;
}
-static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct intel_plane *plane;
- int min_cdclk = 0;
-
- for_each_intel_plane_on_crtc(display->drm, crtc, plane)
- min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+ int guardband = intel_cdclk_guardband(display);
+ int pixel_rate = crtc_state->pixel_rate;
- return min_cdclk;
+ return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}
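The rewrite collapses the per-platform if/else ladder into one formula: cdclk >= pixel_rate * 100 / (guardband * ppc), where ppc is pixels per CDCLK tick and guardband is the usable percentage. A standalone spot check against two of the old branches (rates illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int pixel_rate_to_cdclk(int pixel_rate, int ppc, int guardband)
{
	return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}

int main(void)
{
	/* gen10+: ppc=2, guardband=100 -> old "DIV_ROUND_UP(rate, 2)" */
	printf("%d\n", pixel_rate_to_cdclk(594000, 2, 100));	/* 297000 */
	/* chv: ppc=1, guardband=95 -> old "DIV_ROUND_UP(rate * 100, 95)" */
	printf("%d\n", pixel_rate_to_cdclk(400000, 1, 95));	/* 421053 */
	return 0;
}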
-static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_display *display = to_intel_display(crtc);
- int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+ struct intel_plane *plane;
int min_cdclk = 0;
- /*
- * When we decide to use only one VDSC engine, since
- * each VDSC operates with 1 ppc throughput, pixel clock
- * cannot be higher than the VDSC clock (cdclk)
- * If there 2 VDSC engines, then pixel clock can't be higher than
- * VDSC clock(cdclk) * 2 and so on.
- */
- min_cdclk = max_t(int, min_cdclk,
- DIV_ROUND_UP(crtc_state->pixel_rate, num_vdsc_instances));
-
- if (crtc_state->joiner_pipes) {
- int pixel_clock = intel_dp_mode_to_fec_clock(crtc_state->hw.adjusted_mode.clock);
-
- /*
- * According to Bigjoiner bw check:
- * compressed_bpp <= PPC * CDCLK * Big joiner Interface bits / Pixel clock
- *
- * We have already computed compressed_bpp, so now compute the min CDCLK that
- * is required to support this compressed_bpp.
- *
- * => CDCLK >= compressed_bpp * Pixel clock / (PPC * Bigjoiner Interface bits)
- *
- * Since PPC = 2 with bigjoiner
- * => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits
- */
- int bigjoiner_interface_bits = DISPLAY_VER(display) >= 14 ? 36 : 24;
- int min_cdclk_bj =
- (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
- pixel_clock) / (2 * bigjoiner_interface_bits);
-
- min_cdclk = max(min_cdclk, min_cdclk_bj);
- }
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane)
+ min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]);
return min_cdclk;
}
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int min_cdclk;
if (!crtc_state->hw.enable)
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
- min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
-
- /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
- * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
- * there may be audio corruption or screen corruption." This cdclk
- * restriction for GLK is 316.8 MHz.
- */
- if (intel_crtc_has_dp_encoder(crtc_state) &&
- crtc_state->has_audio &&
- crtc_state->port_clock >= 540000 &&
- crtc_state->lane_count == 4) {
- if (DISPLAY_VER(display) == 10) {
- /* Display WA #1145: glk */
- min_cdclk = max(316800, min_cdclk);
- } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
- /* Display WA #1144: skl,bxt */
- min_cdclk = max(432000, min_cdclk);
- }
- }
-
- /*
- * According to BSpec, "The CD clock frequency must be at least twice
- * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- */
- if (crtc_state->has_audio && DISPLAY_VER(display) >= 9)
- min_cdclk = max(2 * 96000, min_cdclk);
-
- /*
- * "For DP audio configuration, cdclk frequency shall be set to
- * meet the following requirements:
- * DP Link Frequency(MHz) | Cdclk frequency(MHz)
- * 270 | 320 or higher
- * 162 | 200 or higher"
- */
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
- min_cdclk = max(crtc_state->port_clock, min_cdclk);
-
- /*
- * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
- * than 320000KHz.
- */
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
- IS_VALLEYVIEW(dev_priv))
- min_cdclk = max(320000, min_cdclk);
-
- /*
- * On Geminilake once the CDCLK gets as low as 79200
- * picture gets unstable, despite that values are
- * correct for DSI PLL and DE PLL.
- */
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
- IS_GEMINILAKE(dev_priv))
- min_cdclk = max(158400, min_cdclk);
-
- /* Account for additional needs from the planes */
- min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
-
- if (crtc_state->dsc.compression_enable)
- min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_planes_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
return min_cdclk;
}
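After the refactor, intel_crtc_compute_min_cdclk() is a plain fold over max(): each feature helper returns its own cdclk floor, or 0 when it does not apply, so no call site needs a guard. The shape, standalone (names and values illustrative):

#define max(a, b) ((a) > (b) ? (a) : (b))

static int ips_floor(int ips_on)	{ return ips_on ? 450000 : 0; }
static int audio_floor(int audio_on)	{ return audio_on ? 192000 : 0; }

static int crtc_min_cdclk(int pixel_floor, int ips_on, int audio_on)
{
	int min_cdclk = pixel_floor;

	min_cdclk = max(min_cdclk, ips_floor(ips_on));
	min_cdclk = max(min_cdclk, audio_floor(audio_on));
	return min_cdclk;
}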
@@ -2960,7 +2869,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
min_cdclk = max(cdclk_state->force_min_cdclk,
cdclk_state->bw_min_cdclk);
for_each_pipe(display, pipe)
- min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]);
/*
* Avoid glk_force_audio_cdclk() causing excessive screen
@@ -2972,7 +2881,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
*/
if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
!is_power_of_2(cdclk_state->active_pipes))
- min_cdclk = max(2 * 96000, min_cdclk);
+ min_cdclk = max(min_cdclk, 2 * 96000);
if (min_cdclk > display->cdclk.max_cdclk_freq) {
drm_dbg_kms(display->drm,
@@ -3028,8 +2937,8 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
min_voltage_level = 0;
for_each_pipe(display, pipe)
- min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
- min_voltage_level);
+ min_voltage_level = max(min_voltage_level,
+ cdclk_state->min_voltage_level[pipe]);
return min_voltage_level;
}
@@ -3308,14 +3217,13 @@ int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joi
int intel_cdclk_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
if (!cdclk_state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &display->cdclk.obj,
+ intel_atomic_global_obj_init(display, &display->cdclk.obj,
&cdclk_state->base, &intel_cdclk_funcs);
return 0;
@@ -3452,20 +3360,11 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
static int intel_compute_max_dotclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ int ppc = intel_cdclk_ppc(display, HAS_DOUBLE_WIDE(display));
+ int guardband = intel_cdclk_guardband(display);
int max_cdclk_freq = display->cdclk.max_cdclk_freq;
- if (DISPLAY_VER(display) >= 10)
- return 2 * max_cdclk_freq;
- else if (DISPLAY_VER(display) == 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return max_cdclk_freq;
- else if (IS_CHERRYVIEW(dev_priv))
- return max_cdclk_freq*95/100;
- else if (DISPLAY_VER(display) < 4)
- return 2*max_cdclk_freq*90/100;
- else
- return max_cdclk_freq*90/100;
+ return ppc * max_cdclk_freq * guardband / 100;
}
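The max-dotclk computation is the same guardband math run in the other direction: dotclk <= ppc * max_cdclk * guardband / 100. A standalone check that the formula reproduces two of the deleted branches (cdclk values illustrative):

#include <assert.h>

static int max_dotclk(int max_cdclk, int ppc, int guardband)
{
	return ppc * max_cdclk * guardband / 100;
}

int main(void)
{
	/* gen10+ branch used to return 2 * max_cdclk_freq */
	assert(max_dotclk(652800, 2, 100) == 2 * 652800);
	/* chv branch used to return max_cdclk_freq * 95 / 100 */
	assert(max_dotclk(320000, 1, 95) == 320000 * 95 / 100);
	return 0;
}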
/**
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 7cd902bbd244..2f51eccdb27a 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,6 +22,7 @@
*
*/
+#include "i915_drv.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 3252dab56430..4fbe2e3542ca 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 74c1983fe07e..4634d3fd9f20 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,6 +38,7 @@
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crt.h"
+#include "intel_crt_regs.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -55,18 +56,23 @@
#include "intel_pch_refclk.h"
/* Here's the desired hotplug mode */
-#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
+ ADPA_CRT_HOTPLUG_PERIOD_128 | \
ADPA_CRT_HOTPLUG_WARMUP_10MS | \
ADPA_CRT_HOTPLUG_SAMPLE_4S | \
ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
- ADPA_CRT_HOTPLUG_VOLREF_325MV | \
- ADPA_CRT_HOTPLUG_ENABLE)
+ ADPA_CRT_HOTPLUG_VOLREF_325MV)
+#define ADPA_HOTPLUG_MASK (ADPA_CRT_HOTPLUG_MONITOR_MASK | \
+ ADPA_CRT_HOTPLUG_ENABLE | \
+ ADPA_CRT_HOTPLUG_PERIOD_MASK | \
+ ADPA_CRT_HOTPLUG_WARMUP_MASK | \
+ ADPA_CRT_HOTPLUG_SAMPLE_MASK | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_MASK | \
+ ADPA_CRT_HOTPLUG_VOLREF_MASK | \
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
struct intel_crt {
struct intel_encoder base;
- /* DPMS state is stored in the connector, which we need in the
- * encoder's enable/disable callbacks */
- struct intel_connector *connector;
bool force_hotplug_required;
i915_reg_t adpa_reg;
};
@@ -91,9 +97,9 @@ bool intel_crt_port_enabled(struct intel_display *display,
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
- *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ *pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
- *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
return val & ADPA_DAC_ENABLE;
}
@@ -141,27 +147,27 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
}
static void intel_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ crtc_state->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
- lpt_pch_get_config(pipe_config);
+ lpt_pch_get_config(crtc_state);
- hsw_ddi_get_config(encoder, pipe_config);
+ hsw_ddi_get_config(encoder, crtc_state);
- pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
- DRM_MODE_FLAG_NHSYNC |
- DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_NVSYNC);
- pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ crtc_state->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ crtc_state->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -244,7 +250,7 @@ static void hsw_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
@@ -257,7 +263,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -287,7 +293,7 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
@@ -300,7 +306,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
@@ -319,7 +325,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
@@ -355,8 +361,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -399,48 +404,48 @@ intel_crt_mode_valid(struct drm_connector *connector,
}
static int intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
}
static int pch_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ crtc_state->has_pch_encoder = true;
+ if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
}
static int hsw_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -450,30 +455,30 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_hblank_start > 4096)
return -EINVAL;
- pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ crtc_state->has_pch_encoder = true;
+ if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
- if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+ if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
drm_dbg_kms(display->drm,
"LPT only supports 24bpp\n");
return -EINVAL;
}
- pipe_config->pipe_bpp = 24;
+ crtc_state->pipe_bpp = 24;
}
/* FDI must always be 2.7 GHz */
- pipe_config->port_clock = 135000 * 2;
+ crtc_state->port_clock = 135000 * 2;
- pipe_config->enhanced_framing = true;
+ crtc_state->enhanced_framing = true;
- adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+ adjusted_mode->crtc_clock = lpt_iclkip(crtc_state);
return 0;
}
@@ -481,9 +486,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
@@ -532,9 +536,8 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool reenable_hpd;
u32 adpa;
bool ret;
@@ -588,8 +591,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
@@ -856,7 +858,7 @@ intel_crt_detect(struct drm_connector *connector,
struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct intel_encoder *intel_encoder = &crt->base;
+ struct intel_encoder *encoder = &crt->base;
struct drm_atomic_state *state;
intel_wakeref_t wakeref;
int status;
@@ -865,15 +867,14 @@ intel_crt_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (display->params.load_detect_test) {
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
goto load_detect;
}
@@ -881,8 +882,7 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
if (I915_HAS_HOTPLUG(display)) {
/* We can not rely on the HPD pin always being correctly wired
@@ -939,7 +939,7 @@ load_detect:
}
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return status;
}
@@ -947,19 +947,17 @@ out:
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct intel_encoder *intel_encoder = &crt->base;
+ struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
struct i2c_adapter *ddc;
int ret;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
if (ret || !IS_G4X(dev_priv))
@@ -970,7 +968,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, ddc);
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -984,7 +982,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
u32 adpa;
adpa = intel_de_read(display, crt->adpa_reg);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa &= ~ADPA_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
intel_de_write(display, crt->adpa_reg, adpa);
intel_de_posting_read(display, crt->adpa_reg);
@@ -1022,9 +1020,8 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct drm_connector *connector;
+ struct intel_connector *connector;
struct intel_crt *crt;
- struct intel_connector *intel_connector;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
@@ -1047,7 +1044,9 @@ void intel_crt_init(struct intel_display *display)
* it and see what happens.
*/
intel_de_write(display, adpa_reg,
- adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ adpa | ADPA_DAC_ENABLE |
+ ADPA_HSYNC_CNTL_DISABLE |
+ ADPA_VSYNC_CNTL_DISABLE);
if ((intel_de_read(display, adpa_reg) & ADPA_DAC_ENABLE) == 0)
return;
intel_de_write(display, adpa_reg, adpa);
@@ -1057,17 +1056,15 @@ void intel_crt_init(struct intel_display *display)
if (!crt)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(crt);
return;
}
ddc_pin = display->vbt.crt_ddc_pin;
- connector = &intel_connector->base;
- crt->connector = intel_connector;
- drm_connector_init_with_ddc(display->drm, connector,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_crt_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
intel_gmbus_get_adapter(display, ddc_pin));
@@ -1075,7 +1072,7 @@ void intel_crt_init(struct intel_display *display)
drm_encoder_init(display->drm, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC, "CRT");
- intel_connector_attach_encoder(intel_connector, &crt->base);
+ intel_connector_attach_encoder(connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
@@ -1085,7 +1082,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.pipe_mask = ~0;
if (DISPLAY_VER(display) != 2)
- connector->interlace_allowed = true;
+ connector->base.interlace_allowed = true;
crt->adpa_reg = adpa_reg;
@@ -1095,11 +1092,11 @@ void intel_crt_init(struct intel_display *display)
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
- intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
} else {
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
- intel_connector->base.polled = intel_connector->polled;
+ connector->base.polled = connector->polled;
if (HAS_DDI(display)) {
assert_port_valid(dev_priv, PORT_E);
@@ -1132,9 +1129,9 @@ void intel_crt_init(struct intel_display *display)
crt->base.get_hw_state = intel_crt_get_hw_state;
crt->base.enable = intel_enable_crt;
}
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
- drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_crt_connector_helper_funcs);
/*
* TODO: find a proper way to discover whether we need to set the
diff --git a/drivers/gpu/drm/i915/display/intel_crt_regs.h b/drivers/gpu/drm/i915/display/intel_crt_regs.h
new file mode 100644
index 000000000000..571a67ae9afa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crt_regs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_CRT_REGS_H__
+#define __INTEL_CRT_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define ADPA _MMIO(0x61100)
+#define PCH_ADPA _MMIO(0xe1100)
+#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
+#define ADPA_DAC_ENABLE REG_BIT(31)
+#define ADPA_PIPE_SEL_MASK REG_BIT(30)
+#define ADPA_PIPE_SEL(pipe) REG_FIELD_PREP(ADPA_PIPE_SEL_MASK, (pipe))
+#define ADPA_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
+#define ADPA_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(ADPA_PIPE_SEL_MASK_CPT, (pipe))
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK REG_GENMASK(25, 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 0)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 3)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 2)
+#define ADPA_CRT_HOTPLUG_ENABLE REG_BIT(23)
+#define ADPA_CRT_HOTPLUG_PERIOD_MASK REG_BIT(22)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_PERIOD_MASK, 0)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_PERIOD_MASK, 1)
+#define ADPA_CRT_HOTPLUG_WARMUP_MASK REG_BIT(21)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS REG_FIELD_PREP(ADPA_CRT_HOTPLUG_WARMUP_MASK, 0)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS REG_FIELD_PREP(ADPA_CRT_HOTPLUG_WARMUP_MASK, 1)
+#define ADPA_CRT_HOTPLUG_SAMPLE_MASK REG_BIT(20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S REG_FIELD_PREP(ADPA_CRT_HOTPLUG_SAMPLE_MASK, 0)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S REG_FIELD_PREP(ADPA_CRT_HOTPLUG_SAMPLE_MASK, 1)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_MASK REG_GENMASK(19, 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 0)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 1)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 2)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 3)
+#define ADPA_CRT_HOTPLUG_VOLREF_MASK REG_BIT(17)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLREF_MASK, 0)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLREF_MASK, 1)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER REG_BIT(16)
+#define ADPA_USE_VGA_HVPOLARITY REG_BIT(15)
+#define ADPA_HSYNC_CNTL_DISABLE REG_BIT(11)
+#define ADPA_VSYNC_CNTL_DISABLE REG_BIT(10)
+#define ADPA_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define ADPA_HSYNC_ACTIVE_HIGH REG_BIT(3)
+
+#define _VGA_MSR_WRITE _MMIO(0x3c2)
+
+#endif /* __INTEL_CRT_REGS_H__ */
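
The new header relies entirely on the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers. A minimal userspace sketch of that pattern, with simplified stand-ins for the kernel macros (the real ones in i915_reg_defs.h add compile-time mask checks this sketch omits):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
/* Simplified FIELD_PREP: shift a value into position within a mask. */
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define ADPA_DAC_ENABLE               BIT(31)
#define ADPA_CRT_HOTPLUG_VOLTAGE_MASK GENMASK(19, 18)

int main(void)
{
	/* Enable the DAC and select the 50% hotplug voltage (encoding 1). */
	uint32_t adpa = ADPA_DAC_ENABLE |
			FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 1);

	printf("ADPA = 0x%08x\n", adpa); /* prints 0x80040000 */
	return 0;
}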
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index a2c528d707f4..c910168602d2 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -12,6 +12,7 @@
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
+#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 705ec5ad385c..1faef60be472 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -50,16 +50,6 @@ intel_dump_infoframe(struct drm_i915_private *i915,
hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
}
-static void
-intel_dump_buffer(const char *prefix, const u8 *buf, size_t len)
-{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
- 16, 0, buf, len, false);
-}
-
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -293,8 +283,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp);
if (pipe_config->has_audio)
- intel_dump_buffer("ELD: ", pipe_config->eld,
- drm_eld_size(pipe_config->eld));
+ drm_print_hex_dump(&p, "ELD: ", pipe_config->eld,
+ drm_eld_size(pipe_config->eld));
drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
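
The hunk above drops the hand-rolled hex dumper in favor of the shared drm_print_hex_dump() helper. An illustrative standalone equivalent of the 16-bytes-per-line, prefixed output it produces (the exact kernel output format is an assumption here):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void hex_dump(const char *prefix, const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		if (i % 16 == 0)
			printf("%s%s", i ? "\n" : "", prefix);
		printf("%02x ", buf[i]);
	}
	if (len)
		putchar('\n');
}

int main(void)
{
	uint8_t eld[20] = { 0x10, 0x0c, 0x00, 0x00 }; /* made-up ELD bytes */

	hex_dump("ELD: ", eld, sizeof(eld));
	return 0;
}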
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 9ba77970dab7..57cf8f46a458 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -11,6 +11,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -619,7 +620,6 @@ static void skl_write_cursor_wm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -627,14 +627,14 @@ static void skl_write_cursor_wm(struct intel_dsb *dsb,
&crtc_state->wm.skl.plane_ddb[plane_id];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
intel_de_write_dsb(display, dsb, CUR_WM(pipe, level),
skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
intel_de_write_dsb(display, dsb, CUR_WM_TRANS(pipe),
skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
intel_de_write_dsb(display, dsb, CUR_WM_SAGV(pipe),
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 0c7aee13495a..e768dc6a15b3 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -5,6 +5,8 @@
#include <linux/log2.h>
#include <linux/math64.h>
+
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
@@ -2983,7 +2985,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ bool lane_reversal = dig_port->lane_reversal;
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
INTEL_CX0_LANE0;
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -3066,7 +3068,10 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ if (DISPLAY_VER(display) >= 30)
+ clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val);
+ else
+ clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
@@ -3081,13 +3086,18 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
return 540000;
case XELPDP_DDI_CLOCK_SELECT_TBT_810:
return 810000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_312_5:
+ return 1000000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_625:
+ return 2000000;
default:
MISSING_CASE(clock);
return 162000;
}
}
-static int intel_mtl_tbt_clock_select(int clock)
+static int intel_mtl_tbt_clock_select(struct intel_display *display,
+ int clock)
{
switch (clock) {
case 162000:
@@ -3098,6 +3108,18 @@ static int intel_mtl_tbt_clock_select(int clock)
return XELPDP_DDI_CLOCK_SELECT_TBT_540;
case 810000:
return XELPDP_DDI_CLOCK_SELECT_TBT_810;
+ case 1000000:
+ if (DISPLAY_VER(display) < 30) {
+			drm_WARN(display->drm, 1, "UHBR10 not supported for the platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_312_5;
+ case 2000000:
+ if (DISPLAY_VER(display) < 30) {
+			drm_WARN(display->drm, 1, "UHBR20 not supported for the platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_625;
default:
MISSING_CASE(clock);
return XELPDP_DDI_CLOCK_SELECT_TBT_162;
@@ -3110,15 +3132,26 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val = 0;
+ u32 mask;
/*
* 1. Program PORT_CLOCK_CTL REGISTER to configure
* clock muxes, gating and SSC
*/
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(crtc_state->port_clock));
+
+ if (DISPLAY_VER(display) >= 30) {
+ mask = XE3_DDI_CLOCK_SELECT_MASK;
+ val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
+ } else {
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK;
+ val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
+ }
+
+ mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
+ mask, val);
/* 2. Read back PORT_CLOCK_CTL REGISTER */
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
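
The UHBR additions only fit the wider Xe3 field: encodings 0x18 and 0x19 need five bits, which is why the mask grows from bits 15:12 to 16:12 in the register header below. A compilable sketch of the mapping, with encodings taken from the hunk above and a fallback mirroring the MISSING_CASE path:

#include <stdio.h>

#define SELECT_TBT_162   0xa
#define SELECT_TBT_270   0xd
#define SELECT_TBT_540   0xe
#define SELECT_TBT_810   0xf
#define SELECT_TBT_312_5 0x18 /* needs the 5-bit Xe3 field */
#define SELECT_TBT_625   0x19 /* needs the 5-bit Xe3 field */

static int tbt_clock_select(int display_ver, int port_clock_khz)
{
	switch (port_clock_khz) {
	case 162000:  return SELECT_TBT_162;
	case 270000:  return SELECT_TBT_270;
	case 540000:  return SELECT_TBT_540;
	case 810000:  return SELECT_TBT_810;
	case 1000000: /* UHBR10 */
		return display_ver >= 30 ? SELECT_TBT_312_5 : SELECT_TBT_162;
	case 2000000: /* UHBR20 */
		return display_ver >= 30 ? SELECT_TBT_625 : SELECT_TBT_162;
	default:      return SELECT_TBT_162;
	}
}

int main(void)
{
	printf("Xe3 UHBR10 -> 0x%x\n", tbt_clock_select(30, 1000000));
	printf("MTL UHBR10 -> 0x%x (fallback)\n", tbt_clock_select(14, 1000000));
	return 0;
}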
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index f0e5c196eae4..da154ff26b96 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -9,6 +9,11 @@
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
+/* DDI Buffer Control */
+#define _DDI_CLK_VALFREQ_A 0x64030
+#define _DDI_CLK_VALFREQ_B 0x64130
+#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
+
/*
* Wrapper macro to convert from port number to the index used in some of the
* registers. For Display version 20 and above it converts the port number to a
@@ -187,7 +192,9 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
+#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val)
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
@@ -195,11 +202,20 @@
#define XELPDP_DDI_CLOCK_SELECT_TBT_270 0xd
#define XELPDP_DDI_CLOCK_SELECT_TBT_540 0xe
#define XELPDP_DDI_CLOCK_SELECT_TBT_810 0xf
+#define XELPDP_DDI_CLOCK_SELECT_TBT_312_5 0x18
+#define XELPDP_DDI_CLOCK_SELECT_TBT_625 0x19
#define XELPDP_FORWARD_CLOCK_UNGATE REG_BIT(10)
#define XELPDP_LANE1_PHY_CLOCK_SELECT REG_BIT(8)
#define XELPDP_SSC_ENABLE_PLLA REG_BIT(1)
#define XELPDP_SSC_ENABLE_PLLB REG_BIT(0)
+#define TCSS_DISP_MAILBOX_IN_CMD _MMIO(0x161300)
+#define TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY REG_BIT(31)
+#define TCSS_DISP_MAILBOX_IN_CMD_CMD_MASK REG_GENMASK(7, 0)
+#define TCSS_DISP_MAILBOX_IN_CMD_DATA(val) REG_FIELD_PREP(TCSS_DISP_MAILBOX_IN_CMD_CMD_MASK, val)
+
+#define TCSS_DISP_MAILBOX_IN_DATA _MMIO(0x161304)
+
/* C10 Vendor Registers */
#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx))
#define C10_PLL0_FRACEN REG_BIT8(4)
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 49b5cc01ce40..acb986bc1f33 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -28,6 +28,7 @@
#include <linux/iopoll.h>
#include <linux/string_helpers.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_privacy_screen_consumer.h>
@@ -335,10 +336,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
- intel_dp->DP = dig_port->saved_port_bits |
- DDI_PORT_WIDTH(crtc_state->lane_count) |
+ intel_dp->DP = DDI_PORT_WIDTH(crtc_state->lane_count) |
DDI_BUF_TRANS_SELECT(0);
+ if (dig_port->lane_reversal)
+ intel_dp->DP |= DDI_BUF_PORT_REVERSAL;
+ if (dig_port->ddi_a_4_lanes)
+ intel_dp->DP |= DDI_A_4_LANES;
+
if (DISPLAY_VER(i915) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
@@ -455,17 +460,20 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
}
static void
-intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+intel_ddi_config_transcoder_dp2(const struct intel_crtc_state *crtc_state,
+ bool enable)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
- if (intel_dp_is_uhbr(crtc_state))
+ if (!HAS_DP20(display))
+ return;
+
+ if (enable && intel_dp_is_uhbr(crtc_state))
val = TRANS_DP2_128B132B_CHANNEL_CODING;
- intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_DP2_CTL(cpu_transcoder), val);
}
/*
@@ -554,7 +562,8 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
temp |= (crtc_state->fdi_lanes - 1) << 1;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
if (intel_dp_is_uhbr(crtc_state))
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
else
@@ -617,9 +626,10 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
/*
* Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
- * bit.
+ * bit for the DDI function and enables the DP2 configuration. Called for all
+ * transcoder types.
*/
-static void
+void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -628,18 +638,27 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
+ intel_ddi_config_transcoder_dp2(crtc_state, true);
+
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
ctl);
}
+/*
+ * Disable the DDI function and port syncing.
+ * For SST, pre-TGL MST, TGL+ MST-slave transcoders: deselect the DDI port,
+ * SST/MST mode and disable the DP2 configuration. For TGL+ MST-master
+ * transcoders these are done later in intel_ddi_post_disable_dp().
+ */
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
u32 ctl;
if (DISPLAY_VER(dev_priv) >= 11)
@@ -659,7 +678,8 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (DISPLAY_VER(dev_priv) >= 12) {
- if (!intel_dp_mst_is_master_trans(crtc_state)) {
+ if (!intel_dp_mst_is_master_trans(crtc_state) ||
+ (!is_mst && intel_dp_is_uhbr(crtc_state))) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
@@ -670,6 +690,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
ctl);
+ if (intel_dp_mst_is_slave_trans(crtc_state))
+ intel_ddi_config_transcoder_dp2(crtc_state, false);
+
if (intel_has_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
drm_dbg_kms(display->drm, "Quirk Increase DDI disabled time\n");
@@ -700,15 +723,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(intel_connector);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum transcoder cpu_transcoder;
intel_wakeref_t wakeref;
enum pipe pipe = 0;
- u32 tmp;
+ u32 ddi_mode;
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
@@ -716,6 +739,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
if (!wakeref)
return false;
+ /* Note: This returns false for DP MST primary encoders. */
if (!encoder->get_hw_state(encoder, &pipe)) {
ret = false;
goto out;
@@ -726,38 +750,28 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
else
cpu_transcoder = (enum transcoder) pipe;
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ TRANS_DDI_MODE_SELECT_MASK;
- switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
- case TRANS_DDI_MODE_SELECT_HDMI:
- case TRANS_DDI_MODE_SELECT_DVI:
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
+ ddi_mode == TRANS_DDI_MODE_SELECT_DVI) {
ret = type == DRM_MODE_CONNECTOR_HDMIA;
- break;
-
- case TRANS_DDI_MODE_SELECT_DP_SST:
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && !HAS_DP20(display)) {
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_SST) {
ret = type == DRM_MODE_CONNECTOR_eDP ||
- type == DRM_MODE_CONNECTOR_DisplayPort;
- break;
-
- case TRANS_DDI_MODE_SELECT_DP_MST:
- /* if the transcoder is in MST state then
- * connector isn't connected */
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
+ /*
+ * encoder->get_hw_state() should have bailed out on MST. This
+ * must be SST and non-eDP.
+ */
+ ret = type == DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (drm_WARN_ON(display->drm, ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST)) {
+ /* encoder->get_hw_state() should have bailed out on MST. */
ret = false;
- break;
-
- case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
- if (HAS_DP20(dev_priv))
- /* 128b/132b */
- ret = false;
- else
- /* FDI */
- ret = type == DRM_MODE_CONNECTOR_VGA;
- break;
-
- default:
+ } else {
ret = false;
- break;
}
out:
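
The rewritten readout collapses into one mode/connector compatibility check. A sketch of the resulting decision table; the enum values are illustrative placeholders, not hardware encodings, and the MST case is assumed to have been rejected by encoder->get_hw_state() already:

#include <stdbool.h>
#include <stdio.h>

enum ddi_mode { MODE_HDMI, MODE_DVI, MODE_DP_SST, MODE_DP_MST, MODE_FDI_OR_128B132B };
enum conn { CONN_HDMI, CONN_DP, CONN_EDP, CONN_VGA };

static bool connector_matches(enum ddi_mode mode, bool has_dp20, enum conn type)
{
	if (mode == MODE_HDMI || mode == MODE_DVI)
		return type == CONN_HDMI;
	if (mode == MODE_FDI_OR_128B132B && !has_dp20)
		return type == CONN_VGA;	/* FDI */
	if (mode == MODE_DP_SST)
		return type == CONN_DP || type == CONN_EDP;
	if (mode == MODE_FDI_OR_128B132B && has_dp20)
		return type == CONN_DP;		/* 128b/132b SST, non-eDP */
	return false;				/* MST handled earlier */
}

int main(void)
{
	printf("%d\n", connector_matches(MODE_FDI_OR_128B132B, true, CONN_DP)); /* 1 */
	return 0;
}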
@@ -769,13 +783,13 @@ out:
static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
u32 tmp;
- u8 mst_pipe_mask;
+ u8 mst_pipe_mask = 0, dp128b132b_pipe_mask = 0;
*pipe_mask = 0;
*is_dp_mst = false;
@@ -812,10 +826,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
- unsigned int port_mask, ddi_select;
+ u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
@@ -839,10 +852,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if ((tmp & port_mask) != ddi_select)
continue;
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST ||
- (HAS_DP20(dev_priv) &&
- (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B))
+ ddi_mode = tmp & TRANS_DDI_MODE_SELECT_MASK;
+
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST)
mst_pipe_mask |= BIT(p);
+ else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display))
+ dp128b132b_pipe_mask |= BIT(p);
*pipe_mask |= BIT(p);
}
@@ -852,6 +867,23 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
+ if (!mst_pipe_mask && dp128b132b_pipe_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /*
+ * If we don't have 8b/10b MST, but have more than one
+ * transcoder in 128b/132b mode, we know it must be 128b/132b
+ * MST.
+ *
+ * Otherwise, we fall back to checking the current MST
+ * state. It's not accurate for hardware takeover at probe, but
+ * we don't expect MST to have been enabled at that point, and
+ * can assume it's SST.
+ */
+ if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst)
+ mst_pipe_mask = dp128b132b_pipe_mask;
+ }
+
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
drm_dbg_kms(&dev_priv->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
@@ -862,9 +894,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
drm_dbg_kms(&dev_priv->drm,
- "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
- *pipe_mask, mst_pipe_mask);
+ *pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
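
A sketch of the 128b/132b MST inference added above: with no 8b/10b MST pipes, more than one pipe in 128b/132b mode (or a sink already known to be MST) implies 128b/132b MST, while a single pipe is assumed SST at probe time:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t infer_mst_pipes(uint8_t mst_mask, uint8_t dp128_mask, bool sink_is_mst)
{
	if (!mst_mask && dp128_mask &&
	    (__builtin_popcount(dp128_mask) > 1 || sink_is_mst))
		return dp128_mask;
	return mst_mask;
}

int main(void)
{
	printf("0x%x\n", infer_mst_pipes(0, 0x3, false)); /* two pipes -> MST, 0x3 */
	printf("0x%x\n", infer_mst_pipes(0, 0x1, false)); /* one pipe -> SST, 0x0 */
	return 0;
}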
@@ -2196,8 +2228,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
return DP_TP_CTL(encoder->port);
}
-i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -2208,6 +2240,25 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
return DP_TP_STATUS(encoder->port);
}
+void intel_ddi_clear_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ intel_de_write(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT);
+}
+
+void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT, 1))
+ drm_err(display->drm, "Timed out waiting for ACT sent\n");
+}
+
static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
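
The two new helpers wrap a clear-then-poll pattern on DP_TP_STATUS: write the ACT-sent bit to arm the wait, then poll for the hardware to set it again. A stubbed sketch of that pattern; the write-one-to-clear semantics and the bit position are assumptions here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_TP_STATUS_ACT_SENT (1u << 24) /* assumed bit position */

static uint32_t dp_tp_status; /* stand-in for the MMIO register */

/* Write-one-to-clear: writing the bit resets it. */
static void clear_act_sent(void)
{
	dp_tp_status &= ~DP_TP_STATUS_ACT_SENT;
}

static bool wait_for_act_sent(int timeout_ms)
{
	for (int i = 0; i < timeout_ms; i++) {
		if (dp_tp_status & DP_TP_STATUS_ACT_SENT)
			return true;
		/* a real poll would sleep here; simulate hardware progress */
		dp_tp_status |= DP_TP_STATUS_ACT_SENT;
	}
	return false;
}

int main(void)
{
	clear_act_sent();
	printf("ACT sent: %s\n", wait_for_act_sent(10) ? "yes" : "timed out");
	return 0;
}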
@@ -2376,12 +2427,10 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
if (intel_encoder_is_combo(encoder)) {
enum phy phy = intel_encoder_to_phy(encoder);
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
intel_combo_phy_power_up_lanes(i915, phy, false,
crtc_state->lane_count,
- lane_reversal);
+ dig_port->lane_reversal);
}
}
@@ -2506,25 +2555,24 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- u32 val;
+ u32 val = 0;
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port));
- val &= ~XELPDP_PORT_WIDTH_MASK;
val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
- val &= ~XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK;
if (intel_dp_is_uhbr(crtc_state))
val |= XELPDP_PORT_BUF_PORT_DATA_40BIT;
else
val |= XELPDP_PORT_BUF_PORT_DATA_10BIT;
- if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ if (dig_port->lane_reversal)
val |= XELPDP_PORT_REVERSAL;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val);
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK,
+ val);
}
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
@@ -2546,6 +2594,7 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2583,10 +2632,6 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/*
* 6.b If DP v2.0/128b mode - Configure TRANS_DP2_CTL register settings.
- */
- intel_ddi_config_transcoder_dp2(encoder, crtc_state);
-
- /*
* 6.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
*/
@@ -2644,6 +2689,14 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 6.o Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ /* 7.a 128b/132b SST. */
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
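
The 128b/132b SST path reuses the MST payload table with a single virtual channel: VCP ID 1 from time slot 0, sized by the computed tu value, and a slot count of 0 on the disable side. A toy model of that allocation; the table layout is a simplification, while the real drm_dp_dpcd_write_payload() programs the sink's DPCD:

#include <stdio.h>

#define TIME_SLOTS 64

static unsigned char payload_table[TIME_SLOTS]; /* 0 = unallocated */

static void write_payload(int vcpid, int start_slot, int slot_count)
{
	if (slot_count == 0) {
		/* clearing: free every slot owned by this VCP ID */
		for (int i = 0; i < TIME_SLOTS; i++)
			if (payload_table[i] == vcpid)
				payload_table[i] = 0;
		return;
	}
	for (int i = start_slot; i < start_slot + slot_count && i < TIME_SLOTS; i++)
		payload_table[i] = (unsigned char)vcpid;
}

int main(void)
{
	int tu = 26; /* illustrative tu from the link configuration */

	write_payload(1, 0, tu); /* enable: VCPID 1, start slot 0 */
	printf("slot 0 owned by VCP %d\n", payload_table[0]);
	write_payload(1, 0, 0);  /* disable: slot count 0 clears */
	printf("slot 0 owned by VCP %d\n", payload_table[0]);
	return 0;
}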
@@ -2657,6 +2710,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2721,9 +2775,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_ddi_enable_transcoder_clock(encoder, crtc_state);
- if (HAS_DP20(dev_priv))
- intel_ddi_config_transcoder_dp2(encoder, crtc_state);
-
/*
* 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
@@ -2786,6 +2837,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2862,9 +2920,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (HAS_DP20(dev_priv))
+ if (HAS_DP20(display))
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
@@ -2872,9 +2930,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (crtc_state->has_panel_replay)
intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2911,6 +2969,24 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
crtc_state, conn_state);
}
+/*
+ * Note: Also called from the ->pre_enable of the first active MST stream
+ * encoder on its primary encoder.
+ *
+ * When called from DP MST code:
+ *
+ * - conn_state will be NULL
+ *
+ * - encoder will be the primary encoder (i.e. mst->primary)
+ *
+ * - the main connector associated with this port won't be active or linked to a
+ * crtc
+ *
+ * - crtc_state will be the state of the first stream to be activated on this
+ * port, and it may not be the same stream that will be deactivated last, but
+ * each stream should have a state that is identical when it comes to the DP
+ *   link parameters
+ */
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -2920,19 +2996,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- /*
- * When called from DP MST code:
- * - conn_state will be NULL
- * - encoder will be the main encoder (ie. mst->primary)
- * - the main connector associated with this port
- * won't be active or linked to a crtc
- * - crtc_state will be the state of the first stream to
- * be activated on this port, and it may not be the same
- * stream that will be deactivated last, but each stream
- * should have a state that is identical when it comes to
- * the DP link parameteres
- */
-
drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -3088,6 +3151,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
+ intel_ddi_config_transcoder_dp2(old_crtc_state, false);
+
/*
* From TGL spec: "If single stream or multi-stream master transcoder:
* Configure Transcoder Clock select to direct no clock to the
@@ -3153,7 +3218,9 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
+ bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
int i;
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3165,6 +3232,20 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(old_crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, clear */
+ drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, 0);
+
+ intel_ddi_clear_act_sent(encoder, old_crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
+
+ intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3180,6 +3261,11 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
}
}
+/*
+ * Note: Also called from the ->post_disable of the last active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void intel_ddi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -3210,6 +3296,11 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
old_conn_state);
}
+/*
+ * Note: Also called from the ->post_pll_disable of the last active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -3260,7 +3351,7 @@ static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
crtc_state);
}
-static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+static void intel_ddi_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3282,18 +3373,8 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
-/* FIXME bad home for this function */
-i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- return DISPLAY_VER(i915) >= 14 ?
- MTL_CHICKEN_TRANS(cpu_transcoder) :
- CHICKEN_TRANS(cpu_transcoder);
-}
-
static i915_reg_t
-gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
- enum port port)
+gen9_chicken_trans_reg_by_port(struct intel_display *display, enum port port)
{
static const enum transcoder trans[] = {
[PORT_A] = TRANSCODER_EDP,
@@ -3303,19 +3384,20 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) < 9);
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) < 9);
- if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
+ if (drm_WARN_ON(display->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
- return CHICKEN_TRANS(trans[port]);
+ return CHICKEN_TRANS(display, trans[port]);
}
-static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
@@ -3346,7 +3428,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
- i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
+ i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
val = intel_de_read(dev_priv, reg);
@@ -3386,14 +3468,20 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* is filled with lane count, already set in the crtc_state.
* The same is required to be filled in PORT_BUF_CTL for C10/20 Phy.
*/
- buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
+ buf_ctl = DDI_BUF_CTL_ENABLE;
+
+ if (dig_port->lane_reversal)
+ buf_ctl |= DDI_BUF_PORT_REVERSAL;
+ if (dig_port->ddi_a_4_lanes)
+ buf_ctl |= DDI_A_4_LANES;
+
if (DISPLAY_VER(dev_priv) >= 14) {
u8 lane_count = mtl_get_port_width(crtc_state->lane_count);
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(lane_count);
- if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
@@ -3413,20 +3501,46 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_wait_ddi_buf_active(encoder);
}
-static void intel_enable_ddi(struct intel_atomic_state *state,
+static void intel_ddi_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *pipe_crtc;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ bool is_hdmi = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
int i;
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
+
+ intel_de_write(display, TRANS_DP2_VFREQHIGH(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
+ intel_de_write(display, TRANS_DP2_VFREQLOW(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
+ }
+
intel_ddi_enable_transcoder_func(encoder, crtc_state);
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_ddi_clear_act_sent(encoder, crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), 0,
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+
+ intel_ddi_wait_for_act_sent(encoder, crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
+
intel_enable_transcoder(crtc_state);
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
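
The TRANS_DP2_VFREQ programming added above splits the pixel clock in Hz across two registers: everything above bit 24 goes to VFREQHIGH, the low 24 bits to VFREQLOW. A round-trip check of that split:

#include <stdint.h>
#include <stdio.h>

#define KHz(x) ((uint64_t)(x) * 1000)

int main(void)
{
	uint64_t crtc_clock_hz = KHz(533250); /* illustrative 4k60-class clock in kHz */
	uint32_t vfreq_high = (uint32_t)(crtc_clock_hz >> 24);
	uint32_t vfreq_low = (uint32_t)(crtc_clock_hz & 0xffffff);

	printf("VFREQHIGH=0x%x VFREQLOW=0x%06x\n", vfreq_high, vfreq_low);
	/* reassemble to verify nothing is lost */
	printf("clock=%llu Hz\n",
	       (unsigned long long)(((uint64_t)vfreq_high << 24) | vfreq_low));
	return 0;
}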
@@ -3438,16 +3552,16 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_crtc_vblank_on(pipe_crtc_state);
}
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
+ if (is_hdmi)
+ intel_ddi_enable_hdmi(state, encoder, crtc_state, conn_state);
else
- intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
+ intel_ddi_enable_dp(state, encoder, crtc_state, conn_state);
intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
-static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3468,7 +3582,7 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
false);
}
-static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3483,7 +3597,7 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
connector->base.id, connector->name);
}
-static void intel_disable_ddi(struct intel_atomic_state *state,
+static void intel_ddi_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3493,10 +3607,10 @@ static void intel_disable_ddi(struct intel_atomic_state *state,
intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+ intel_ddi_disable_hdmi(state, encoder, old_crtc_state,
old_conn_state);
else
- intel_disable_ddi_dp(state, encoder, old_crtc_state,
+ intel_ddi_disable_dp(state, encoder, old_crtc_state,
old_conn_state);
}
@@ -3556,6 +3670,11 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
intel_update_active_dpll(state, pipe_crtc, encoder);
}
+/*
+ * Note: Also called from the ->pre_pll_enable of the first active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void
intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -3599,9 +3718,9 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u32 dp_tp_ctl;
@@ -3609,21 +3728,22 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
	 * TODO: Disabling and re-enabling the port should not be necessary
	 * to train with only a different voltage swing entry
*/
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
if (dp_tp_ctl & DP_TP_CTL_ENABLE)
mtl_disable_ddi_buf(encoder, crtc_state);
/* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
/* 6.f Enable D2D Link */
mtl_ddi_enable_d2d(encoder);
@@ -3636,11 +3756,11 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
/* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
intel_wait_ddi_buf_active(encoder);
@@ -3675,7 +3795,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
}
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
@@ -3868,29 +3989,141 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
crtc_state->sync_mode_slaves_mask);
}
+static void intel_ddi_read_func_ctl_dvi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_HDMI);
+ if (DISPLAY_VER(display) >= 14)
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ else
+ crtc_state->lane_count = 4;
+}
+
+static void intel_ddi_read_func_ctl_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ crtc_state->has_hdmi_sink = true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+
+ if (crtc_state->infoframes.enable)
+ crtc_state->has_infoframe = true;
+
+ if (ddi_func_ctl & TRANS_DDI_HDMI_SCRAMBLING)
+ crtc_state->hdmi_scrambling = true;
+ if (ddi_func_ctl & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
+ crtc_state->hdmi_high_tmds_clock_ratio = true;
+
+ intel_ddi_read_func_ctl_dvi(encoder, crtc_state, ddi_func_ctl);
+}
+
+static void intel_ddi_read_func_ctl_fdi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ crtc_state->enhanced_framing =
+ intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)) &
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+}
+
+static void intel_ddi_read_func_ctl_dp_sst(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_DP);
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (DISPLAY_VER(display) >= 12 &&
+ (ddi_func_ctl & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B)
+ crtc_state->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, ddi_func_ctl);
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n);
+ intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2);
+
+ crtc_state->enhanced_framing =
+ intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)) &
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+
+ if (DISPLAY_VER(display) >= 11)
+ crtc_state->fec_enable =
+ intel_de_read(display,
+ dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
+
+ if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ crtc_state->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, crtc_state);
+ else
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+}
+
+static void intel_ddi_read_func_ctl_dp_mst(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_DP_MST);
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (DISPLAY_VER(display) >= 12)
+ crtc_state->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, ddi_func_ctl);
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n);
+
+ if (DISPLAY_VER(display) >= 11)
+ crtc_state->fec_enable =
+ intel_de_read(display,
+ dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+}
+
static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- u32 temp, flags = 0;
+ u32 ddi_func_ctl, ddi_mode, flags = 0;
- temp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
- if (temp & TRANS_DDI_PHSYNC)
+ ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
- if (temp & TRANS_DDI_PVSYNC)
+ if (ddi_func_ctl & TRANS_DDI_PVSYNC)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->hw.adjusted_mode.flags |= flags;
- switch (temp & TRANS_DDI_BPC_MASK) {
+ switch (ddi_func_ctl & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
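
All of the DP/DVI readout helpers above share one decode: the lane count is stored zero-based in a small field of TRANS_DDI_FUNC_CTL. A sketch of that field decode; the mask and shift values are assumptions standing in for the real register definitions:

#include <stdint.h>
#include <stdio.h>

#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_PORT_WIDTH_MASK  (7u << DDI_PORT_WIDTH_SHIFT) /* assumed layout */

static int decode_lane_count(uint32_t ddi_func_ctl)
{
	/* the field stores lanes - 1 */
	return ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
}

int main(void)
{
	printf("%d lanes\n", decode_lane_count(3u << DDI_PORT_WIDTH_SHIFT)); /* 4 */
	return 0;
}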
@@ -3907,93 +4140,37 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
break;
}
- switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
- case TRANS_DDI_MODE_SELECT_HDMI:
- pipe_config->has_hdmi_sink = true;
-
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
- if (pipe_config->infoframes.enable)
- pipe_config->has_infoframe = true;
-
- if (temp & TRANS_DDI_HDMI_SCRAMBLING)
- pipe_config->hdmi_scrambling = true;
- if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
- pipe_config->hdmi_high_tmds_clock_ratio = true;
- fallthrough;
- case TRANS_DDI_MODE_SELECT_DVI:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- if (DISPLAY_VER(dev_priv) >= 14)
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
- else
- pipe_config->lane_count = 4;
- break;
- case TRANS_DDI_MODE_SELECT_DP_SST:
- if (encoder->type == INTEL_OUTPUT_EDP)
- pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
- else
- pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
- &pipe_config->dp_m_n);
- intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder,
- &pipe_config->dp_m2_n2);
-
- pipe_config->enhanced_framing =
- intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
- DP_TP_CTL_ENHANCED_FRAME_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- pipe_config->fec_enable =
- intel_de_read(dev_priv,
- dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE;
+ ddi_mode = ddi_func_ctl & TRANS_DDI_MODE_SELECT_MASK;
+
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI) {
+ intel_ddi_read_func_ctl_hdmi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DVI) {
+ intel_ddi_read_func_ctl_dvi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && !HAS_DP20(display)) {
+ intel_ddi_read_func_ctl_fdi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_SST) {
+ intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
+ intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
- pipe_config->infoframes.enable |=
- intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ /*
+	 * If this is true, we know we're being called from an MST stream
+	 * encoder's ->get_config().
+ */
+ if (intel_dp->is_mst)
+ intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
- break;
- case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
- if (!HAS_DP20(dev_priv)) {
- /* FDI */
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->enhanced_framing =
- intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
- DP_TP_CTL_ENHANCED_FRAME_ENABLE;
- break;
- }
- fallthrough; /* 128b/132b */
- case TRANS_DDI_MODE_SELECT_DP_MST:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- if (DISPLAY_VER(dev_priv) >= 12)
- pipe_config->mst_master_transcoder =
- REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
-
- intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
- &pipe_config->dp_m_n);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- pipe_config->fec_enable =
- intel_de_read(dev_priv,
- dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE;
-
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
- break;
- default:
- break;
+ intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
}
}
+/*
+ * Note: Also called from the ->get_config of the MST stream encoders on their
+ * primary encoder, via the platform specific hooks here. See also the comment
+ * for intel_ddi_pre_enable().
+ */
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -4461,8 +4638,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.late_register = intel_ddi_encoder_late_register,
};
-static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
@@ -4470,7 +4646,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
if (DISPLAY_VER(i915) >= 14)
@@ -4485,7 +4661,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
if (!intel_dp_init_connector(dig_port, connector)) {
kfree(connector);
- return NULL;
+ return -EINVAL;
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -4501,7 +4677,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
}
- return connector;
+ return 0;
}
static int intel_hdmi_reset_link(struct intel_encoder *encoder,
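
The connector-init helpers switch from a NULL-on-failure pointer to a 0/-errno return, which lets callers tell allocation failures from init failures and matches the `if (intel_ddi_init_dp_connector(dig_port)) goto err;` call sites further down. A minimal sketch of the convention, with hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct connector { int id; };

static int init_dp_connector(struct connector **out)
{
	struct connector *c = malloc(sizeof(*c));

	if (!c)
		return -ENOMEM; /* allocation failure */
	c->id = 1;
	/* later init steps would free c and return -EINVAL instead */
	*out = c;
	return 0;
}

int main(void)
{
	struct connector *c;
	int ret = init_dp_connector(&c);

	if (ret) {
		fprintf(stderr, "init failed: %d\n", ret);
		return 1;
	}
	printf("connector %d\n", c->id);
	free(c);
	return 0;
}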
@@ -4667,20 +4843,28 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
-static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
{
struct intel_connector *connector;
enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(dig_port, connector);
- return connector;
+ if (!intel_hdmi_init_connector(dig_port, connector)) {
+ /*
+ * HDMI connector init failures may just mean conflicting DDC
+ * pins or not having enough lanes. Handle them gracefully, but
+ * don't fail the entire DDI init.
+ */
+ dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
+ kfree(connector);
+ }
+
+ return 0;
}
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
@@ -4690,7 +4874,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
if (dig_port->base.port != PORT_A)
return false;
- if (dig_port->saved_port_bits & DDI_A_4_LANES)
+ if (dig_port->ddi_a_4_lanes)
return false;
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
@@ -4728,7 +4912,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
if (intel_ddi_a_force_4_lanes(dig_port)) {
drm_dbg_kms(&dev_priv->drm,
"Forcing DDI_A_4_LANES for port A\n");
- dig_port->saved_port_bits |= DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
}
@@ -4835,8 +5019,10 @@ static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder)
static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
{
- intel_dp_encoder_shutdown(encoder);
- intel_hdmi_encoder_shutdown(encoder);
+ if (intel_encoder_is_dp(encoder))
+ intel_dp_encoder_shutdown(encoder);
+ if (intel_encoder_is_hdmi(encoder))
+ intel_hdmi_encoder_shutdown(encoder);
}
static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder)
@@ -4907,6 +5093,7 @@ void intel_ddi_init(struct intel_display *display,
bool init_hdmi, init_dp;
enum port port;
enum phy phy;
+ u32 ddi_buf_ctl;
port = intel_bios_encoder_port(devdata);
if (port == PORT_NONE)
@@ -5030,10 +5217,10 @@ void intel_ddi_init(struct intel_display *display,
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
encoder->compute_config_late = intel_ddi_compute_config_late;
- encoder->enable = intel_enable_ddi;
+ encoder->enable = intel_ddi_enable;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
- encoder->disable = intel_disable_ddi;
+ encoder->disable = intel_ddi_disable;
encoder->post_pll_disable = intel_ddi_post_pll_disable;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
@@ -5156,17 +5343,12 @@ void intel_ddi_init(struct intel_display *display,
else
encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
- if (DISPLAY_VER(dev_priv) >= 11)
- dig_port->saved_port_bits =
- intel_de_read(dev_priv, DDI_BUF_CTL(port))
- & DDI_BUF_PORT_REVERSAL;
- else
- dig_port->saved_port_bits =
- intel_de_read(dev_priv, DDI_BUF_CTL(port))
- & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+
+ dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
+ ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- if (intel_bios_encoder_lane_reversal(devdata))
- dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
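
The saved_port_bits removal boils down to deriving two booleans once, from the VBT and from DDI_BUF_CTL, instead of carrying raw register bits around. A sketch of that derivation; the bit positions are assumptions standing in for the real definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DDI_BUF_PORT_REVERSAL (1u << 16) /* assumed bit position */
#define DDI_A_4_LANES         (1u << 4)  /* assumed bit position */

struct dig_port { bool lane_reversal, ddi_a_4_lanes; };

static void init_port_flags(struct dig_port *p, uint32_t ddi_buf_ctl,
			    bool vbt_lane_reversal, int display_ver)
{
	p->lane_reversal = vbt_lane_reversal ||
			   (ddi_buf_ctl & DDI_BUF_PORT_REVERSAL);
	/* DDI_A_4_LANES is only meaningful on pre-gen11 hardware */
	p->ddi_a_4_lanes = display_ver < 11 && (ddi_buf_ctl & DDI_A_4_LANES);
}

int main(void)
{
	struct dig_port p;

	init_port_flags(&p, DDI_BUF_PORT_REVERSAL, false, 12);
	printf("reversal=%d a4lanes=%d\n", p.lane_reversal, p.ddi_a_4_lanes);
	return 0;
}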
@@ -5229,7 +5411,7 @@ void intel_ddi_init(struct intel_display *display,
intel_infoframe_init(dig_port);
if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
+ if (intel_ddi_init_dp_connector(dig_port))
goto err;
dig_port->hpd_pulse = intel_dp_hpd_pulse;
@@ -5243,7 +5425,7 @@ void intel_ddi_init(struct intel_display *display,
* but leave it just in case we have some really bad VBTs...
*/
if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
+ if (intel_ddi_init_hdmi_connector(dig_port))
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 640851d46b1b..2faadd1441e2 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -26,10 +26,12 @@ enum transcoder;
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder);
+
+void intel_ddi_clear_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -57,6 +59,8 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 4d21ce734343..9389b295036e 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1687,18 +1687,24 @@ dg2_get_snps_buf_trans(struct intel_encoder *encoder,
}
static const struct intel_ddi_buf_trans *
-mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
+mtl_get_c10_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
+ return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+mtl_get_c20_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state))
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder)))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
- else if (!intel_encoder_is_c10phy(encoder))
- return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
else
- return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
+ return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
}
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
@@ -1706,7 +1712,10 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (DISPLAY_VER(i915) >= 14) {
- encoder->get_buf_trans = mtl_get_cx0_buf_trans;
+ if (intel_encoder_is_c10phy(encoder))
+ encoder->get_buf_trans = mtl_get_c10_buf_trans;
+ else
+ encoder->get_buf_trans = mtl_get_c20_buf_trans;
} else if (IS_DG2(i915)) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
} else if (IS_ALDERLAKE_P(i915)) {
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index bb51f974e9e2..b7399e9d11cc 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,14 +6,16 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "i915_drv.h"
-#include "i915_trace.h"
+#include "intel_display_conversion.h"
+#include "intel_display_core.h"
+#include "intel_dmc_wl.h"
#include "intel_dsb.h"
#include "intel_uncore.h"
+#include "intel_uncore_trace.h"
static inline struct intel_uncore *__to_uncore(struct intel_display *display)
{
- return &to_i915(display->drm)->uncore;
+ return to_intel_uncore(display->drm);
}
static inline u32
@@ -118,6 +120,16 @@ __intel_de_wait_for_register_nowl(struct intel_display *display,
}
static inline int
+__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
+ i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us)
+{
+ return __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, fast_timeout_us, 0, NULL);
+}
+
+static inline int
intel_de_wait(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout)
{
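Note: the new _atomic_nowl variant passes a zero slow timeout to __intel_wait_for_register, so it busy-waits only and never sleeps, which makes it usable from atomic context. A usage sketch; the register bit and timeout budget below are invented for illustration:

    #define EXAMPLE_DONE (1 << 0)  /* hypothetical status bit */

    static int wait_done_atomic(struct intel_display *display, i915_reg_t reg)
    {
            /* busy-wait up to 100 us; never sleeps, so this is safe in
             * atomic context (e.g. under a spinlock), unlike intel_de_wait() */
            return __intel_de_wait_for_register_atomic_nowl(display, reg,
                                                            EXAMPLE_DONE,
                                                            EXAMPLE_DONE,
                                                            100);
    }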
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 863927f429aa..4271da219b41 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -511,6 +511,7 @@ void vlv_wait_port_ready(struct intel_display *display,
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
@@ -554,8 +555,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
if (DISPLAY_VER(dev_priv) == 14)
set |= DP_FEC_BS_JITTER_WA;
- intel_de_rmw(dev_priv,
- hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
clear, set);
}
@@ -591,6 +591,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
@@ -628,7 +629,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
if ((val & TRANSCONF_ENABLE) == 0)
@@ -1744,10 +1745,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder),
HSW_FRAME_START_DELAY_MASK,
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
@@ -2371,7 +2371,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* GDG double wide on either pipe, otherwise pipe A only */
- return DISPLAY_VER(dev_priv) < 4 &&
+ return HAS_DOUBLE_WIDE(dev_priv) &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
@@ -3137,9 +3137,14 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
if (tmp & PIPE_MISC_YUV420_ENABLE) {
- /* We support 4:2:0 in full blend mode only */
- drm_WARN_ON(&dev_priv->drm,
- (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
+ /*
+ * We support 4:2:0 in full blend mode only.
+ * On xe3_lpd+ full blend is implied by the YUV420 Enable bit;
+ * on earlier platforms, verify it via the YUV420 Mode bit.
+ */
+ if (DISPLAY_VER(dev_priv) < 30)
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
@@ -3207,7 +3212,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
- if (DISPLAY_VER(dev_priv) < 4)
+ if (HAS_DOUBLE_WIDE(dev_priv))
pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
@@ -3388,8 +3393,8 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPE_MISC_YUV420_ENABLE |
- PIPE_MISC_YUV420_MODE_FULL_BLEND;
+ val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE :
+ PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND;
if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
val |= PIPE_MISC_HDR_MODE_PRECISION;
@@ -3746,12 +3751,13 @@ static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
u8 *primary_pipes, u8 *secondary_pipes)
{
+ struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
*primary_pipes = 0;
*secondary_pipes = 0;
- if (!HAS_ULTRAJOINER(i915))
+ if (!HAS_ULTRAJOINER(display))
return;
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
@@ -4111,6 +4117,7 @@ static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool active;
u32 tmp;
@@ -4187,7 +4194,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -4545,6 +4552,7 @@ static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -4581,12 +4589,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ ret = intel_atomic_setup_scalers(state, crtc);
if (ret)
return ret;
}
- if (HAS_IPS(dev_priv)) {
+ if (HAS_IPS(display)) {
ret = hsw_ips_compute_config(state, crtc);
if (ret)
return ret;
@@ -5208,7 +5216,7 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
- pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
+ pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp");
drm_printf(p, "expected:\n");
drm_dp_vsc_sdp_log(p, a);
@@ -5217,27 +5225,18 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
}
static void
-pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
- bool fastset, const char *name,
+pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const struct drm_dp_as_sdp *a,
const struct drm_dp_as_sdp *b)
{
- struct drm_printer p;
-
- if (fastset) {
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
-
- drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
- } else {
- p = drm_err_printer(&i915->drm, NULL);
+ pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp");
- drm_printf(&p, "mismatch in %s dp sdp\n", name);
- }
-
- drm_printf(&p, "expected:\n");
- drm_dp_as_sdp_log(&p, a);
- drm_printf(&p, "found:\n");
- drm_dp_as_sdp_log(&p, b);
+ drm_printf(p, "expected:\n");
+ drm_dp_as_sdp_log(p, a);
+ drm_printf(p, "found:\n");
+ drm_dp_as_sdp_log(p, b);
}
/* Returns the length up to and including the last differing byte */
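A minimal implementation matching this contract (a sketch; the in-tree helper may differ in details):

    /* Scan from the end and keep everything up to and including the last
     * byte that differs; returns 0 if the buffers are identical. */
    static size_t memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
    {
            while (len > 0 && a[len - 1] == b[len - 1])
                    len--;

            return len;
    }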
@@ -5260,26 +5259,13 @@ pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
const char *name,
const u8 *a, const u8 *b, size_t len)
{
- const char *loglevel;
-
- if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- loglevel = KERN_DEBUG;
- } else {
- loglevel = KERN_ERR;
- }
-
pipe_config_mismatch(p, fastset, crtc, name, "buffer");
/* only dump up to the last difference */
len = memcmp_diff_len(a, b, len);
- print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ drm_print_hex_dump(p, "expected: ", a, len);
+ drm_print_hex_dump(p, "found: ", b, len);
}
static void
@@ -5322,6 +5308,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
+ struct intel_display *display = to_intel_display(current_config);
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_printer p;
@@ -5498,7 +5485,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5562,7 +5549,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+ if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
} else {
@@ -5743,7 +5730,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
- PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.num_streams);
PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
@@ -6797,6 +6784,7 @@ static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *_state)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -6804,7 +6792,7 @@ int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return -ENODEV;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -7572,7 +7560,7 @@ static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state)
static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
- container_of(work, struct intel_atomic_state, base.commit_work);
+ container_of(work, struct intel_atomic_state, cleanup_work);
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
@@ -7822,6 +7810,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.funcs.display->commit_modeset_enables(state);
+ intel_program_dpkgc_latency(state);
+
if (state->modeset)
intel_set_cdclk_post_plane_update(state);
@@ -7927,8 +7917,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* schedule point (cond_resched()) here anyway to keep latencies
* down.
*/
- INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
- queue_work(system_highpri_wq, &state->base.commit_work);
+ INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
+ queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -8166,7 +8156,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_lvds_init(dev_priv);
intel_crt_init(display);
- dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
+ dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
if (ilk_has_edp_a(dev_priv))
g4x_dp_init(dev_priv, DP_A, PORT_A);
@@ -8212,14 +8202,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
+ has_edp = intel_dp_is_port_edp(display, PORT_B);
has_port = intel_bios_is_port_present(display, PORT_B);
if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
+ has_edp = intel_dp_is_port_edp(display, PORT_C);
has_port = intel_bios_is_port_present(display, PORT_C);
if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
@@ -8308,11 +8298,12 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
static int max_dotclock(struct drm_i915_private *i915)
{
- int max_dotclock = i915->display.cdclk.max_dotclk_freq;
+ struct intel_display *display = &i915->display;
+ int max_dotclock = display->cdclk.max_dotclk_freq;
- if (HAS_ULTRAJOINER(i915))
+ if (HAS_ULTRAJOINER(display))
max_dotclock *= 4;
- else if (HAS_UNCOMPRESSED_JOINER(i915) || HAS_BIGJOINER(i915))
+ else if (HAS_UNCOMPRESSED_JOINER(display) || HAS_BIGJOINER(display))
max_dotclock *= 2;
return max_dotclock;
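Note: the multipliers simply scale the single-pipe limit by the number of joined pipes; with an illustrative (made-up) single-pipe limit:

    /* Illustrative arithmetic only; the 1350000 kHz figure is made up. */
    int single = 1350000;     /* display->cdclk.max_dotclk_freq */
    int ultra  = single * 4;  /* 5400000 kHz across four joined pipes */
    int big    = single * 2;  /* 2700000 kHz across two joined pipes */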
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index caef04f655c5..49a246feb1ae 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -238,9 +238,6 @@ enum phy_fia {
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
for_each_if((__phys_mask) & BIT(__phy))
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
#define for_each_intel_plane(dev, intel_plane) \
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
new file mode 100644
index 000000000000..0578b68404da
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include "i915_drv.h"
+
+struct intel_display *__i915_to_display(struct drm_i915_private *i915)
+{
+ return &i915->display;
+}
+
+struct intel_display *__drm_to_display(struct drm_device *drm)
+{
+ return __i915_to_display(to_i915(drm));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
index ad8545c8055d..46c7208d42ba 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.h
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -8,14 +8,20 @@
#ifndef __INTEL_DISPLAY_CONVERSION__
#define __INTEL_DISPLAY_CONVERSION__
+struct drm_device;
+struct drm_i915_private;
+struct intel_display;
+
+struct intel_display *__i915_to_display(struct drm_i915_private *i915);
+struct intel_display *__drm_to_display(struct drm_device *drm);
/*
* Transitional macro to optionally convert struct drm_i915_private * to struct
* intel_display *, also accepting the latter.
*/
#define __to_intel_display(p) \
_Generic(p, \
- const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \
- struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \
+ const struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
+ struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
const struct intel_display *: (p), \
struct intel_display *: (p))
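Note: the macro relies on C11 _Generic selection, now routed through out-of-line helpers so the header no longer needs the full struct definitions. A standalone illustration of the same dispatch pattern; all types and names below are invented, not i915 code:

    #include <stdio.h>

    struct display { int id; };
    struct device  { struct display display; };

    static struct display *dev_to_display(struct device *dev)
    {
            return &dev->display;
    }

    /* Accepts either pointer type; converts one, passes the other through.
     * The cast in the non-selected branch keeps it a valid expression,
     * just like the drm_i915_private cast in the macro above. */
    #define to_display(p) _Generic((p), \
            struct device *: dev_to_display((struct device *)(p)), \
            struct display *: (p))

    int main(void)
    {
            struct device dev = { .display = { .id = 7 } };
            struct display *d1 = to_display(&dev);          /* converted */
            struct display *d2 = to_display(&dev.display);  /* passed through */

            printf("%d %d\n", d1->id, d2->id);
            return 0;
    }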
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 45b7c6900adc..554870d2494b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -453,7 +453,14 @@ struct intel_display {
} ips;
struct {
- bool display_irqs_enabled;
+ /*
+ * Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block
+ * registers outside of the power domain. We defer setting up
+ * the display irqs in this case to the runtime pm.
+ */
+ bool vlv_display_irqs_enabled;
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
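A minimal sketch of the deferral this comment describes, with all names hypothetical (this is not i915 code):

    struct irq_state {
            bool display_power_on;  /* runtime-pm controlled domain */
            bool display_irqs_on;
    };

    static void display_irq_postinstall(struct irq_state *s)
    {
            /* The irq registers live in the display power domain; writes
             * while it is off would be lost, so defer the setup. */
            if (!s->display_power_on)
                    return;

            s->display_irqs_on = true;  /* program irq registers here */
    }

    static void display_runtime_resume(struct irq_state *s)
    {
            s->display_power_on = true;
            display_irq_postinstall(s); /* deferred setup happens now */
    }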
@@ -505,6 +512,11 @@ struct intel_display {
/* restore state for suspend/resume and display reset */
struct drm_atomic_state *modeset_state;
struct drm_modeset_acquire_ctx reset_ctx;
+ u32 saveDSPARB;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF3[3];
+ u16 saveGCDGMBUS;
} restore;
struct {
@@ -542,6 +554,9 @@ struct intel_display {
/* unbound hipri wq for page flips/plane updates */
struct workqueue_struct *flip;
+
+ /* hipri wq for commit cleanups */
+ struct workqueue_struct *cleanup;
} wq;
/* Grouping using named structs. Keep sorted. */
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 11aff485d8fa..f1d76484025a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -11,8 +11,10 @@
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
+#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
#include "intel_bo.h"
#include "intel_crtc.h"
@@ -730,11 +732,12 @@ static bool
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
enum i915_power_well_id power_well_id)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wakeref;
bool is_enabled;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(i915,
+ is_enabled = intel_display_power_well_is_enabled(display,
power_well_id);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1012,6 +1015,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
DP_DSC_YCbCr444)));
seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
+ seq_printf(m, "DSC_Sink_Max_Slice_Count: %d\n",
+ drm_dp_dsc_sink_max_slice_count((connector->dp.dsc_dpcd), intel_dp_is_edp(intel_dp)));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1331,7 +1336,7 @@ static ssize_t i915_joiner_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int force_joined_pipes = 0;
int ret;
@@ -1349,7 +1354,7 @@ static ssize_t i915_joiner_write(struct file *file,
connector->force_joined_pipes = force_joined_pipes;
break;
case 4:
- if (HAS_ULTRAJOINER(i915)) {
+ if (HAS_ULTRAJOINER(display)) {
connector->force_joined_pipes = force_joined_pipes;
break;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index ec3ed29a83c9..88914a1f3f62 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -7,9 +7,10 @@
#include <linux/kernel.h>
#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include "intel_display_core.h"
#include "intel_display_debugfs_params.h"
-#include "i915_drv.h"
#include "intel_display_params.h"
/* int param */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 5f98e1b2a401..68cb7f9b9ef3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -16,6 +16,7 @@
#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -252,6 +253,7 @@ static const struct intel_display_device_info no_display = {};
static const struct platform_desc i830_desc = {
PLATFORM(i830),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
I830_DISPLAY,
@@ -270,6 +272,7 @@ static const struct platform_desc i845_desc = {
static const struct platform_desc i85x_desc = {
PLATFORM(i85x),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
I830_DISPLAY,
@@ -312,6 +315,7 @@ static const struct platform_desc i915g_desc = {
static const struct platform_desc i915gm_desc = {
PLATFORM(i915gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I9XX_COLORS,
@@ -336,6 +340,7 @@ static const struct platform_desc i945g_desc = {
static const struct platform_desc i945gm_desc = {
PLATFORM(i945gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I9XX_COLORS,
@@ -357,13 +362,21 @@ static const struct platform_desc g33_desc = {
},
};
-static const struct platform_desc pnv_desc = {
+static const struct intel_display_device_info pnv_display = {
+ GEN3_DISPLAY,
+ I9XX_COLORS,
+ .has_hotplug = 1,
+};
+
+static const struct platform_desc pnv_g_desc = {
PLATFORM(pineview),
- .info = &(const struct intel_display_device_info) {
- GEN3_DISPLAY,
- I9XX_COLORS,
- .has_hotplug = 1,
- },
+ .info = &pnv_display,
+};
+
+static const struct platform_desc pnv_m_desc = {
+ PLATFORM(pineview),
+ PLATFORM_GROUP(mobile),
+ .info = &pnv_display,
};
#define GEN4_DISPLAY \
@@ -390,6 +403,7 @@ static const struct platform_desc i965g_desc = {
static const struct platform_desc i965gm_desc = {
PLATFORM(i965gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
.has_overlay = 1,
@@ -413,6 +427,7 @@ static const struct platform_desc g45_desc = {
static const struct platform_desc gm45_desc = {
PLATFORM(gm45),
PLATFORM_GROUP(g4x),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
.supports_tv = 1,
@@ -443,6 +458,7 @@ static const struct platform_desc ilk_d_desc = {
static const struct platform_desc ilk_m_desc = {
PLATFORM(ironlake),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
ILK_DISPLAY,
@@ -450,38 +466,54 @@ static const struct platform_desc ilk_m_desc = {
},
};
-static const struct platform_desc snb_desc = {
+static const struct intel_display_device_info snb_display = {
+ .has_hotplug = 1,
+ I9XX_PIPE_OFFSETS,
+ I9XX_CURSOR_OFFSETS,
+ ILK_COLORS,
+
+ .__runtime_defaults.ip.ver = 6,
+ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
+ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
+};
+
+static const struct platform_desc snb_d_desc = {
PLATFORM(sandybridge),
- .info = &(const struct intel_display_device_info) {
- .has_hotplug = 1,
- I9XX_PIPE_OFFSETS,
- I9XX_CURSOR_OFFSETS,
- ILK_COLORS,
+ .info = &snb_display,
+};
- .__runtime_defaults.ip.ver = 6,
- .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .__runtime_defaults.cpu_transcoder_mask =
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
- .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
- .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
- },
+static const struct platform_desc snb_m_desc = {
+ PLATFORM(sandybridge),
+ PLATFORM_GROUP(mobile),
+ .info = &snb_display,
};
-static const struct platform_desc ivb_desc = {
+static const struct intel_display_device_info ivb_display = {
+ .has_hotplug = 1,
+ IVB_PIPE_OFFSETS,
+ IVB_CURSOR_OFFSETS,
+ IVB_COLORS,
+
+ .__runtime_defaults.ip.ver = 7,
+ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
+ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
+};
+
+static const struct platform_desc ivb_d_desc = {
PLATFORM(ivybridge),
- .info = &(const struct intel_display_device_info) {
- .has_hotplug = 1,
- IVB_PIPE_OFFSETS,
- IVB_CURSOR_OFFSETS,
- IVB_COLORS,
+ .info = &ivb_display,
+};
- .__runtime_defaults.ip.ver = 7,
- .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .__runtime_defaults.cpu_transcoder_mask =
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
- .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
- .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
- },
+static const struct platform_desc ivb_m_desc = {
+ PLATFORM(ivybridge),
+ PLATFORM_GROUP(mobile),
+ .info = &ivb_display,
};
static const struct platform_desc vlv_desc = {
@@ -1011,6 +1043,7 @@ static const enum intel_step dg1_steppings[] = {
static const struct platform_desc dg1_desc = {
PLATFORM(dg1),
+ PLATFORM_GROUP(dgfx),
.info = &(const struct intel_display_device_info) {
XE_D_DISPLAY,
@@ -1238,6 +1271,7 @@ static const enum intel_step dg2_g12_steppings[] = {
static const struct platform_desc dg2_desc = {
PLATFORM(dg2),
+ PLATFORM_GROUP(dgfx),
.subplatforms = (const struct subplatform_desc[]) {
{
SUBPLATFORM(dg2, g10),
@@ -1338,6 +1372,7 @@ static const struct platform_desc lnl_desc = {
static const struct platform_desc bmg_desc = {
PLATFORM(battlemage),
+ PLATFORM_GROUP(dgfx),
};
static const struct platform_desc ptl_desc = {
@@ -1381,11 +1416,14 @@ static const struct {
INTEL_I965GM_IDS(INTEL_DISPLAY_DEVICE, &i965gm_desc),
INTEL_GM45_IDS(INTEL_DISPLAY_DEVICE, &gm45_desc),
INTEL_G45_IDS(INTEL_DISPLAY_DEVICE, &g45_desc),
- INTEL_PNV_IDS(INTEL_DISPLAY_DEVICE, &pnv_desc),
+ INTEL_PNV_G_IDS(INTEL_DISPLAY_DEVICE, &pnv_g_desc),
+ INTEL_PNV_M_IDS(INTEL_DISPLAY_DEVICE, &pnv_m_desc),
INTEL_ILK_D_IDS(INTEL_DISPLAY_DEVICE, &ilk_d_desc),
INTEL_ILK_M_IDS(INTEL_DISPLAY_DEVICE, &ilk_m_desc),
- INTEL_SNB_IDS(INTEL_DISPLAY_DEVICE, &snb_desc),
- INTEL_IVB_IDS(INTEL_DISPLAY_DEVICE, &ivb_desc),
+ INTEL_SNB_D_IDS(INTEL_DISPLAY_DEVICE, &snb_d_desc),
+ INTEL_SNB_M_IDS(INTEL_DISPLAY_DEVICE, &snb_m_desc),
+ INTEL_IVB_D_IDS(INTEL_DISPLAY_DEVICE, &ivb_d_desc),
+ INTEL_IVB_M_IDS(INTEL_DISPLAY_DEVICE, &ivb_m_desc),
INTEL_HSW_IDS(INTEL_DISPLAY_DEVICE, &hsw_desc),
INTEL_VLV_IDS(INTEL_DISPLAY_DEVICE, &vlv_desc),
INTEL_BDW_IDS(INTEL_DISPLAY_DEVICE, &bdw_desc),
@@ -1429,9 +1467,9 @@ static const struct {
};
static const struct intel_display_device_info *
-probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *ip_ver)
+probe_gmdid_display(struct intel_display *display, struct intel_display_ip_ver *ip_ver)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
struct intel_display_ip_ver gmd_id;
void __iomem *addr;
u32 val;
@@ -1439,7 +1477,8 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32));
if (!addr) {
- drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n");
+ drm_err(display->drm,
+ "Cannot map MMIO BAR to read display GMD_ID\n");
return NULL;
}
@@ -1447,7 +1486,7 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
pci_iounmap(pdev, addr);
if (val == 0) {
- drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ drm_dbg_kms(display->drm, "Device doesn't have display\n");
return NULL;
}
@@ -1463,7 +1502,8 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
}
}
- drm_err(&i915->drm, "Unrecognized display IP version %d.%02d; disabling display.\n",
+ drm_err(display->drm,
+ "Unrecognized display IP version %d.%02d; disabling display.\n",
gmd_id.ver, gmd_id.rel);
return NULL;
}
@@ -1564,10 +1604,9 @@ static void display_platforms_or(struct intel_display_platforms *dst,
bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
}
-void intel_display_device_probe(struct drm_i915_private *i915)
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
{
- struct intel_display *display = &i915->display;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_display *display = to_intel_display(pdev);
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
@@ -1575,55 +1614,56 @@ void intel_display_device_probe(struct drm_i915_private *i915)
enum intel_step step;
/* Add drm device backpointer as early as possible. */
- i915->display.drm = &i915->drm;
+ display->drm = pci_get_drvdata(pdev);
- intel_display_params_copy(&i915->display.params);
+ intel_display_params_copy(&display->params);
if (has_no_display(pdev)) {
- drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ drm_dbg_kms(display->drm, "Device doesn't have display\n");
goto no_display;
}
desc = find_platform_desc(pdev);
if (!desc) {
- drm_dbg_kms(&i915->drm, "Unknown device ID %04x; disabling display.\n",
+ drm_dbg_kms(display->drm,
+ "Unknown device ID %04x; disabling display.\n",
pdev->device);
goto no_display;
}
info = desc->info;
if (!info)
- info = probe_gmdid_display(i915, &ip_ver);
+ info = probe_gmdid_display(display, &ip_ver);
if (!info)
goto no_display;
- DISPLAY_INFO(i915) = info;
+ DISPLAY_INFO(display) = info;
- memcpy(DISPLAY_RUNTIME_INFO(i915),
- &DISPLAY_INFO(i915)->__runtime_defaults,
- sizeof(*DISPLAY_RUNTIME_INFO(i915)));
+ memcpy(DISPLAY_RUNTIME_INFO(display),
+ &DISPLAY_INFO(display)->__runtime_defaults,
+ sizeof(*DISPLAY_RUNTIME_INFO(display)));
- drm_WARN_ON(&i915->drm, !desc->name ||
+ drm_WARN_ON(display->drm, !desc->name ||
!display_platforms_weight(&desc->platforms));
display->platform = desc->platforms;
subdesc = find_subplatform_desc(pdev, desc);
if (subdesc) {
- drm_WARN_ON(&i915->drm, !subdesc->name ||
+ drm_WARN_ON(display->drm, !subdesc->name ||
!display_platforms_weight(&subdesc->platforms));
display_platforms_or(&display->platform, &subdesc->platforms);
/* Ensure platform and subplatform are distinct */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
display_platforms_weight(&display->platform) !=
display_platforms_weight(&desc->platforms) +
display_platforms_weight(&subdesc->platforms));
}
if (ip_ver.ver || ip_ver.rel || ip_ver.step) {
- DISPLAY_RUNTIME_INFO(i915)->ip = ip_ver;
+ DISPLAY_RUNTIME_INFO(display)->ip = ip_ver;
step = STEP_A0 + ip_ver.step;
if (step > STEP_FUTURE) {
drm_dbg_kms(display->drm, "Using future display stepping\n");
@@ -1634,29 +1674,32 @@ void intel_display_device_probe(struct drm_i915_private *i915)
subdesc ? &subdesc->step_info : NULL);
}
- DISPLAY_RUNTIME_INFO(i915)->step = step;
+ DISPLAY_RUNTIME_INFO(display)->step = step;
- drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u stepping %s\n",
+ drm_info(display->drm, "Found %s%s%s (device ID %04x) %s display version %u.%02u stepping %s\n",
desc->name, subdesc ? "/" : "", subdesc ? subdesc->name : "",
- pdev->device, DISPLAY_RUNTIME_INFO(i915)->ip.ver,
- DISPLAY_RUNTIME_INFO(i915)->ip.rel,
+ pdev->device, display->platform.dgfx ? "discrete" : "integrated",
+ DISPLAY_RUNTIME_INFO(display)->ip.ver,
+ DISPLAY_RUNTIME_INFO(display)->ip.rel,
step != STEP_NONE ? intel_step_name(step) : "N/A");
- return;
+ return display;
no_display:
- DISPLAY_INFO(i915) = &no_display;
+ DISPLAY_INFO(display) = &no_display;
+
+ return display;
}
-void intel_display_device_remove(struct drm_i915_private *i915)
+void intel_display_device_remove(struct intel_display *display)
{
- intel_display_params_free(&i915->display.params);
+ intel_display_params_free(&display->params);
}
-static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->pipe_mask) < I915_MAX_PIPES);
@@ -1664,35 +1707,35 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS);
/* This covers both ULT and ULX */
- if (IS_HASWELL_ULT(i915) || IS_BROADWELL_ULT(i915))
+ if (display->platform.haswell_ult || display->platform.broadwell_ult)
display_runtime->port_mask &= ~BIT(PORT_D);
- if (IS_ICL_WITH_PORT_F(i915))
+ if (display->platform.icelake_port_f)
display_runtime->port_mask |= BIT(PORT_F);
/* Wa_14011765242: adl-s A0,A1 */
- if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
- for_each_pipe(i915, pipe)
+ if (display->platform.alderlake_s && IS_DISPLAY_STEP(display, STEP_A0, STEP_A2))
+ for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 0;
- else if (DISPLAY_VER(i915) >= 11) {
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) >= 11) {
+ for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 2;
- } else if (DISPLAY_VER(i915) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
display_runtime->num_scalers[PIPE_A] = 2;
display_runtime->num_scalers[PIPE_B] = 2;
display_runtime->num_scalers[PIPE_C] = 1;
}
- if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
- for_each_pipe(i915, pipe)
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 4;
- else if (DISPLAY_VER(i915) >= 11)
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) >= 11)
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 6;
- else if (DISPLAY_VER(i915) == 10)
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) == 10)
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 3;
- else if (IS_BROXTON(i915)) {
+ else if (display->platform.broxton) {
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
@@ -1705,23 +1748,23 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->num_sprites[PIPE_A] = 2;
display_runtime->num_sprites[PIPE_B] = 2;
display_runtime->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- for_each_pipe(i915, pipe)
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 2;
- } else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) {
- for_each_pipe(i915, pipe)
+ } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 1;
}
- if ((IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) &&
- !(intel_de_read(i915, GU_CNTL_PROTECTED) & DEPRESENT)) {
- drm_info(&i915->drm, "Display not present, disabling\n");
+ if ((display->platform.dgfx || DISPLAY_VER(display) >= 14) &&
+ !(intel_de_read(display, GU_CNTL_PROTECTED) & DEPRESENT)) {
+ drm_info(display->drm, "Display not present, disabling\n");
goto display_fused_off;
}
- if (IS_DISPLAY_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
- u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
- u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
/*
* SFUSE_STRAP is supposed to have a bit signalling the display
@@ -1736,16 +1779,16 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(HAS_PCH_CPT(i915) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- drm_info(&i915->drm,
+ drm_info(display->drm,
"Display fused off, disabling\n");
goto display_fused_off;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- drm_info(&i915->drm, "PipeC fused off\n");
+ drm_info(display->drm, "PipeC fused off\n");
display_runtime->pipe_mask &= ~BIT(PIPE_C);
display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- } else if (DISPLAY_VER(i915) >= 9) {
- u32 dfsm = intel_de_read(i915, SKL_DFSM);
+ } else if (DISPLAY_VER(display) >= 9) {
+ u32 dfsm = intel_de_read(display, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
display_runtime->pipe_mask &= ~BIT(PIPE_A);
@@ -1763,7 +1806,7 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->fbc_mask &= ~BIT(INTEL_FBC_C);
}
- if (DISPLAY_VER(i915) >= 12 &&
+ if (DISPLAY_VER(display) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
display_runtime->pipe_mask &= ~BIT(PIPE_D);
display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -1776,15 +1819,15 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
display_runtime->has_hdcp = 0;
- if (IS_DG2(i915) || DISPLAY_VER(i915) < 13) {
+ if (display->platform.dg2 || DISPLAY_VER(display) < 13) {
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
display_runtime->fbc_mask = 0;
}
- if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ if (DISPLAY_VER(display) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
display_runtime->has_dmc = 0;
- if (IS_DISPLAY_VER(i915, 10, 12) &&
+ if (IS_DISPLAY_VER(display, 10, 12) &&
(dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
display_runtime->has_dsc = 0;
@@ -1793,8 +1836,8 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->has_dbuf_overlap_detection = false;
}
- if (DISPLAY_VER(i915) >= 20) {
- u32 cap = intel_de_read(i915, XE2LPD_DE_CAP);
+ if (DISPLAY_VER(display) >= 20) {
+ u32 cap = intel_de_read(display, XE2LPD_DE_CAP);
if (REG_FIELD_GET(XE2LPD_DE_CAP_DSC_MASK, cap) ==
XE2LPD_DE_CAP_DSC_REMOVED)
@@ -1802,18 +1845,19 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
if (REG_FIELD_GET(XE2LPD_DE_CAP_SCALER_MASK, cap) ==
XE2LPD_DE_CAP_SCALER_SINGLE) {
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
if (display_runtime->num_scalers[pipe])
display_runtime->num_scalers[pipe] = 1;
}
}
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
display_runtime->edp_typec_support =
intel_de_read(display, PICA_PHY_CONFIG_CONTROL) & EDP_ON_TYPEC;
display_runtime->rawclk_freq = intel_read_rawclk(display);
- drm_dbg_kms(&i915->drm, "rawclk rate: %d kHz\n", display_runtime->rawclk_freq);
+ drm_dbg_kms(display->drm, "rawclk rate: %d kHz\n",
+ display_runtime->rawclk_freq);
return;
@@ -1821,21 +1865,21 @@ display_fused_off:
memset(display_runtime, 0, sizeof(*display_runtime));
}
-void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+void intel_display_device_info_runtime_init(struct intel_display *display)
{
- if (HAS_DISPLAY(i915))
- __intel_display_device_info_runtime_init(i915);
+ if (HAS_DISPLAY(display))
+ __intel_display_device_info_runtime_init(display);
/* Display may have been disabled by runtime init */
- if (!HAS_DISPLAY(i915)) {
- i915->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
- i915->display.info.__device_info = &no_display;
+ if (!HAS_DISPLAY(display)) {
+ display->drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ display->info.__device_info = &no_display;
}
/* Disable nuclear pageflip by default on pre-g4x */
- if (!i915->display.params.nuclear_pageflip &&
- DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
- i915->drm.driver_features &= ~DRIVER_ATOMIC;
+ if (!display->params.nuclear_pageflip &&
+ DISPLAY_VER(display) < 5 && !display->platform.g4x)
+ display->drm->driver_features &= ~DRIVER_ATOMIC;
}
void intel_display_device_info_print(const struct intel_display_device_info *info,
@@ -1872,10 +1916,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
* Disabling display means taking over the display hardware, putting it to
* sleep, and preventing connectors from being connected via any means.
*/
-bool intel_display_device_enabled(struct drm_i915_private *i915)
+bool intel_display_device_enabled(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
/* Only valid when HAS_DISPLAY() is true */
drm_WARN_ON(display->drm, !HAS_DISPLAY(display));
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 43144a037f9f..9a333d9e6601 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -12,8 +12,9 @@
#include "intel_display_conversion.h"
#include "intel_display_limits.h"
-struct drm_i915_private;
struct drm_printer;
+struct intel_display;
+struct pci_dev;
/*
* Display platforms and subplatforms. Keep platforms in display version based
@@ -21,6 +22,10 @@ struct drm_printer;
* platform.
*/
#define INTEL_DISPLAY_PLATFORMS(func) \
+ /* Platform group aliases */ \
+ func(g4x) /* g45 and gm45 */ \
+ func(mobile) /* mobile platforms */ \
+ func(dgfx) /* discrete graphics */ \
/* Display ver 2 */ \
func(i830) \
func(i845g) \
@@ -38,7 +43,6 @@ struct drm_printer;
func(i965gm) \
func(g45) \
func(gm45) \
- func(g4x) /* group alias for g45 and gm45 */ \
/* Display ver 5 */ \
func(ironlake) \
/* Display ver 6 */ \
@@ -136,61 +140,64 @@ struct intel_display_platforms {
func(overlay_needs_physical); \
func(supports_tv);
-#define HAS_4TILE(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
-#define HAS_BIGJOINER(i915) (DISPLAY_VER(i915) >= 11 && HAS_DSC(i915))
-#define HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl)
-#define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash)
-#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13))
-#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915))
-#define HAS_DBUF_OVERLAP_DETECTION(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dbuf_overlap_detection)
-#define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi)
-#define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0)
-#define HAS_DMC(i915) (DISPLAY_RUNTIME_INFO(i915)->has_dmc)
-#define HAS_DOUBLE_BUFFERED_M_N(i915) (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
-#define HAS_DP_MST(i915) (DISPLAY_INFO(i915)->has_dp_mst)
-#define HAS_DP20(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
-#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
-#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
-#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
-#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
-#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
-#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4)
-#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915))
-#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
-#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-#define HAS_IPC(i915) (DISPLAY_INFO(i915)->has_ipc)
-#define HAS_IPS(i915) (IS_HASWELL_ULT(i915) || IS_BROADWELL(i915))
-#define HAS_LRR(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10))
-#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_OVERLAY(i915) (DISPLAY_INFO(i915)->has_overlay)
-#define HAS_PSR(i915) (DISPLAY_INFO(i915)->has_psr)
-#define HAS_PSR_HW_TRACKING(i915) (DISPLAY_INFO(i915)->has_psr_hw_tracking)
-#define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915) && !IS_GEMINILAKE(i915))
-#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \
- BIT(trans)) != 0)
-#define HAS_UNCOMPRESSED_JOINER(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_ULTRAJOINER(i915) ((DISPLAY_VER(i915) >= 20 || \
- (IS_DGFX(i915) && DISPLAY_VER(i915) == 14)) && \
- HAS_DSC(i915))
-#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
-#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_CMRR(i915) (DISPLAY_VER(i915) >= 20)
-#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask))
-#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug)
-#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical)
-#define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv)
+#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
+#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
+#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
+#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
+#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
+#define HAS_DBUF_OVERLAP_DETECTION(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dbuf_overlap_detection)
+#define HAS_DDI(__display) (DISPLAY_INFO(__display)->has_ddi)
+#define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0)
+#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
+#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
+#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
+#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
+#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
+#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
+#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
+#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
+#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
+#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
+#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
+#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
+#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
+#define HAS_LRR(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_LSPCON(__display) (IS_DISPLAY_VER(__display, 9, 10))
+#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
+#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
+#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
+#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
+#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_SAGV(__display) (DISPLAY_VER(__display) >= 9 && \
+ !(__display)->platform.broxton && !(__display)->platform.geminilake)
+#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
+ BIT(trans)) != 0)
+#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \
+ ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
+ HAS_DSC(__display))
+#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
+#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
+#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
+#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
+#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
/* Check that device has a display IP version within the specific range. */
-#define IS_DISPLAY_VERx100(__i915, from, until) ( \
+#define IS_DISPLAY_VERx100(__display, from, until) ( \
BUILD_BUG_ON_ZERO((from) < 200) + \
- (DISPLAY_VERx100(__i915) >= (from) && \
- DISPLAY_VERx100(__i915) <= (until)))
+ (DISPLAY_VERx100(__display) >= (from) && \
+ DISPLAY_VERx100(__display) <= (until)))
/*
* Check if a device has a specific IP version as well as a stepping within the
@@ -201,30 +208,30 @@ struct intel_display_platforms {
* hardware fix is present and the software workaround is no longer necessary.
* E.g.,
*
- * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_B2)
- * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_C0, STEP_FOREVER)
+ * IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B2)
+ * IS_DISPLAY_VERx100_STEP(display, 1400, STEP_C0, STEP_FOREVER)
*
* "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
* stepping bound for the specified IP version.
*/
-#define IS_DISPLAY_VERx100_STEP(__i915, ipver, from, until) \
- (IS_DISPLAY_VERx100((__i915), (ipver), (ipver)) && \
- IS_DISPLAY_STEP((__i915), (from), (until)))
+#define IS_DISPLAY_VERx100_STEP(__display, ipver, from, until) \
+ (IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \
+ IS_DISPLAY_STEP((__display), (from), (until)))
-#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
-#define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info)
+#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info)
-#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver)
-#define DISPLAY_VERx100(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver * 100 + \
- DISPLAY_RUNTIME_INFO(i915)->ip.rel)
-#define IS_DISPLAY_VER(i915, from, until) \
- (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
+#define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver)
+#define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \
+ DISPLAY_RUNTIME_INFO(__display)->ip.rel)
+#define IS_DISPLAY_VER(__display, from, until) \
+ (DISPLAY_VER(__display) >= (from) && DISPLAY_VER(__display) <= (until))
-#define INTEL_DISPLAY_STEP(__i915) (DISPLAY_RUNTIME_INFO(__i915)->step)
+#define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step)
-#define IS_DISPLAY_STEP(__i915, since, until) \
- (drm_WARN_ON(__to_intel_display(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
- INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
+#define IS_DISPLAY_STEP(__display, since, until) \
+ (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
+ INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
struct intel_display_runtime_info {
struct intel_display_ip_ver {
@@ -283,10 +290,10 @@ struct intel_display_device_info {
} color;
};
-bool intel_display_device_enabled(struct drm_i915_private *i915);
-void intel_display_device_probe(struct drm_i915_private *i915);
-void intel_display_device_remove(struct drm_i915_private *i915);
-void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
+bool intel_display_device_enabled(struct intel_display *display);
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
+void intel_display_device_remove(struct intel_display *display);
+void intel_display_device_info_runtime_init(struct intel_display *display);
void intel_display_device_info_print(const struct intel_display_device_info *info,
const struct intel_display_runtime_info *runtime,
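A minimal caller sketch for the changed probe signature (the surrounding PCI probe path is an assumption, not shown in this hunk):

/* Sketch: the probe path now obtains the display device directly from
 * the pci_dev; no drm_i915_private needs to be threaded through. */
struct intel_display *display = intel_display_device_probe(pdev);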
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 56b78cf6b854..1aa0b298c278 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -80,12 +80,12 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
return false;
}
-void intel_display_driver_init_hw(struct drm_i915_private *i915)
+void intel_display_driver_init_hw(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
@@ -112,12 +112,12 @@ static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
-static void intel_mode_config_init(struct drm_i915_private *i915)
+static void intel_mode_config_init(struct intel_display *display)
{
- struct drm_mode_config *mode_config = &i915->drm.mode_config;
+ struct drm_mode_config *mode_config = &display->drm->mode_config;
- drm_mode_config_init(&i915->drm);
- INIT_LIST_HEAD(&i915->display.global.obj_list);
+ drm_mode_config_init(display->drm);
+ INIT_LIST_HEAD(&display->global.obj_list);
mode_config->min_width = 0;
mode_config->min_height = 0;
@@ -128,19 +128,19 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
mode_config->helper_private = &intel_mode_config_funcs;
- mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);
/*
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
mode_config->max_width = 16384;
mode_config->max_height = 16384;
- } else if (DISPLAY_VER(i915) >= 4) {
+ } else if (DISPLAY_VER(display) >= 4) {
mode_config->max_width = 8192;
mode_config->max_height = 8192;
- } else if (DISPLAY_VER(i915) == 3) {
+ } else if (DISPLAY_VER(display) == 3) {
mode_config->max_width = 4096;
mode_config->max_height = 4096;
} else {
@@ -148,11 +148,11 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->max_height = 2048;
}
- if (IS_I845G(i915) || IS_I865G(i915)) {
- mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ if (display->platform.i845g || display->platform.i865g) {
+ mode_config->cursor_width = display->platform.i845g ? 64 : 512;
mode_config->cursor_height = 1023;
- } else if (IS_I830(i915) || IS_I85X(i915) ||
- IS_I915G(i915) || IS_I915GM(i915)) {
+ } else if (display->platform.i830 || display->platform.i85x ||
+ display->platform.i915g || display->platform.i915gm) {
mode_config->cursor_width = 64;
mode_config->cursor_height = 64;
} else {
@@ -161,18 +161,17 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
}
}
-static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+static void intel_mode_config_cleanup(struct intel_display *display)
{
- intel_atomic_global_obj_cleanup(i915);
- drm_mode_config_cleanup(&i915->drm);
+ intel_atomic_global_obj_cleanup(display);
+ drm_mode_config_cleanup(display->drm);
}
-static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc = intel_crtc_for_pipe(display,
plane->pipe);
@@ -180,41 +179,43 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
}
}
-void intel_display_driver_early_probe(struct drm_i915_private *i915)
+void intel_display_driver_early_probe(struct intel_display *display)
{
- if (!HAS_DISPLAY(i915))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (!HAS_DISPLAY(display))
return;
- spin_lock_init(&i915->display.fb_tracking.lock);
- mutex_init(&i915->display.backlight.lock);
- mutex_init(&i915->display.audio.mutex);
- mutex_init(&i915->display.wm.wm_mutex);
- mutex_init(&i915->display.pps.mutex);
- mutex_init(&i915->display.hdcp.hdcp_mutex);
+ spin_lock_init(&display->fb_tracking.lock);
+ mutex_init(&display->backlight.lock);
+ mutex_init(&display->audio.mutex);
+ mutex_init(&display->wm.wm_mutex);
+ mutex_init(&display->pps.mutex);
+ mutex_init(&display->hdcp.hdcp_mutex);
intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
- intel_color_init_hooks(&i915->display);
- intel_init_cdclk_hooks(&i915->display);
+ intel_color_init_hooks(display);
+ intel_init_cdclk_hooks(display);
intel_audio_hooks_init(i915);
intel_dpll_init_clock_hook(i915);
intel_init_display_hooks(i915);
intel_fdi_init_hook(i915);
- intel_dmc_wl_init(&i915->display);
+ intel_dmc_wl_init(display);
}
/* part #1: call before irq install */
-int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
+int intel_display_driver_probe_noirq(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (i915_inject_probe_failure(i915))
return -ENODEV;
- if (HAS_DISPLAY(i915)) {
- ret = drm_vblank_init(&i915->drm,
- INTEL_NUM_PIPES(i915));
+ if (HAS_DISPLAY(display)) {
+ ret = drm_vblank_init(display->drm,
+ INTEL_NUM_PIPES(display));
if (ret)
return ret;
}
@@ -226,24 +227,25 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
goto cleanup_bios;
/* FIXME: completely on the wrong abstraction layer */
- ret = intel_power_domains_init(i915);
+ ret = intel_power_domains_init(display);
if (ret < 0)
goto cleanup_vga;
- intel_pmdemand_init_early(i915);
+ intel_pmdemand_init_early(display);
- intel_power_domains_init_hw(i915, false);
+ intel_power_domains_init_hw(display, false);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
intel_dmc_init(display);
- i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
- i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
- intel_mode_config_init(i915);
+ intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
@@ -261,7 +263,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- ret = intel_pmdemand_init(i915);
+ ret = intel_pmdemand_init(display);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
@@ -273,7 +275,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
cleanup_vga:
intel_vga_unregister(display);
cleanup_bios:
@@ -282,7 +284,7 @@ cleanup_bios:
return ret;
}
-static void set_display_access(struct drm_i915_private *i915,
+static void set_display_access(struct intel_display *display,
bool any_task_allowed,
struct task_struct *allowed_task)
{
@@ -290,20 +292,20 @@ static void set_display_access(struct drm_i915_private *i915,
int err;
intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
- err = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ err = drm_modeset_lock_all_ctx(display->drm, &ctx);
if (err)
continue;
- i915->display.access.any_task_allowed = any_task_allowed;
- i915->display.access.allowed_task = allowed_task;
+ display->access.any_task_allowed = any_task_allowed;
+ display->access.allowed_task = allowed_task;
}
- drm_WARN_ON(&i915->drm, err);
+ drm_WARN_ON(display->drm, err);
}
/**
* intel_display_driver_enable_user_access - Enable display HW access for all threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Enable the display HW access for all threads. Examples of such accesses
* are modeset commits and connector probing.
@@ -311,16 +313,18 @@ static void set_display_access(struct drm_i915_private *i915,
* This function should be called during driver loading and system resume once
* all the HW initialization steps are done.
*/
-void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
+void intel_display_driver_enable_user_access(struct intel_display *display)
{
- set_display_access(i915, true, NULL);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ set_display_access(display, true, NULL);
intel_hpd_enable_detection_work(i915);
}
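A hedged sketch of the load-time ordering this helper implies, grounded in the register/probe hunks later in this patch (caller context assumed):

/* user access stays disabled through HW init ... */
intel_display_driver_probe(display);		/* probe part #3 */
intel_display_driver_register(display);		/* -> enable_user_access() */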
/**
* intel_display_driver_disable_user_access - Disable display HW access for user threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Disable the display HW access for user threads. Examples of such accesses
* are modeset commits and connector probing. For the current thread the
@@ -335,16 +339,18 @@ void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
* This function should be called during driver loading/unloading and system
* suspend/shutdown before starting the HW init/deinit programming.
*/
-void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
+void intel_display_driver_disable_user_access(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
intel_hpd_disable_detection_work(i915);
- set_display_access(i915, false, current);
+ set_display_access(display, false, current);
}
/**
* intel_display_driver_suspend_access - Suspend display HW access for all threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Disable the display HW access for all threads. Examples of such accesses
* are modeset commits and connector probing. This call should be either
@@ -354,14 +360,14 @@ void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
* This function should be called during driver unloading and system
* suspend/shutdown after completing the HW deinit programming.
*/
-void intel_display_driver_suspend_access(struct drm_i915_private *i915)
+void intel_display_driver_suspend_access(struct intel_display *display)
{
- set_display_access(i915, false, NULL);
+ set_display_access(display, false, NULL);
}
/**
* intel_display_driver_resume_access - Resume display HW access for the resume thread
- * @i915: i915 device instance
+ * @display: display device instance
*
* Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples of such accesses
@@ -373,14 +379,14 @@ void intel_display_driver_suspend_access(struct drm_i915_private *i915)
* This function should be called during system resume before starting the HW
* init steps.
*/
-void intel_display_driver_resume_access(struct drm_i915_private *i915)
+void intel_display_driver_resume_access(struct intel_display *display)
{
- set_display_access(i915, false, current);
+ set_display_access(display, false, current);
}
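Taken together, the four access helpers imply the following suspend/resume ordering; this is a sketch assembled from the kernel-docs above, not code from the patch:

/* system suspend / driver unload */
intel_display_driver_disable_user_access(display);	/* before HW deinit */
/* ... HW deinit programming ... */
intel_display_driver_suspend_access(display);		/* after HW deinit */

/* system resume */
intel_display_driver_resume_access(display);		/* resume thread only */
/* ... HW init steps ... */
intel_display_driver_enable_user_access(display);	/* all threads again */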
/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
- * @i915: i915 device instance
+ * @display: display device instance
*
* Check whether the current thread has display HW access, print a debug
* message if it doesn't. Such accesses are modeset commits and connector
@@ -389,26 +395,26 @@ void intel_display_driver_resume_access(struct drm_i915_private *i915)
* Returns %true if the current thread has display HW access, %false
* otherwise.
*/
-bool intel_display_driver_check_access(struct drm_i915_private *i915)
+bool intel_display_driver_check_access(struct intel_display *display)
{
char comm[TASK_COMM_LEN];
char current_task[TASK_COMM_LEN + 16];
char allowed_task[TASK_COMM_LEN + 16] = "none";
- if (i915->display.access.any_task_allowed ||
- i915->display.access.allowed_task == current)
+ if (display->access.any_task_allowed ||
+ display->access.allowed_task == current)
return true;
snprintf(current_task, sizeof(current_task), "%s[%d]",
get_task_comm(comm, current),
task_pid_vnr(current));
- if (i915->display.access.allowed_task)
+ if (display->access.allowed_task)
snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
- get_task_comm(comm, i915->display.access.allowed_task),
- task_pid_vnr(i915->display.access.allowed_task));
+ get_task_comm(comm, display->access.allowed_task),
+ task_pid_vnr(display->access.allowed_task));
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Reject display access from task %s (allowed to %s)\n",
current_task, allowed_task);
@@ -416,14 +422,13 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915)
}
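A hypothetical consumer sketch: a connector ->detect() hook bailing out early when the calling thread has no display HW access. The hook itself is illustrative; only the guard pattern is what this API provides:

static enum drm_connector_status
hypothetical_detect(struct drm_connector *connector, bool force)
{
	struct intel_display *display = to_intel_display(connector->dev);

	/* e.g. reject userspace probes while access is restricted */
	if (!intel_display_driver_check_access(display))
		return connector->status;

	/* ... actual HW detection ... */
	return connector_status_connected;
}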
/* part #2: call after irq install, but before gem init */
-int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
+int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_device *dev = display->drm;
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
intel_wm_init(i915);
@@ -434,22 +439,22 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_gmbus_setup(display);
- drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
- INTEL_NUM_PIPES(i915),
- INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+ drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(display),
+ INTEL_NUM_PIPES(display) > 1 ? "s" : "");
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret)
goto err_mode_config;
}
- intel_plane_possible_crtcs_init(i915);
+ intel_plane_possible_crtcs_init(display);
intel_shared_dpll_init(i915);
intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
- intel_display_driver_init_hw(i915);
+ intel_display_driver_init_hw(display);
intel_dpll_update_ref_clks(i915);
if (display->cdclk.max_cdclk_freq == 0)
@@ -465,21 +470,21 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
if (ret)
goto err_hdcp;
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
+ drm_modeset_lock_all(display->drm);
+ intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(display->drm);
- intel_initial_plane_config(i915);
+ intel_initial_plane_config(display);
/*
* Make sure hardware watermarks really match the state we read out.
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH(i915))
+ if (!HAS_GMCH(display))
ilk_wm_sanitize(i915);
return 0;
@@ -487,18 +492,18 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
err_hdcp:
intel_hdcp_component_fini(display);
err_mode_config:
- intel_mode_config_cleanup(i915);
+ intel_mode_config_cleanup(display);
return ret;
}
/* part #3: call after gem init */
-int intel_display_driver_probe(struct drm_i915_private *i915)
+int intel_display_driver_probe(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
/*
@@ -514,11 +519,11 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
 * are already calculated and there are no assert_plane warnings
* during bootup.
*/
- ret = intel_initial_commit(&i915->drm);
+ ret = intel_initial_commit(display->drm);
if (ret)
- drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+ drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);
- intel_overlay_setup(i915);
+ intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
@@ -528,13 +533,13 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
return 0;
}
-void intel_display_driver_register(struct drm_i915_private *i915)
+void intel_display_driver_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS,
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
/* Must be done after probing outputs */
@@ -543,7 +548,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_audio_init(i915);
- intel_display_driver_enable_user_access(i915);
+ intel_display_driver_enable_user_access(display);
intel_audio_register(i915);
@@ -554,41 +559,42 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* fbdev configuration, for which we use the
* fbdev->async_cookie.
*/
- drm_kms_helper_poll_init(&i915->drm);
+ drm_kms_helper_poll_init(display->drm);
intel_hpd_poll_disable(i915);
intel_fbdev_setup(i915);
- intel_display_device_info_print(DISPLAY_INFO(i915),
- DISPLAY_RUNTIME_INFO(i915), &p);
+ intel_display_device_info_print(DISPLAY_INFO(display),
+ DISPLAY_RUNTIME_INFO(display), &p);
}
/* part #1: call before irq uninstall */
-void intel_display_driver_remove(struct drm_i915_private *i915)
+void intel_display_driver_remove(struct intel_display *display)
{
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- flush_workqueue(i915->display.wq.flip);
- flush_workqueue(i915->display.wq.modeset);
+ flush_workqueue(display->wq.flip);
+ flush_workqueue(display->wq.modeset);
+ flush_workqueue(display->wq.cleanup);
/*
* MST topology needs to be suspended so we don't have any calls to
* fbdev after it's finalized. MST will be destroyed later as part of
* drm_mode_config_cleanup()
*/
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
}
/* part #2: call after irq uninstall */
-void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
+void intel_display_driver_remove_noirq(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- intel_display_driver_suspend_access(i915);
+ intel_display_driver_suspend_access(display);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -603,55 +609,54 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_hdcp_component_fini(display);
- intel_mode_config_cleanup(i915);
+ intel_mode_config_cleanup(display);
intel_dp_tunnel_mgr_cleanup(display);
- intel_overlay_cleanup(i915);
+ intel_overlay_cleanup(display);
intel_gmbus_teardown(display);
- destroy_workqueue(i915->display.wq.flip);
- destroy_workqueue(i915->display.wq.modeset);
+ destroy_workqueue(display->wq.flip);
+ destroy_workqueue(display->wq.modeset);
+ destroy_workqueue(display->wq.cleanup);
- intel_fbc_cleanup(&i915->display);
+ intel_fbc_cleanup(display);
}
/* part #3: call after gem init */
-void intel_display_driver_remove_nogem(struct drm_i915_private *i915)
+void intel_display_driver_remove_nogem(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
intel_dmc_fini(display);
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
intel_vga_unregister(display);
intel_bios_driver_remove(display);
}
-void intel_display_driver_unregister(struct drm_i915_private *i915)
+void intel_display_driver_unregister(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- drm_client_dev_unregister(&i915->drm);
+ drm_client_dev_unregister(display->drm);
/*
* After flushing the fbdev (incl. a late async config which
 * will have delayed queuing of a hotplug event), flush
* the hotplug events.
*/
- drm_kms_helper_poll_fini(&i915->drm);
+ drm_kms_helper_poll_fini(display->drm);
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
intel_audio_deinit(i915);
- drm_atomic_helper_shutdown(&i915->drm);
+ drm_atomic_helper_shutdown(display->drm);
acpi_video_unregister();
intel_opregion_unregister(display);
@@ -661,30 +666,36 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
 * turn all crtcs off, but do not adjust state
* This has to be paired with a call to intel_modeset_setup_hw_state.
*/
-int intel_display_driver_suspend(struct drm_i915_private *i915)
+int intel_display_driver_suspend(struct intel_display *display)
{
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
- state = drm_atomic_helper_suspend(&i915->drm);
+ state = drm_atomic_helper_suspend(display->drm);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
- drm_err(&i915->drm, "Suspending crtc's failed with %i\n",
+ drm_err(display->drm, "Suspending crtc's failed with %i\n",
ret);
else
- i915->display.restore.modeset_state = state;
+ display->restore.modeset_state = state;
+
+ /* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
+ flush_workqueue(display->wq.cleanup);
+
+ intel_dp_mst_suspend(display);
+
return ret;
}
int
-__intel_display_driver_resume(struct drm_i915_private *i915,
+__intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
@@ -710,33 +721,37 @@ __intel_display_driver_resume(struct drm_i915_private *i915,
}
/* ignore any reset values/BIOS leftovers in the WM registers */
- if (!HAS_GMCH(i915))
+ if (!HAS_GMCH(display))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+ drm_WARN_ON(display->drm, ret == -EDEADLK);
return ret;
}
-void intel_display_driver_resume(struct drm_i915_private *i915)
+void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_atomic_state *state = i915->display.restore.modeset_state;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- i915->display.restore.modeset_state = NULL;
+ /* MST sideband requires HPD interrupts enabled */
+ intel_dp_mst_resume(display);
+
+ display->restore.modeset_state = NULL;
if (state)
state->acquire_ctx = &ctx;
drm_modeset_acquire_init(&ctx, 0);
while (1) {
- ret = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
if (ret != -EDEADLK)
break;
@@ -744,14 +759,14 @@ void intel_display_driver_resume(struct drm_i915_private *i915)
}
if (!ret)
- ret = __intel_display_driver_resume(i915, state, &ctx);
+ ret = __intel_display_driver_resume(display, state, &ctx);
skl_watermark_ipc_update(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (ret)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h
index 42cc4af6d3fd..2966ff91b219 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.h
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.h
@@ -9,34 +9,34 @@
#include <linux/types.h>
struct drm_atomic_state;
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct intel_display;
struct pci_dev;
bool intel_display_driver_probe_defer(struct pci_dev *pdev);
-void intel_display_driver_init_hw(struct drm_i915_private *i915);
-void intel_display_driver_early_probe(struct drm_i915_private *i915);
-int intel_display_driver_probe_noirq(struct drm_i915_private *i915);
-int intel_display_driver_probe_nogem(struct drm_i915_private *i915);
-int intel_display_driver_probe(struct drm_i915_private *i915);
-void intel_display_driver_register(struct drm_i915_private *i915);
-void intel_display_driver_remove(struct drm_i915_private *i915);
-void intel_display_driver_remove_noirq(struct drm_i915_private *i915);
-void intel_display_driver_remove_nogem(struct drm_i915_private *i915);
-void intel_display_driver_unregister(struct drm_i915_private *i915);
-int intel_display_driver_suspend(struct drm_i915_private *i915);
-void intel_display_driver_resume(struct drm_i915_private *i915);
+void intel_display_driver_init_hw(struct intel_display *display);
+void intel_display_driver_early_probe(struct intel_display *display);
+int intel_display_driver_probe_noirq(struct intel_display *display);
+int intel_display_driver_probe_nogem(struct intel_display *display);
+int intel_display_driver_probe(struct intel_display *display);
+void intel_display_driver_register(struct intel_display *display);
+void intel_display_driver_remove(struct intel_display *display);
+void intel_display_driver_remove_noirq(struct intel_display *display);
+void intel_display_driver_remove_nogem(struct intel_display *display);
+void intel_display_driver_unregister(struct intel_display *display);
+int intel_display_driver_suspend(struct intel_display *display);
+void intel_display_driver_resume(struct intel_display *display);
/* interface for intel_display_reset.c */
-int __intel_display_driver_resume(struct drm_i915_private *i915,
+int __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
-void intel_display_driver_enable_user_access(struct drm_i915_private *i915);
-void intel_display_driver_disable_user_access(struct drm_i915_private *i915);
-void intel_display_driver_suspend_access(struct drm_i915_private *i915);
-void intel_display_driver_resume_access(struct drm_i915_private *i915);
-bool intel_display_driver_check_access(struct drm_i915_private *i915);
+void intel_display_driver_enable_user_access(struct intel_display *display);
+void intel_display_driver_disable_user_access(struct intel_display *display);
+void intel_display_driver_suspend_access(struct intel_display *display);
+void intel_display_driver_resume_access(struct intel_display *display);
+bool intel_display_driver_check_access(struct intel_display *display);
#endif /* __INTEL_DISPLAY_DRIVER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index e1547ebce60e..069043f9d894 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -434,7 +434,8 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_lock(&dev_priv->irq_lock);
- if (!dev_priv->display.irq.display_irqs_enabled) {
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ !dev_priv->display.irq.vlv_display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
@@ -843,7 +844,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (DISPLAY_VER(dev_priv) >= 14)
+ struct intel_display *display = &dev_priv->display;
+
+ if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
@@ -853,7 +856,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE5_FAULT |
@@ -861,7 +864,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) == 12)
+ else if (DISPLAY_VER(display) == 12)
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
@@ -871,7 +874,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) == 11)
+ else if (DISPLAY_VER(display) == 11)
return GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
GEN11_PIPE_PLANE6_FAULT |
@@ -880,7 +883,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_CURSOR_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
@@ -1420,7 +1423,6 @@ static void intel_display_vblank_dc_work(struct work_struct *work)
{
struct intel_display *display =
container_of(work, typeof(*display), irq.vblank_dc_work);
- struct drm_i915_private *i915 = to_i915(display->drm);
int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
/*
@@ -1429,7 +1431,7 @@ static void intel_display_vblank_dc_work(struct work_struct *work)
* PSR code. If DC3CO is taken into use we need take that into account
* here as well.
*/
- intel_display_power_set_target_dc_state(i915, vblank_wa_num_pipes ? DC_STATE_DISABLE :
+ intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
DC_STATE_EN_UPTO_DC6);
}
@@ -1479,7 +1481,7 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
schedule_work(&display->irq.vblank_dc_work);
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1497,6 +1499,12 @@ void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
+void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->display.irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(dev_priv);
+}
+
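A caller sketch for the renamed flag (hedged; inferred from the lockdep assertions visible in the enable/disable helpers below):

/* the VLV/CHV runtime-PM path toggles display irqs under the irq lock */
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);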
void i9xx_display_irq_reset(struct drm_i915_private *i915)
{
if (I915_HAS_HOTPLUG(i915)) {
@@ -1516,6 +1524,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 enable_mask;
enum pipe pipe;
+ if (!dev_priv->display.irq.vlv_display_irqs_enabled)
+ return;
+
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
@@ -1688,13 +1699,13 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
+ if (dev_priv->display.irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.display_irqs_enabled = true;
+ dev_priv->display.irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(dev_priv);
vlv_display_irq_postinstall(dev_priv);
}
}
@@ -1703,13 +1714,13 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (!dev_priv->display.irq.display_irqs_enabled)
+ if (!dev_priv->display.irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.display_irqs_enabled = false;
+ dev_priv->display.irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(dev_priv);
}
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
@@ -1902,17 +1913,6 @@ void intel_display_irq_init(struct drm_i915_private *i915)
{
i915->drm.vblank_disable_immediate = true;
- /*
- * Most platforms treat the display irq block as an always-on power
- * domain. vlv/chv can disable it at runtime and need special care to
- * avoid writing any of the display block registers outside of the power
- * domain. We defer setting up the display irqs in this case to the
- * runtime pm.
- */
- i915->display.irq.display_irqs_enabled = true;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.irq.display_irqs_enabled = false;
-
intel_hotplug_irq_init(i915);
INIT_WORK(&i915->display.irq.vblank_dc_work,
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 024de8abcb1a..f92e4640a613 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -3,8 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string_choices.h>
+
+#include <drm/drm_print.h>
+
#include "intel_display_params.h"
-#include "i915_drv.h"
#define intel_display_param_named(name, T, perm, desc) \
module_param_named(name, intel_display_modparams.name, T, perm); \
@@ -123,10 +128,10 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
"(0=disabled, 1=enabled) "
"Default: 1");
-intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400,
+intel_display_param_named_unsafe(enable_dmc_wl, int, 0400,
"Enable DMC wakelock "
- "(0=disabled, 1=enabled) "
- "Default: 0");
+ "(-1=use per-chip default, 0=disabled, 1=enabled) "
+ "Default: -1");
__maybe_unused
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index dcb6face936a..5317138e6044 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -47,7 +47,7 @@ struct drm_printer;
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
- param(bool, enable_dmc_wl, false, 0400) \
+ param(int, enable_dmc_wl, -1, 0400) \
#define MEMBER(T, member, ...) T member;
struct intel_display_params {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 2766fd9208b0..d3b8453a1705 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -28,12 +28,12 @@
#include "skl_watermark_regs.h"
#include "vlv_sideband.h"
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
- for_each_power_well(__dev_priv, __power_well) \
+#define for_each_power_domain_well(__display, __power_well, __domain) \
+ for_each_power_well((__display), __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
- for_each_power_well_reverse(__dev_priv, __power_well) \
+#define for_each_power_domain_well_reverse(__display, __power_well, __domain) \
+ for_each_power_well_reverse((__display), __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
static const char *
@@ -198,18 +198,18 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
}
}
-static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+static bool __intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(dev_priv->drm.dev))
+ if (pm_runtime_suspended(display->drm->dev))
return false;
is_enabled = true;
- for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
+ for_each_power_domain_well_reverse(display, power_well, domain) {
if (intel_power_well_is_always_on(power_well))
continue;
@@ -242,23 +242,22 @@ static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
bool ret;
- power_domains = &dev_priv->display.power.domains;
-
mutex_lock(&power_domains->lock);
- ret = __intel_display_power_is_enabled(dev_priv, domain);
+ ret = __intel_display_power_is_enabled(display, domain);
mutex_unlock(&power_domains->lock);
return ret;
}
static u32
-sanitize_target_dc_state(struct drm_i915_private *i915,
+sanitize_target_dc_state(struct intel_display *display,
u32 target_dc_state)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
@@ -282,43 +281,43 @@ sanitize_target_dc_state(struct drm_i915_private *i915,
/**
* intel_display_power_set_target_dc_state - Set target dc state.
- * @dev_priv: i915 device
+ * @display: display device
* @state: state which needs to be set as target_dc_state.
*
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
*/
-void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state)
{
struct i915_power_well *power_well;
bool dc_off_enabled;
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
mutex_lock(&power_domains->lock);
- power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
- if (drm_WARN_ON(&dev_priv->drm, !power_well))
+ if (drm_WARN_ON(display->drm, !power_well))
goto unlock;
- state = sanitize_target_dc_state(dev_priv, state);
+ state = sanitize_target_dc_state(display, state);
if (state == power_domains->target_dc_state)
goto unlock;
- dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
+ dc_off_enabled = intel_power_well_is_enabled(display, power_well);
/*
* If DC off power well is disabled, need to enable and disable the
* DC off power well to effect target DC state.
*/
if (!dc_off_enabled)
- intel_power_well_enable(dev_priv, power_well);
+ intel_power_well_enable(display, power_well);
power_domains->target_dc_state = state;
if (!dc_off_enabled)
- intel_power_well_disable(dev_priv, power_well);
+ intel_power_well_disable(display, power_well);
unlock:
mutex_unlock(&power_domains->lock);
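Usage sketch, mirroring the vblank DC workaround converted earlier in this patch:

/* prevent DC entry around a critical section, then restore the default */
intel_display_power_set_target_dc_state(display, DC_STATE_DISABLE);
/* ... section where DC states must stay off ... */
intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);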
@@ -338,11 +337,11 @@ static void __async_put_domains_mask(struct i915_power_domains *power_domains,
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
- return !drm_WARN_ON(&i915->drm,
+ return !drm_WARN_ON(display->drm,
bitmap_intersects(power_domains->async_put_domains[0].bits,
power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM));
@@ -351,21 +350,21 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
__async_put_domains_mask(power_domains, &async_put_mask);
- err |= drm_WARN_ON(&i915->drm,
+ err |= drm_WARN_ON(display->drm,
!!power_domains->async_put_wakeref !=
!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, &async_put_mask)
- err |= drm_WARN_ON(&i915->drm,
+ err |= drm_WARN_ON(display->drm,
power_domains->domain_use_count[domain] != 1);
return !err;
@@ -374,27 +373,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, struct intel_power_domain_mask *mask)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
+ drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask)
- drm_dbg(&i915->drm, "%s use_count %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg_kms(display->drm, "%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
- drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
- str_yes_no(power_domains->async_put_wakeref));
+ drm_dbg_kms(display->drm, "async_put_wakeref: %s\n",
+ str_yes_no(power_domains->async_put_wakeref));
print_power_domains(power_domains, "async_put_domains[0]",
&power_domains->async_put_domains[0]);
@@ -454,10 +453,11 @@ cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
}
static bool
-intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -483,17 +483,17 @@ out_verify:
}
static void
-__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_get_domain(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
- if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+ if (intel_display_power_grab_async_put_ref(display, domain))
return;
- for_each_power_domain_well(dev_priv, power_well, domain)
- intel_power_well_get(dev_priv, power_well);
+ for_each_power_domain_well(display, power_well, domain)
+ intel_power_well_get(display, power_well);
power_domains->domain_use_count[domain]++;
}
@@ -513,11 +513,12 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
- __intel_display_power_get_domain(dev_priv, domain);
+ __intel_display_power_get_domain(display, domain);
mutex_unlock(&power_domains->lock);
return wakeref;
@@ -539,7 +540,8 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
@@ -549,8 +551,8 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
- if (__intel_display_power_is_enabled(dev_priv, domain)) {
- __intel_display_power_get_domain(dev_priv, domain);
+ if (__intel_display_power_is_enabled(display, domain)) {
+ __intel_display_power_get_domain(display, domain);
is_enabled = true;
} else {
is_enabled = false;
@@ -567,38 +569,36 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
}
static void
-__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_put_domain(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
const char *name = intel_display_power_domain_str(domain);
struct intel_power_domain_mask async_put_mask;
- power_domains = &dev_priv->display.power.domains;
-
- drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ drm_WARN(display->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
name);
async_put_domains_mask(power_domains, &async_put_mask);
- drm_WARN(&dev_priv->drm,
+ drm_WARN(display->drm,
test_bit(domain, async_put_mask.bits),
"Async disabling of domain %s is pending\n",
name);
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_reverse(dev_priv, power_well, domain)
- intel_power_well_put(dev_priv, power_well);
+ for_each_power_domain_well_reverse(display, power_well, domain)
+ intel_power_well_put(display, power_well);
}
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+static void __intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
mutex_lock(&power_domains->lock);
- __intel_display_power_put_domain(dev_priv, domain);
+ __intel_display_power_put_domain(display, domain);
mutex_unlock(&power_domains->lock);
}
@@ -607,23 +607,24 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
- drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
+ drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
- drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
- &power_domains->async_put_work,
- msecs_to_jiffies(delay_ms)));
+ drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(delay_ms)));
}
static void
release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
- struct drm_i915_private *dev_priv =
- container_of(power_domains, struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
@@ -633,7 +634,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
async_put_domains_clear_domain(power_domains, domain);
- __intel_display_power_put_domain(dev_priv, domain);
+ __intel_display_power_put_domain(display, domain);
}
intel_runtime_pm_put(rpm, wakeref);
@@ -642,10 +643,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
static void
intel_display_power_put_async_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.power.domains.async_put_work.work);
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = container_of(work, struct intel_display,
+ power.domains.async_put_work.work);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
intel_wakeref_t old_work_wakeref = NULL;
@@ -711,7 +712,8 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
@@ -720,12 +722,12 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
mutex_lock(&power_domains->lock);
if (power_domains->domain_use_count[domain] > 1) {
- __intel_display_power_put_domain(i915, domain);
+ __intel_display_power_put_domain(display, domain);
goto out_verify;
}
- drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
+ drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
@@ -764,7 +766,8 @@ out_verify:
*/
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -789,22 +792,23 @@ out_verify:
/**
* intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
- * @i915: i915 device instance
+ * @display: display device instance
*
* Like intel_display_power_flush_work(), but also ensure that the work
* handler function is not running any more when this function returns.
*/
static void
-intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+intel_display_power_flush_work_sync(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_display_power_flush_work(i915);
cancel_async_put_work(power_domains, true);
verify_async_put_domains_state(power_domains);
- drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+ drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -822,7 +826,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put(dev_priv, domain);
+ struct intel_display *display = &dev_priv->display;
+
+ __intel_display_power_put(display, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
@@ -842,7 +848,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- __intel_display_power_put(dev_priv, domain);
+ struct intel_display *display = &dev_priv->display;
+
+ __intel_display_power_put(display, domain);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif
@@ -852,9 +860,10 @@ intel_display_power_get_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t __maybe_unused wf;
- drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+ drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -868,9 +877,10 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wf;
- drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+ drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get_if_enabled(i915, domain);
if (!wf)
@@ -889,9 +899,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask)
{
+ struct intel_display *display = &i915->display;
enum intel_display_power_domain domain;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask) {
@@ -906,8 +917,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
}
static int
-sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
- int disable_power_well)
+sanitize_disable_power_well_option(int disable_power_well)
{
if (disable_power_well >= 0)
return !!disable_power_well;
@@ -915,27 +925,26 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
return 1;
}
-static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
- int enable_dc)
+static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
{
u32 mask;
int requested_dc;
int max_dc;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return 0;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
max_dc = 2;
- else if (IS_DG2(dev_priv))
+ else if (display->platform.dg2)
max_dc = 1;
- else if (IS_DG1(dev_priv))
+ else if (display->platform.dg1)
max_dc = 3;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
max_dc = 4;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
max_dc = 1;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
max_dc = 2;
else
max_dc = 0;
@@ -945,11 +954,10 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
- mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- DISPLAY_VER(dev_priv) >= 11 ?
- DC_STATE_EN_DC9 : 0;
+ mask = display->platform.geminilake || display->platform.broxton ||
+ DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;
- if (!dev_priv->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -957,12 +965,12 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
@@ -982,30 +990,29 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
break;
}
- drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
+ drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
/**
* intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
 * Initializes the power domain structures for @display depending upon the
* supported platform.
*/
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
+int intel_power_domains_init(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
- dev_priv->display.params.disable_power_well =
- sanitize_disable_power_well_option(dev_priv,
- dev_priv->display.params.disable_power_well);
+ display->params.disable_power_well =
+ sanitize_disable_power_well_option(display->params.disable_power_well);
power_domains->allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
+ get_allowed_dc_mask(display, display->params.enable_dc);
power_domains->target_dc_state =
- sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1017,39 +1024,39 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_cleanup - clean up power domains resources
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* Release any resources acquired by intel_power_domains_init()
*/
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+void intel_power_domains_cleanup(struct intel_display *display)
{
- intel_display_power_map_cleanup(&dev_priv->display.power.domains);
+ intel_display_power_map_cleanup(&display->power.domains);
}
-static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+static void intel_power_domains_sync_hw(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
- for_each_power_well(dev_priv, power_well)
- intel_power_well_sync_hw(dev_priv, power_well);
+ for_each_power_well(display, power_well)
+ intel_power_well_sync_hw(display, power_well);
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
+static void gen9_dbuf_slice_set(struct intel_display *display,
enum dbuf_slice slice, bool enable)
{
i915_reg_t reg = DBUF_CTL_S(slice);
bool state;
- intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
+ intel_de_rmw(display, reg, DBUF_POWER_REQUEST,
enable ? DBUF_POWER_REQUEST : 0);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(10);
- state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
- drm_WARN(&dev_priv->drm, enable != state,
+ state = intel_de_read(display, reg) & DBUF_POWER_STATE;
+ drm_WARN(display->drm, enable != state,
"DBuf slice %d power %s timeout!\n",
slice, str_enable_disable(enable));
}
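gen9_dbuf_slice_set() is an instance of the request/ack register handshake: write a request bit, flush it with a posting read, wait a fixed settle time, then verify the hardware-owned state bit. A self-contained model with a simulated register (all names invented for the sketch):

#include <stdbool.h>
#include <stdint.h>

#define POWER_REQUEST (1u << 31)
#define POWER_STATE   (1u << 30)

static uint32_t fake_dbuf_ctl;          /* stands in for the MMIO register */

static void fake_hw_settle(void)        /* what real hardware does async */
{
        if (fake_dbuf_ctl & POWER_REQUEST)
                fake_dbuf_ctl |= POWER_STATE;
        else
                fake_dbuf_ctl &= ~POWER_STATE;
}

static bool dbuf_slice_set(bool enable)
{
        if (enable)
                fake_dbuf_ctl |= POWER_REQUEST;
        else
                fake_dbuf_ctl &= ~POWER_REQUEST;
        (void)fake_dbuf_ctl;            /* posting read flushes the write */
        fake_hw_settle();               /* udelay(10) window in the driver */

        /* the driver drm_WARN()s if state never followed the request */
        return !!(fake_dbuf_ctl & POWER_STATE) == enable;
}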
@@ -1057,15 +1064,16 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
- u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
enum dbuf_slice slice;
- drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
+ drm_WARN(display->drm, req_slices & ~slice_mask,
"Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
req_slices, slice_mask);
- drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+ drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
/*
@@ -1077,25 +1085,25 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
*/
mutex_lock(&power_domains->lock);
- for_each_dbuf_slice(dev_priv, slice)
- gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
+ for_each_dbuf_slice(display, slice)
+ gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));
- dev_priv->display.dbuf.enabled_slices = req_slices;
+ display->dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_enable(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 slices_mask;
- dev_priv->display.dbuf.enabled_slices =
- intel_enabled_dbuf_slices_mask(dev_priv);
+ display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;
+ slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, slices_mask);
+ if (DISPLAY_VER(display) >= 14)
+ intel_pmdemand_program_dbuf(display, slices_mask);
/*
* Just power up at least 1 slice, we will
@@ -1104,33 +1112,35 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
gen9_dbuf_slices_update(dev_priv, slices_mask);
}
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_disable(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
gen9_dbuf_slices_update(dev_priv, 0);
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, 0);
+ if (DISPLAY_VER(display) >= 14)
+ intel_pmdemand_program_dbuf(display, 0);
}
-static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
+static void gen12_dbuf_slices_config(struct intel_display *display)
{
enum dbuf_slice slice;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (display->platform.alderlake_p)
return;
- for_each_dbuf_slice(dev_priv, slice)
- intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
+ for_each_dbuf_slice(display, slice)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
DBUF_TRACKER_STATE_SERVICE(8));
}
-static void icl_mbus_init(struct drm_i915_private *dev_priv)
+static void icl_mbus_init(struct intel_display *display)
{
- unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask;
+ unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask;
u32 mask, val, i;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
@@ -1147,16 +1157,16 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
* expect us to program the abox_ctl0 register as well, even though
* we don't have to program other instance-0 registers like BW_BUDDY.
*/
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(display) == 12)
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
- intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
+ intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
}
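icl_mbus_init() programs one MBUS_ABOX_CTL instance per bit set in abox_regs via for_each_set_bit(). A userspace model of that iteration, using the GCC/Clang count-trailing-zeros builtin (demo values only):

#include <stdint.h>
#include <stdio.h>

static void for_each_set_bit_demo(uint32_t mask)
{
        while (mask) {
                unsigned int idx = (unsigned int)__builtin_ctz(mask);

                printf("program instance %u\n", idx); /* rmw in the driver */
                mask &= mask - 1;       /* clear the lowest set bit */
        }
}

int main(void)
{
        for_each_set_bit_demo(0x5);     /* instances 0 and 2 */
        return 0;
}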
-static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+static void hsw_assert_cdclk(struct intel_display *display)
{
- u32 val = intel_de_read(dev_priv, LCPLL_CTL);
+ u32 val = intel_de_read(display, LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -1165,18 +1175,18 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
*/
if (val & LCPLL_CD_SOURCE_FCLK)
- drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
+ drm_err(display->drm, "CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
- drm_err(&dev_priv->drm, "LCPLL is disabled\n");
+ drm_err(display->drm, "LCPLL is disabled\n");
if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
- drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
+ drm_err(display->drm, "LCPLL not using non-SSC reference\n");
}
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+static void assert_can_disable_lcpll(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
for_each_intel_crtc(display->drm, crtc)
@@ -1201,7 +1211,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
INTEL_DISPLAY_STATE_WARN(display,
intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
INTEL_DISPLAY_STATE_WARN(display,
intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
@@ -1225,23 +1235,24 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
"IRQs enabled\n");
}
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+static u32 hsw_read_dcomp(struct intel_display *display)
{
- if (IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv, D_COMP_HSW);
+ if (display->platform.haswell)
+ return intel_de_read(display, D_COMP_HSW);
else
- return intel_de_read(dev_priv, D_COMP_BDW);
+ return intel_de_read(display, D_COMP_BDW);
}
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
- if (IS_HASWELL(dev_priv)) {
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (display->platform.haswell) {
if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
- drm_dbg_kms(&dev_priv->drm,
- "Failed to write to D_COMP\n");
+ drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
- intel_de_write(dev_priv, D_COMP_BDW, val);
- intel_de_posting_read(dev_priv, D_COMP_BDW);
+ intel_de_write(display, D_COMP_BDW, val);
+ intel_de_posting_read(display, D_COMP_BDW);
}
}
@@ -1253,45 +1264,45 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+static void hsw_disable_lcpll(struct intel_display *display,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
- assert_can_disable_lcpll(dev_priv);
+ assert_can_disable_lcpll(display);
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_write(display, LCPLL_CTL, val);
- if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
- drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
+ drm_err(display->drm, "Switching to FCLK failed\n");
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
- intel_de_write(dev_priv, LCPLL_CTL, val);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_write(display, LCPLL_CTL, val);
+ intel_de_posting_read(display, LCPLL_CTL);
- if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "LCPLL still locked\n");
+ if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
+ drm_err(display->drm, "LCPLL still locked\n");
- val = hsw_read_dcomp(dev_priv);
+ val = hsw_read_dcomp(display);
val |= D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
+ hsw_write_dcomp(display, val);
ndelay(100);
- if (wait_for((hsw_read_dcomp(dev_priv) &
+ if (wait_for((hsw_read_dcomp(display) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
- drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
+ drm_err(display->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
+ intel_de_posting_read(display, LCPLL_CTL);
}
}
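Several steps above rely on bounded polling (wait_for_us(), intel_de_wait_for_clear()): spin on a condition until it holds or a deadline passes, and let the caller log on timeout. A minimal standalone model, with the time budget reduced to an iteration count:

#include <stdbool.h>
#include <stdint.h>

static bool wait_for_clear(const volatile uint32_t *reg, uint32_t mask,
                           unsigned int max_polls)
{
        while (max_polls--) {
                if (!(*reg & mask))
                        return true;    /* bits cleared within the budget */
                /* the driver sleeps or delays between polls here */
        }
        return false;                   /* timeout: caller prints drm_err() */
}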
@@ -1299,12 +1310,12 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
u32 val;
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
@@ -1318,28 +1329,28 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
- intel_de_write(dev_priv, LCPLL_CTL, val);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_write(display, LCPLL_CTL, val);
+ intel_de_posting_read(display, LCPLL_CTL);
}
- val = hsw_read_dcomp(dev_priv);
+ val = hsw_read_dcomp(display);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
+ hsw_write_dcomp(display, val);
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_write(display, LCPLL_CTL, val);
- if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
- drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
+ if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
+ drm_err(display->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
+ intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Switching back to LCPLL failed\n");
}
@@ -1372,36 +1383,42 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* For more, read "Display Sequences for Package C8" on the hardware
* documentation.
*/
-static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_enable_pc8(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_dbg_kms(display->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(dev_priv))
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
lpt_disable_clkout_dp(dev_priv);
- hsw_disable_lcpll(dev_priv, true, true);
+ hsw_disable_lcpll(display, true, true);
}
-static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_disable_pc8(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_dbg_kms(display->drm, "Disabling package C8+\n");
- hsw_restore_lcpll(dev_priv);
+ hsw_restore_lcpll(display);
intel_init_pch_refclk(dev_priv);
/* Many display registers don't survive PC8+ */
+#ifdef I915 /* FIXME */
intel_clock_gating_init(dev_priv);
+#endif
}
-static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+static void intel_pch_reset_handshake(struct intel_display *display,
bool enable)
{
i915_reg_t reg;
u32 reset_bits;
- if (IS_IVYBRIDGE(dev_priv)) {
+ if (display->platform.ivybridge) {
reg = GEN7_MSG_CTL;
reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
} else {
@@ -1409,59 +1426,58 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
- intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
+ intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0);
}
-static void skl_display_core_init(struct drm_i915_private *dev_priv,
+static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
intel_cdclk_init_hw(display);
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
if (resume)
intel_dmc_load_program(display);
}
-static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void skl_display_core_uninit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
gen9_disable_dc_states(display);
/* TODO: disable DMC program */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
intel_cdclk_uninit_hw(display);
@@ -1476,17 +1492,16 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
* Note that even though the driver's request is removed, power well 1
* may stay enabled after this due to DMC's own request on it.
*/
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+static void bxt_display_core_init(struct intel_display *display, bool resume)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
@@ -1498,40 +1513,39 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
* Move the handshake programming to the initialization sequence.
* Previously this was left up to the BIOS.
*/
- intel_pch_reset_handshake(dev_priv, false);
+ intel_pch_reset_handshake(display, false);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* Enable PG1 */
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
intel_cdclk_init_hw(display);
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
if (resume)
intel_dmc_load_program(display);
}
-static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+static void bxt_display_core_uninit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
gen9_disable_dc_states(display);
/* TODO: disable DMC program */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
intel_cdclk_uninit_hw(display);
@@ -1544,8 +1558,8 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
@@ -1582,20 +1596,21 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
{}
};
-static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+static void tgl_bw_buddy_init(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
- unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask;
+ unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
- if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
+ if (display->platform.dgfx && !display->platform.dg1)
return;
- if (IS_ALDERLAKE_S(dev_priv) ||
- (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
+ if (display->platform.alderlake_s ||
+ (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
/* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
@@ -1607,29 +1622,29 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
break;
if (table[config].page_mask == 0) {
- drm_dbg(&dev_priv->drm,
- "Unknown memory configuration; disabling address buddy logic.\n");
+ drm_dbg_kms(display->drm,
+ "Unknown memory configuration; disabling address buddy logic.\n");
for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
- intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+ intel_de_write(display, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
- intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+ intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);
/* Wa_22010178259:tgl,dg1,rkl,adl-s */
- if (DISPLAY_VER(dev_priv) == 12)
- intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+ if (DISPLAY_VER(display) == 12)
+ intel_de_rmw(display, BW_BUDDY_CTL(i),
BW_BUDDY_TLB_REQ_TIMER_MASK,
BW_BUDDY_TLB_REQ_TIMER(0x8));
}
}
}
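tgl_bw_buddy_init() walks a sentinel-terminated lookup table keyed on (DRAM type, channel count), taking the first hit and treating page_mask == 0 as "no match". A standalone model of that lookup (types and values invented for the sketch):

#include <stdint.h>

struct buddy_page_mask {
        int type;
        uint8_t num_channels;
        uint32_t page_mask;             /* 0 terminates the table */
};

static const struct buddy_page_mask demo_table[] = {
        { .type = 1, .num_channels = 1, .page_mask = 0xE },
        { .type = 1, .num_channels = 2, .page_mask = 0x1C },
        {}                              /* sentinel */
};

static uint32_t lookup_page_mask(int type, uint8_t channels)
{
        int i;

        for (i = 0; demo_table[i].page_mask != 0; i++)
                if (demo_table[i].type == type &&
                    demo_table[i].num_channels == channels)
                        return demo_table[i].page_mask;
        return 0;       /* unknown config: caller disables buddy logic */
}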
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
+static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
@@ -1638,13 +1653,13 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* 2. Initialize all combo phys */
@@ -1655,67 +1670,67 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
* The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
- if (DISPLAY_VER(dev_priv) == 14)
- intel_de_rmw(dev_priv, DC_STATE_EN,
+ if (DISPLAY_VER(display) == 14)
+ intel_de_rmw(display, DC_STATE_EN,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(display);
- if (DISPLAY_VER(dev_priv) >= 12)
- gen12_dbuf_slices_config(dev_priv);
+ if (DISPLAY_VER(display) >= 12)
+ gen12_dbuf_slices_config(display);
/* 5. Enable DBUF. */
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
/* 6. Setup MBUS. */
- icl_mbus_init(dev_priv);
+ icl_mbus_init(display);
/* 7. Program arbiter BW_BUDDY registers */
- if (DISPLAY_VER(dev_priv) >= 12)
- tgl_bw_buddy_init(dev_priv);
+ if (DISPLAY_VER(display) >= 12)
+ tgl_bw_buddy_init(display);
/* 8. Ensure PHYs have completed calibration and adaptation */
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
intel_snps_phy_wait_for_calibration(dev_priv);
/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
- if (DISPLAY_VERx100(dev_priv) == 1401)
- intel_de_rmw(dev_priv, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
+ if (DISPLAY_VERx100(display) == 1401)
+ intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
if (resume)
intel_dmc_load_program(display);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
- if (IS_DISPLAY_VERx100(dev_priv, 1200, 1300))
- intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
+ if (IS_DISPLAY_VERx100(display, 1200, 1300))
+ intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(dev_priv) == 13)
- intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+ if (DISPLAY_VER(display) == 13)
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
/* Wa_15013987218 */
- if (DISPLAY_VER(dev_priv) == 20) {
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ if (DISPLAY_VER(display) == 20) {
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
}
}
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void icl_display_core_uninit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
gen9_disable_dc_states(display);
@@ -1724,13 +1739,13 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(display);
- if (DISPLAY_VER(dev_priv) == 14)
- intel_de_rmw(dev_priv, DC_STATE_EN, 0,
+ if (DISPLAY_VER(display) == 14)
+ intel_de_rmw(display, DC_STATE_EN, 0,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
/*
@@ -1739,20 +1754,20 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
* disabled at this point.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
/* 5. */
intel_combo_phy_uninit(dev_priv);
}
-static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+static void chv_phy_control_init(struct intel_display *display)
{
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
/*
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
@@ -1761,7 +1776,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* power well state and lane status to reconstruct the
* expected initial value.
*/
- dev_priv->display.power.chv_phy_control =
+ display->power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
@@ -1775,39 +1790,39 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* override and set the lane powerdown bits according to the
* current lane status.
*/
- if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
- u32 status = intel_de_read(dev_priv, DPLL(dev_priv, PIPE_A));
+ if (intel_power_well_is_enabled(display, cmn_bc)) {
+ u32 status = intel_de_read(display, DPLL(display, PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
- dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
+ display->power.chv_phy_assert[DPIO_PHY0] = false;
} else {
- dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
+ display->power.chv_phy_assert[DPIO_PHY0] = true;
}
- if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
- u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
+ if (intel_power_well_is_enabled(display, cmn_d)) {
+ u32 status = intel_de_read(display, DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -1815,42 +1830,42 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+ display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
- dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
+ display->power.chv_phy_assert[DPIO_PHY1] = false;
} else {
- dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
+ display->power.chv_phy_assert[DPIO_PHY1] = true;
}
- drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
- dev_priv->display.power.chv_phy_control);
+ drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
+ display->power.chv_phy_control);
/* Defer application of initial phy_control to enabling the powerwell */
}
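The lane-status reconstruction above applies one rule per channel: a fully ready lane mask (0xf) needs no powerdown override, while anything partial enables the override and latches the current lane state. Reduced to a standalone helper (bit layout invented for illustration):

#include <stdint.h>

#define CH_OVRD_EN (1u << 8)    /* invented stand-in for the OVRD_EN bit */

static uint32_t lane_powerdown_override(uint32_t ready_mask /* low 4 bits */)
{
        if (ready_mask == 0xf)
                return 0;                       /* all lanes up: no override */
        return CH_OVRD_EN | (ready_mask & 0xf); /* freeze current lane state */
}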
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+static void vlv_cmnlane_wa(struct intel_display *display)
{
struct i915_power_well *cmn =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+ lookup_power_well(display, VLV_DISP_PW_DISP2D);
/* If the display might already be active, skip this */
- if (intel_power_well_is_enabled(dev_priv, cmn) &&
- intel_power_well_is_enabled(dev_priv, disp2d) &&
- intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
+ if (intel_power_well_is_enabled(display, cmn) &&
+ intel_power_well_is_enabled(display, disp2d) &&
+ intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
return;
- drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
+ drm_dbg_kms(display->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
- intel_power_well_enable(dev_priv, disp2d);
+ intel_power_well_enable(display, disp2d);
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
@@ -1859,11 +1874,12 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
- intel_power_well_disable(dev_priv, cmn);
+ intel_power_well_disable(display, cmn);
}
-static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
vlv_punit_get(dev_priv);
@@ -1873,14 +1889,14 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
return ret;
}
-static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+static void assert_ved_power_gated(struct intel_display *display)
{
- drm_WARN(&dev_priv->drm,
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ drm_WARN(display->drm,
+ !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
"VED not power gated\n");
}
-static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+static void assert_isp_power_gated(struct intel_display *display)
{
static const struct pci_device_id isp_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
@@ -1888,16 +1904,16 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{}
};
- drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
"ISP not power gated\n");
}
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+static void intel_power_domains_verify_state(struct intel_display *display);
/**
* intel_power_domains_init_hw - initialize hardware power domain state
- * @i915: i915 device instance
+ * @display: display device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
@@ -1911,34 +1927,35 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_driver_remove().
*/
-void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
+void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
- if (DISPLAY_VER(i915) >= 11) {
- icl_display_core_init(i915, resume);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
- bxt_display_core_init(i915, resume);
- } else if (DISPLAY_VER(i915) == 9) {
- skl_display_core_init(i915, resume);
- } else if (IS_CHERRYVIEW(i915)) {
+ if (DISPLAY_VER(display) >= 11) {
+ icl_display_core_init(display, resume);
+ } else if (display->platform.geminilake || display->platform.broxton) {
+ bxt_display_core_init(display, resume);
+ } else if (DISPLAY_VER(display) == 9) {
+ skl_display_core_init(display, resume);
+ } else if (display->platform.cherryview) {
mutex_lock(&power_domains->lock);
- chv_phy_control_init(i915);
+ chv_phy_control_init(display);
mutex_unlock(&power_domains->lock);
- assert_isp_power_gated(i915);
- } else if (IS_VALLEYVIEW(i915)) {
+ assert_isp_power_gated(display);
+ } else if (display->platform.valleyview) {
mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(i915);
+ vlv_cmnlane_wa(display);
mutex_unlock(&power_domains->lock);
- assert_ved_power_gated(i915);
- assert_isp_power_gated(i915);
- } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
- hsw_assert_cdclk(i915);
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
- } else if (IS_IVYBRIDGE(i915)) {
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ assert_ved_power_gated(display);
+ assert_isp_power_gated(display);
+ } else if (display->platform.broadwell || display->platform.haswell) {
+ hsw_assert_cdclk(display);
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ } else if (display->platform.ivybridge) {
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
}
/*
@@ -1947,24 +1964,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->display.params.disable_power_well) {
- drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
- i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_INIT);
+ if (!display->params.disable_power_well) {
+ drm_WARN_ON(display->drm, power_domains->disable_wakeref);
+ display->power.domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
}
- intel_power_domains_sync_hw(i915);
+ intel_power_domains_sync_hw(display);
power_domains->initializing = false;
}
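The init_wakeref handling here pairs with intel_power_domains_enable() further down: _init_hw() takes a POWER_DOMAIN_INIT reference so nothing powers down before HW readout, and enable() drops it. A toy model of that contract (standalone types, asserts standing in for drm_WARN_ON()):

#include <assert.h>

struct power_domains { int init_wakeref; };

static void power_domains_init_hw(struct power_domains *pd)
{
        assert(!pd->init_wakeref);      /* must not already hold it */
        pd->init_wakeref = 1;           /* intel_display_power_get(INIT) */
}

static void power_domains_enable(struct power_domains *pd)
{
        assert(pd->init_wakeref);       /* init_hw() must have run first */
        pd->init_wakeref = 0;           /* intel_display_power_put(INIT) */
}

int main(void)
{
        struct power_domains pd = { 0 };

        power_domains_init_hw(&pd);     /* driver load / resume */
        power_domains_enable(&pd);      /* after HW readout completes */
        return 0;
}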
/**
* intel_power_domains_driver_remove - deinitialize hw power domain state
- * @i915: i915 device instance
+ * @display: display device instance
*
* De-initializes the display power domain HW state. It also ensures that the
* device stays powered up so that the driver can be reloaded.
@@ -1973,19 +1990,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_driver_remove(struct drm_i915_private *i915)
+void intel_power_domains_driver_remove(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->display.power.domains.init_wakeref);
+ fetch_and_zero(&display->power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+ fetch_and_zero(&display->power.domains.disable_wakeref));
- intel_display_power_flush_work_sync(i915);
+ intel_display_power_flush_work_sync(display);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1993,7 +2011,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
/**
* intel_power_domains_sanitize_state - sanitize power domains state
- * @i915: i915 device instance
+ * @display: display device instance
*
* Sanitize the power domains state during driver loading and system resume.
* The function will disable all display power wells that BIOS has enabled
@@ -2001,22 +2019,22 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
* on it by the time this function is called, after the state of all the
* pipe, encoder, etc. HW resources have been sanitized).
*/
-void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
+void intel_power_domains_sanitize_state(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
- for_each_power_well_reverse(i915, power_well) {
+ for_each_power_well_reverse(display, power_well) {
if (power_well->desc->always_on || power_well->count ||
- !intel_power_well_is_enabled(i915, power_well))
+ !intel_power_well_is_enabled(display, power_well))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS left unused %s power well enabled, disabling it\n",
intel_power_well_name(power_well));
- intel_power_well_disable(i915, power_well);
+ intel_power_well_disable(display, power_well);
}
mutex_unlock(&power_domains->lock);
@@ -2024,7 +2042,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
/**
* intel_power_domains_enable - enable toggling of display power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
* Enable the on-demand enabling/disabling of the display power wells. Note that
* power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
@@ -2034,36 +2052,38 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
* of display HW readout (which will acquire the power references reflecting
* the current HW state).
*/
-void intel_power_domains_enable(struct drm_i915_private *i915)
+void intel_power_domains_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->display.power.domains.init_wakeref);
+ fetch_and_zero(&display->power.domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
/**
* intel_power_domains_disable - disable toggling of display power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
* Disable the on-demand enabling/disabling of the display power wells. See
* intel_power_domains_enable() for which power wells this call controls.
*/
-void intel_power_domains_disable(struct drm_i915_private *i915)
+void intel_power_domains_disable(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
/**
* intel_power_domains_suspend - suspend power domain state
- * @i915: i915 device instance
+ * @display: display device instance
* @s2idle: specifies whether we go to idle or deeper sleep
*
* This function prepares the hardware power domain state before entering
@@ -2072,9 +2092,9 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
-void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
+void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
@@ -2091,7 +2111,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
intel_dmc_has_payload(display)) {
intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
return;
}
@@ -2099,26 +2119,26 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+ fetch_and_zero(&display->power.domains.disable_wakeref));
intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
- if (DISPLAY_VER(i915) >= 11)
- icl_display_core_uninit(i915);
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- bxt_display_core_uninit(i915);
- else if (DISPLAY_VER(i915) == 9)
- skl_display_core_uninit(i915);
+ if (DISPLAY_VER(display) >= 11)
+ icl_display_core_uninit(display);
+ else if (display->platform.geminilake || display->platform.broxton)
+ bxt_display_core_uninit(display);
+ else if (DISPLAY_VER(display) == 9)
+ skl_display_core_uninit(display);
power_domains->display_core_suspended = true;
}
/**
* intel_power_domains_resume - resume power domain state
- * @i915: i915 device instance
+ * @display: display device instance
*
* This function resumes the hardware power domain state during system resume.
*
@@ -2126,45 +2146,46 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_suspend().
*/
-void intel_power_domains_resume(struct drm_i915_private *i915)
+void intel_power_domains_resume(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
if (power_domains->display_core_suspended) {
- intel_power_domains_init_hw(i915, true);
+ intel_power_domains_init_hw(display, true);
power_domains->display_core_suspended = false;
} else {
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void intel_power_domains_dump_info(struct drm_i915_private *i915)
+static void intel_power_domains_dump_info(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
- for_each_power_well(i915, power_well) {
+ for_each_power_well(display, power_well) {
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%-25s %d\n",
- intel_power_well_name(power_well), intel_power_well_refcount(power_well));
+ drm_dbg_kms(display->drm, "%-25s %d\n",
+ intel_power_well_name(power_well), intel_power_well_refcount(power_well));
for_each_power_domain(domain, intel_power_well_domains(power_well))
- drm_dbg(&i915->drm, " %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg_kms(display->drm, " %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
}
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
@@ -2172,9 +2193,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+static void intel_power_domains_verify_state(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
bool dump_domain_info;
@@ -2183,16 +2204,16 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
verify_async_put_domains_state(power_domains);
dump_domain_info = false;
- for_each_power_well(i915, power_well) {
+ for_each_power_well(display, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
- enabled = intel_power_well_is_enabled(i915, power_well);
+ enabled = intel_power_well_is_enabled(display, power_well);
if ((intel_power_well_refcount(power_well) ||
intel_power_well_is_always_on(power_well)) !=
enabled)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"power well %s state mismatch (refcount %d/enabled %d)",
intel_power_well_name(power_well),
intel_power_well_refcount(power_well), enabled);
@@ -2202,7 +2223,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
domains_count += power_domains->domain_use_count[domain];
if (intel_power_well_refcount(power_well) != domains_count) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n",
intel_power_well_name(power_well),
@@ -2216,7 +2237,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
static bool dumped;
if (!dumped) {
- intel_power_domains_dump_info(i915);
+ intel_power_domains_dump_info(display);
dumped = true;
}
}
@@ -2226,21 +2247,23 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
#else
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+static void intel_power_domains_verify_state(struct intel_display *display)
{
}
#endif
-void intel_display_power_suspend_late(struct drm_i915_private *i915)
+void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ intel_power_domains_suspend(display, s2idle);
- if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
- IS_BROXTON(i915)) {
+ if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
+ display->platform.broxton) {
bxt_enable_dc9(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_enable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_enable_pc8(display);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
@@ -2248,66 +2271,66 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
-void intel_display_power_resume_early(struct drm_i915_private *i915)
+void intel_display_power_resume_early(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
- IS_BROXTON(i915)) {
+ if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
+ display->platform.broxton) {
gen9_sanitize_dc_state(display);
bxt_disable_dc9(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_disable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_disable_pc8(display);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+
+ intel_power_domains_resume(display);
}
-void intel_display_power_suspend(struct drm_i915_private *i915)
+void intel_display_power_suspend(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (DISPLAY_VER(i915) >= 11) {
- icl_display_core_uninit(i915);
+ if (DISPLAY_VER(display) >= 11) {
+ icl_display_core_uninit(display);
bxt_enable_dc9(display);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
- bxt_display_core_uninit(i915);
+ } else if (display->platform.geminilake || display->platform.broxton) {
+ bxt_display_core_uninit(display);
bxt_enable_dc9(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_enable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_enable_pc8(display);
}
}
-void intel_display_power_resume(struct drm_i915_private *i915)
+void intel_display_power_resume(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct i915_power_domains *power_domains = &display->power.domains;
- if (DISPLAY_VER(i915) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
bxt_disable_dc9(display);
- icl_display_core_init(i915, true);
+ icl_display_core_init(display, true);
if (intel_dmc_has_payload(display)) {
if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(display);
else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(display);
}
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
bxt_disable_dc9(display);
- bxt_display_core_init(i915, true);
+ bxt_display_core_init(display, true);
if (intel_dmc_has_payload(display) &&
(power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_disable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_disable_pc8(display);
}
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
int i;
mutex_lock(&power_domains->lock);
@@ -2452,17 +2475,17 @@ d13_port_domains[] = {
};
static void
-intel_port_domains_for_platform(struct drm_i915_private *i915,
+intel_port_domains_for_platform(struct intel_display *display,
const struct intel_ddi_port_domains **domains,
int *domains_size)
{
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
*domains = d13_port_domains;
*domains_size = ARRAY_SIZE(d13_port_domains);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
*domains = d12_port_domains;
*domains_size = ARRAY_SIZE(d12_port_domains);
- } else if (DISPLAY_VER(i915) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
*domains = d11_port_domains;
*domains_size = ARRAY_SIZE(d11_port_domains);
} else {
@@ -2472,13 +2495,13 @@ intel_port_domains_for_platform(struct drm_i915_private *i915,
}
static const struct intel_ddi_port_domains *
-intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
+intel_port_domains_for_port(struct intel_display *display, enum port port)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
- intel_port_domains_for_platform(i915, &domains, &domains_size);
+ intel_port_domains_for_platform(display, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (port >= domains[i].port_start && port <= domains[i].port_end)
return &domains[i];
@@ -2489,9 +2512,10 @@ intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
- if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_IO_A;
return domains->ddi_io + (int)(port - domains->port_start);
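This and the following *_domain() helpers share a range-table mapping: find the row whose [start, end] range contains the port or AUX channel, then index the per-row base domain by the offset into the range. A standalone model (enum values invented):

#include <stddef.h>

struct port_domains { int port_start, port_end, ddi_io_base; };

static const struct port_domains rows[] = {
        { .port_start = 0, .port_end = 3, .ddi_io_base = 100 },
        { .port_start = 4, .port_end = 8, .ddi_io_base = 200 },
};

static int ddi_io_domain(int port)
{
        size_t i;

        for (i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
                if (port >= rows[i].port_start && port <= rows[i].port_end)
                        return rows[i].ddi_io_base +
                               (port - rows[i].port_start);
        return -1;      /* caller WARNs and falls back to a safe default */
}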
@@ -2500,22 +2524,23 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
- if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_LANES_A;
return domains->ddi_lanes + (int)(port - domains->port_start);
}
static const struct intel_ddi_port_domains *
-intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
- intel_port_domains_for_platform(i915, &domains, &domains_size);
+ intel_port_domains_for_platform(display, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
return &domains[i];
@@ -2526,9 +2551,10 @@ intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_IO_A;
return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
@@ -2537,9 +2563,10 @@ intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux
enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_A;
return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
@@ -2548,9 +2575,10 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_TBT1;
return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
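The *_domain() helpers above all share one trick: each intel_ddi_port_domains entry stores the domain of its first port/AUX channel, and consecutive ports map to consecutive domains, so a lookup is a range test plus an offset add. A standalone sketch of that arithmetic, with a made-up two-range table (the enum values and domain ids below are hypothetical, not a real platform's mapping):

#include <assert.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_TC1, PORT_TC2 };

struct port_domains {
	enum port port_start, port_end;
	int ddi_io;	/* domain of port_start; later ports follow consecutively */
};

static const struct port_domains table[] = {
	{ PORT_A,   PORT_C,   100 },	/* hypothetical domain ids */
	{ PORT_TC1, PORT_TC2, 200 },
};

static int ddi_io_domain(enum port port)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (port >= table[i].port_start && port <= table[i].port_end)
			return table[i].ddi_io + (int)(port - table[i].port_start);
	return -1;	/* the driver instead drm_WARNs and falls back to a safe domain */
}

int main(void)
{
	assert(ddi_io_domain(PORT_B) == 101);	/* second port of the first range */
	assert(ddi_io_domain(PORT_TC2) == 201);
	return 0;
}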
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 3f8f84df4733..7b294eec4431 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -15,6 +15,7 @@ enum aux_ch;
enum port;
struct drm_i915_private;
struct i915_power_well;
+struct intel_display;
struct intel_encoder;
struct seq_file;
@@ -166,21 +167,21 @@ struct intel_display_power_domain_set {
for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \
for_each_if(test_bit((__domain), (__mask)->bits))
-int intel_power_domains_init(struct drm_i915_private *dev_priv);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv, bool s2idle);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void intel_power_domains_sanitize_state(struct drm_i915_private *dev_priv);
-
-void intel_display_power_suspend_late(struct drm_i915_private *i915);
-void intel_display_power_resume_early(struct drm_i915_private *i915);
-void intel_display_power_suspend(struct drm_i915_private *i915);
-void intel_display_power_resume(struct drm_i915_private *i915);
-void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+int intel_power_domains_init(struct intel_display *display);
+void intel_power_domains_cleanup(struct intel_display *display);
+void intel_power_domains_init_hw(struct intel_display *display, bool resume);
+void intel_power_domains_driver_remove(struct intel_display *display);
+void intel_power_domains_enable(struct intel_display *display);
+void intel_power_domains_disable(struct intel_display *display);
+void intel_power_domains_suspend(struct intel_display *display, bool s2idle);
+void intel_power_domains_resume(struct intel_display *display);
+void intel_power_domains_sanitize_state(struct intel_display *display);
+
+void intel_display_power_suspend_late(struct intel_display *display, bool s2idle);
+void intel_display_power_resume_early(struct intel_display *display);
+void intel_display_power_suspend(struct intel_display *display);
+void intel_display_power_resume(struct intel_display *display);
+void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 5575aa0d6689..0c8ac1af6db7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -3,14 +3,12 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
-
-#include "vlv_sideband_reg.h"
-
+#include "intel_display_core.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
+#include "vlv_sideband_reg.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
@@ -1752,9 +1750,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
const struct i915_power_well_desc_list *power_well_descs,
int power_well_descs_sz)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
u64 power_well_ids = 0;
const struct i915_power_well_desc_list *desc_list;
const struct i915_power_well_desc *desc;
@@ -1778,7 +1776,7 @@ __set_power_wells(struct i915_power_domains *power_domains,
enum i915_power_well_id id = inst->id;
pw->desc = desc;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
overflows_type(inst - desc->instances->list, pw->instance_idx));
pw->instance_idx = inst - desc->instances->list;
@@ -1789,8 +1787,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
if (id == DISP_PW_ID_NONE)
continue;
- drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
- drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
+ drm_WARN_ON(display->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(display->drm, power_well_ids & BIT_ULL(id));
power_well_ids |= BIT_ULL(id);
}
@@ -1811,53 +1809,53 @@ __set_power_wells(struct i915_power_domains *power_domains,
*/
int intel_display_power_map_init(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (!HAS_DISPLAY(i915)) {
+ if (!HAS_DISPLAY(display)) {
power_domains->power_well_count = 0;
return 0;
}
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
return set_power_wells(power_domains, xe3lpd_power_wells);
- else if (DISPLAY_VER(i915) >= 20)
+ else if (DISPLAY_VER(display) >= 20)
return set_power_wells(power_domains, xe2lpd_power_wells);
- else if (DISPLAY_VER(i915) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return set_power_wells(power_domains, xelpdp_power_wells);
- else if (IS_DG2(i915))
+ else if (display->platform.dg2)
return set_power_wells(power_domains, xehpd_power_wells);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return set_power_wells(power_domains, xelpd_power_wells);
- else if (IS_DG1(i915))
+ else if (display->platform.dg1)
return set_power_wells(power_domains, dg1_power_wells);
- else if (IS_ALDERLAKE_S(i915))
+ else if (display->platform.alderlake_s)
return set_power_wells(power_domains, adls_power_wells);
- else if (IS_ROCKETLAKE(i915))
+ else if (display->platform.rocketlake)
return set_power_wells(power_domains, rkl_power_wells);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return set_power_wells(power_domains, tgl_power_wells);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return set_power_wells(power_domains, icl_power_wells);
- else if (IS_GEMINILAKE(i915))
+ else if (display->platform.geminilake)
return set_power_wells(power_domains, glk_power_wells);
- else if (IS_BROXTON(i915))
+ else if (display->platform.broxton)
return set_power_wells(power_domains, bxt_power_wells);
- else if (DISPLAY_VER(i915) == 9)
+ else if (DISPLAY_VER(display) == 9)
return set_power_wells(power_domains, skl_power_wells);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
return set_power_wells(power_domains, chv_power_wells);
- else if (IS_BROADWELL(i915))
+ else if (display->platform.broadwell)
return set_power_wells(power_domains, bdw_power_wells);
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
return set_power_wells(power_domains, hsw_power_wells);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
return set_power_wells(power_domains, vlv_power_wells);
- else if (IS_I830(i915))
+ else if (display->platform.i830)
return set_power_wells(power_domains, i830_power_wells);
else
return set_power_wells(power_domains, i9xx_power_wells);
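Both functions in this file recover their device pointer from the embedded power_domains member via container_of(); the patch only changes which container type the member is embedded in (intel_display instead of drm_i915_private). A self-contained sketch of that back-pointer pattern, with toy struct names standing in for the driver's types:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct power_domains { int power_well_count; };
struct display { int ver; struct power_domains domains; };	/* toy types */

static struct display *to_display(struct power_domains *pd)
{
	return container_of(pd, struct display, domains);
}

int main(void)
{
	struct display d = { .ver = 20 };

	/* Given only the embedded member, recover the enclosing object. */
	assert(to_display(&d.domains) == &d);
	return 0;
}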
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index f0131dd853de..f45a4f9ba23c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -46,23 +46,23 @@ struct i915_power_well_ops {
* during driver init and resume time, possibly after first calling
* the enable/disable handlers.
*/
- void (*sync_hw)(struct drm_i915_private *i915,
+ void (*sync_hw)(struct intel_display *display,
struct i915_power_well *power_well);
/*
* Enable the well and resources that depend on it (for example
* interrupts located on the well). Called after the 0->1 refcount
* transition.
*/
- void (*enable)(struct drm_i915_private *i915,
+ void (*enable)(struct intel_display *display,
struct i915_power_well *power_well);
/*
* Disable the well and resources that depend on it. Called after
* the 1->0 refcount transition.
*/
- void (*disable)(struct drm_i915_private *i915,
+ void (*disable)(struct intel_display *display,
struct i915_power_well *power_well);
/* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *i915,
+ bool (*is_enabled)(struct intel_display *display,
struct i915_power_well *power_well);
};
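The comments above spell out the contract for each hook; platform code then bundles concrete callbacks into const ops tables. A hypothetical "always-on" instance in that style, assuming the post-patch signatures (the callback and table names here are invented for illustration — the driver's real noop/always-on callbacks appear further down in this file):

static void noop_sync_hw(struct intel_display *display,
			 struct i915_power_well *power_well)
{
	/* nothing to resync for a well with no controllable hardware */
}

static void noop_toggle(struct intel_display *display,
			struct i915_power_well *power_well)
{
	/* enable/disable are no-ops: the well cannot be turned off */
}

static bool always_enabled(struct intel_display *display,
			   struct i915_power_well *power_well)
{
	return true;
}

static const struct i915_power_well_ops example_always_on_ops = {
	.sync_hw = noop_sync_hw,
	.enable = noop_toggle,
	.disable = noop_toggle,
	.is_enabled = always_enabled,
};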
@@ -73,12 +73,12 @@ i915_power_well_instance(const struct i915_power_well *power_well)
}
struct i915_power_well *
-lookup_power_well(struct drm_i915_private *i915,
+lookup_power_well(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
struct i915_power_well *power_well;
- for_each_power_well(i915, power_well)
+ for_each_power_well(display, power_well)
if (i915_power_well_instance(power_well)->id == power_well_id)
return power_well;
@@ -89,58 +89,57 @@ lookup_power_well(struct drm_i915_private *i915,
* the first power well and hope the WARN gets reported so we can fix
* our driver.
*/
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"Power well %d not defined for this platform\n",
power_well_id);
- return &i915->display.power.domains.power_wells[0];
+ return &display->power.domains.power_wells[0];
}
-void intel_power_well_enable(struct drm_i915_private *i915,
+void intel_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
- power_well->desc->ops->enable(i915, power_well);
+ drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well));
+ power_well->desc->ops->enable(display, power_well);
power_well->hw_enabled = true;
}
-void intel_power_well_disable(struct drm_i915_private *i915,
+void intel_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
+ drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well));
power_well->hw_enabled = false;
- power_well->desc->ops->disable(i915, power_well);
+ power_well->desc->ops->disable(display, power_well);
}
-void intel_power_well_sync_hw(struct drm_i915_private *i915,
+void intel_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
- power_well->desc->ops->sync_hw(i915, power_well);
- power_well->hw_enabled =
- power_well->desc->ops->is_enabled(i915, power_well);
+ power_well->desc->ops->sync_hw(display, power_well);
+ power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well);
}
-void intel_power_well_get(struct drm_i915_private *i915,
+void intel_power_well_get(struct intel_display *display,
struct i915_power_well *power_well)
{
if (!power_well->count++)
- intel_power_well_enable(i915, power_well);
+ intel_power_well_enable(display, power_well);
}
-void intel_power_well_put(struct drm_i915_private *i915,
+void intel_power_well_put(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_WARN(&i915->drm, !power_well->count,
+ drm_WARN(display->drm, !power_well->count,
"Use count on power well %s is already zero",
i915_power_well_instance(power_well)->name);
if (!--power_well->count)
- intel_power_well_disable(i915, power_well);
+ intel_power_well_disable(display, power_well);
}
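get()/put() implement an edge-triggered refcount: only the first getter powers the well on and only the last putter powers it off. A standalone toy model of that discipline (plain C, no driver types):

#include <assert.h>
#include <stdbool.h>

struct well { int count; bool hw_enabled; };	/* toy stand-in */

static void well_get(struct well *w)
{
	if (!w->count++)		/* 0 -> 1 transition: enable hardware */
		w->hw_enabled = true;
}

static void well_put(struct well *w)
{
	assert(w->count > 0);		/* the driver drm_WARNs on underflow */
	if (!--w->count)		/* 1 -> 0 transition: disable hardware */
		w->hw_enabled = false;
}

int main(void)
{
	struct well w = { 0 };

	well_get(&w);		/* first user: powers up */
	well_get(&w);		/* nested user: refcount only */
	well_put(&w);
	assert(w.hw_enabled);	/* still held by the remaining user */
	well_put(&w);
	assert(!w.hw_enabled);
	return 0;
}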
-bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+bool intel_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return power_well->desc->ops->is_enabled(i915, power_well);
+ return power_well->desc->ops->is_enabled(display, power_well);
}
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
@@ -148,14 +147,14 @@ bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
return power_well->hw_enabled;
}
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, power_well_id);
+ power_well = lookup_power_well(display, power_well_id);
- return intel_power_well_is_enabled(dev_priv, power_well);
+ return intel_power_well_is_enabled(display, power_well);
}
bool intel_power_well_is_always_on(struct i915_power_well *power_well)
@@ -184,10 +183,10 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
if (has_vga)
intel_vga_reset_io_mem(display);
@@ -196,9 +195,11 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (irq_pipe_mask)
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
@@ -221,12 +222,12 @@ static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
}
static struct intel_digital_port *
-aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+aux_ch_to_digital_port(struct intel_display *display,
enum aux_ch aux_ch)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
/* We'll check the MST primary port */
@@ -242,11 +243,11 @@ aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
return NULL;
}
-static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+static enum phy icl_aux_pw_to_phy(struct intel_display *display,
const struct i915_power_well *power_well)
{
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
/*
* FIXME should we care about the (VBT defined) dig_port->aux_ch
@@ -258,7 +259,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well,
bool timeout_expected)
{
@@ -271,39 +272,39 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
* an ack, but rather just wait a fixed amount of time and then
* proceed. This is only used on DG2.
*/
- if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
+ if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
usleep_range(600, 1200);
return;
}
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(dev_priv, regs->driver,
+ if (intel_de_wait_for_set(display, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
- drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ drm_dbg_kms(display->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
- drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+ drm_WARN_ON(display->drm, !timeout_expected);
}
}
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+static u32 hsw_power_well_requesters(struct intel_display *display,
const struct i915_power_well_regs *regs,
int pw_idx)
{
u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
u32 ret;
- ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
- ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
+ ret = intel_de_read(display, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(display, regs->driver) & req_mask ? 2 : 0;
if (regs->kvmr.reg)
- ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
- ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
+ ret |= intel_de_read(display, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(display, regs->debug) & req_mask ? 8 : 0;
return ret;
}
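The return value is a 4-bit summary of who is still requesting the well: bit 0 BIOS, bit 1 driver, bit 2 KVMr, bit 3 debug — the same order printed by hsw_wait_for_power_well_disable() below. A standalone model of the encoding, with made-up register values:

#include <assert.h>
#include <stdint.h>

static unsigned int requesters(uint32_t bios, uint32_t driver, uint32_t kvmr,
			       uint32_t debug, uint32_t req_mask)
{
	unsigned int ret = 0;

	ret |= (bios & req_mask) ? 1 : 0;
	ret |= (driver & req_mask) ? 2 : 0;
	ret |= (kvmr & req_mask) ? 4 : 0;
	ret |= (debug & req_mask) ? 8 : 0;
	return ret;
}

int main(void)
{
	/* driver and debug both hold a request for the same well */
	assert(requesters(0x0, 0x2, 0x0, 0x2, 0x2) == (2 | 8));
	return 0;
}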
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -320,28 +321,28 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
+ wait_for((disabled = !(intel_de_read(display, regs->driver) &
HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
if (disabled)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
intel_power_well_name(power_well),
!!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+static void gen9_wait_for_power_well_fuses(struct intel_display *display,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- drm_WARN_ON(&dev_priv->drm,
- intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ drm_WARN_ON(display->drm,
+ intel_de_wait_for_set(display, SKL_FUSE_STATUS,
SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -350,12 +351,12 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/* Wa_16013190616:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
+ if (display->platform.alderlake_p && pg == SKL_PG1)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
@@ -365,112 +366,112 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
* after the enabling.
*/
if (pg == SKL_PG1)
- gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ gen9_wait_for_power_well_fuses(display, SKL_PG0);
}
- intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+ hsw_wait_for_power_well_enable(display, power_well, false);
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
- gen9_wait_for_power_well_fuses(dev_priv, pg);
+ gen9_wait_for_power_well_fuses(display, pg);
}
- hsw_power_well_post_enable(dev_priv,
+ hsw_power_well_post_enable(display,
power_well->desc->irq_pipe_mask,
power_well->desc->has_vga);
}
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- hsw_power_well_pre_disable(dev_priv,
+ hsw_power_well_pre_disable(display,
power_well->desc->irq_pipe_mask);
- intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
- hsw_wait_for_power_well_disable(dev_priv, power_well);
+ intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
+ hsw_wait_for_power_well_disable(display, power_well);
}
-static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
+static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.icelake);
- intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
/*
* FIXME not sure if we should derive the PHY from the pw_idx, or
* from the VBT defined AUX_CH->DDI->PHY mapping.
*/
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
0, ICL_LANE_ENABLE_AUX);
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+ hsw_wait_for_power_well_enable(display, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
+ !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx)))
+ intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}
static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.icelake);
/*
* FIXME not sure if we should derive the PHY from the pw_idx, or
* from the VBT defined AUX_CH->DDI->PHY mapping.
*/
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
ICL_LANE_ENABLE_AUX, 0);
- intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
+ intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
- hsw_wait_for_power_well_disable(dev_priv, power_well);
+ hsw_wait_for_power_well_disable(display, power_well);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+static void icl_tc_port_assert_ref_held(struct intel_display *display,
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
- if (drm_WARN_ON(&dev_priv->drm, !dig_port))
+ if (drm_WARN_ON(display->drm, !dig_port))
return;
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
return;
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
}
#else
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+static void icl_tc_port_assert_ref_held(struct intel_display *display,
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
@@ -480,8 +481,9 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
-static void icl_tc_cold_exit(struct drm_i915_private *i915)
+static void icl_tc_cold_exit(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret, tries = 0;
while (1) {
@@ -502,21 +504,22 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915)
}
static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_tc_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
- icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
+ icl_tc_port_assert_ref_held(display, power_well, dig_port);
- intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
+ intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch),
DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
- intel_de_rmw(dev_priv, regs->driver,
+ intel_de_rmw(display, regs->driver,
0,
HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
@@ -526,51 +529,53 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* exit sequence.
*/
timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
- icl_tc_cold_exit(dev_priv);
+ if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ icl_tc_cold_exit(display);
- hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
+ hsw_wait_for_power_well_enable(display, power_well, timeout_expected);
- if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
+ if (DISPLAY_VER(display) >= 12 && !is_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(&dev_priv->drm,
+ drm_warn(display->drm,
"Timeout waiting TC uC health\n");
}
}
static void
-icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_enable(dev_priv,
+ return icl_tc_phy_aux_power_well_enable(display, power_well);
+ else if (display->platform.icelake)
+ return icl_combo_phy_aux_power_well_enable(display,
power_well);
else
- return hsw_power_well_enable(dev_priv, power_well);
+ return hsw_power_well_enable(display, power_well);
}
static void
-icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+icl_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- return hsw_power_well_disable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_disable(dev_priv,
+ return hsw_power_well_disable(display, power_well);
+ else if (display->platform.icelake)
+ return icl_combo_phy_aux_power_well_disable(display,
power_well);
else
- return hsw_power_well_disable(dev_priv, power_well);
+ return hsw_power_well_disable(display, power_well);
}
/*
@@ -578,7 +583,7 @@ icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool hsw_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -588,7 +593,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
HSW_PWR_WELL_CTL_STATE(pw_idx);
u32 val;
- val = intel_de_read(dev_priv, regs->driver);
+ val = intel_de_read(display, regs->driver);
/*
* On GEN9 big core due to a DMC bug the driver's request bits for PW1
@@ -596,9 +601,9 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
* BIOS's own request bits, which are forced-on for these power wells
* when exiting DC5/6.
*/
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= intel_de_read(dev_priv, regs->bios);
+ val |= intel_de_read(display, regs->bios);
return (val & mask) == mask;
}
@@ -691,7 +696,6 @@ static void gen9_write_dc_state(struct intel_display *display,
static u32 gen9_dc_mask(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
@@ -701,7 +705,7 @@ static u32 gen9_dc_mask(struct intel_display *display)
| DC_STATE_EN_DC9;
else if (DISPLAY_VER(display) == 11)
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -798,7 +802,7 @@ static void tgl_disable_dc3co(struct intel_display *display)
static void assert_can_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
@@ -808,7 +812,7 @@ static void assert_can_enable_dc5(struct intel_display *display)
high_pg = SKL_DISP_PW_2;
drm_WARN_ONCE(display->drm,
- intel_display_power_well_is_enabled(dev_priv, high_pg),
+ intel_display_power_well_is_enabled(display, high_pg),
"Power wells above platform's DC5 limit still enabled.\n");
drm_WARN_ONCE(display->drm,
@@ -822,18 +826,16 @@ static void assert_can_enable_dc5(struct intel_display *display)
void gen9_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
assert_can_enable_dc5(display);
drm_dbg_kms(display->drm, "Enabling DC5\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
- intel_dmc_wl_enable(display);
+ intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5);
gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5);
}
@@ -855,26 +857,22 @@ static void assert_can_enable_dc6(struct intel_display *display)
void skl_enable_dc6(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
assert_can_enable_dc6(display);
drm_dbg_kms(display->drm, "Enabling DC6\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
- intel_dmc_wl_enable(display);
+ intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6);
gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6);
}
void bxt_enable_dc9(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
assert_can_enable_dc9(display);
drm_dbg_kms(display->drm, "Enabling DC9\n");
@@ -882,7 +880,7 @@ void bxt_enable_dc9(struct intel_display *display)
* Power sequencer reset is needed on BXT/GLK, because the PPS registers
* aren't always on, unlike with South Display Engine on PCH.
*/
- if (IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (display->platform.broxton || display->platform.geminilake)
bxt_pps_reset_all(display);
gen9_set_dc_state(display, DC_STATE_EN_DC9);
}
@@ -898,63 +896,56 @@ void bxt_disable_dc9(struct intel_display *display)
intel_pps_unlock_regs_wa(display);
}
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void hsw_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = intel_de_read(dev_priv, regs->bios);
+ u32 bios_req = intel_de_read(display, regs->bios);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- u32 drv_req = intel_de_read(dev_priv, regs->driver);
+ u32 drv_req = intel_de_read(display, regs->driver);
if (!(drv_req & mask))
- intel_de_write(dev_priv, regs->driver, drv_req | mask);
- intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
+ intel_de_write(display, regs->driver, drv_req | mask);
+ intel_de_write(display, regs->bios, bios_req & ~mask);
}
}
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
+static void bxt_verify_dpio_phy_power_wells(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ power_well = lookup_power_well(display, BXT_DISP_PW_DPIO_CMN_A);
if (intel_power_well_refcount(power_well) > 0)
bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ power_well = lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
if (intel_power_well_refcount(power_well) > 0)
bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
- if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv,
+ if (display->platform.geminilake) {
+ power_well = lookup_power_well(display,
GLK_DISP_PW_DPIO_CMN_C);
if (intel_power_well_refcount(power_well) > 0)
bxt_dpio_phy_verify_state(display,
@@ -962,21 +953,20 @@ static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
}
}
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
(intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+static void gen9_assert_dbuf_enabled(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
+ u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
- drm_WARN(&dev_priv->drm,
+ drm_WARN(display->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
"Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
hw_enabled_dbuf_slices,
@@ -988,18 +978,25 @@ void gen9_disable_dc_states(struct intel_display *display)
struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_cdclk_config cdclk_config = {};
+ u32 old_state = power_domains->dc_state;
if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(display);
return;
}
- gen9_set_dc_state(display, DC_STATE_DISABLE);
-
- if (!HAS_DISPLAY(display))
+ if (HAS_DISPLAY(display)) {
+ intel_dmc_wl_get_noreg(display);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
+ intel_dmc_wl_put_noreg(display);
+ } else {
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
return;
+ }
- intel_dmc_wl_disable(display);
+ if (old_state == DC_STATE_EN_UPTO_DC5 ||
+ old_state == DC_STATE_EN_UPTO_DC6)
+ intel_dmc_wl_disable(display);
intel_cdclk_get_cdclk(display, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
@@ -1007,10 +1004,10 @@ void gen9_disable_dc_states(struct intel_display *display)
intel_cdclk_clock_changed(&display->cdclk.hw,
&cdclk_config));
- gen9_assert_dbuf_enabled(dev_priv);
+ gen9_assert_dbuf_enabled(display);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_dpio_phy_power_wells(dev_priv);
+ if (display->platform.geminilake || display->platform.broxton)
+ bxt_verify_dpio_phy_power_wells(display);
if (DISPLAY_VER(display) >= 11)
/*
@@ -1021,18 +1018,15 @@ void gen9_disable_dc_states(struct intel_display *display)
intel_combo_phy_init(dev_priv);
}
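The reworked flow does two things: it holds the DMC wakelock (the noreg flavour) across the register write that clears the DC state, and it only tears the wakelock down when the previous state was DC5 or DC6 — the states that armed it in gen9_enable_dc5()/skl_enable_dc6(). A toy model of that ordering (plain C; the refcount stands in for the wakelock, and the comments name the driver calls each step mirrors, without reproducing them):

#include <assert.h>

enum dc { DC_OFF, DC5, DC6, DC3CO };

static int wl_refcount;		/* toy stand-in for the DMC wakelock */
static enum dc dc_state = DC6;	/* armed by the toy enable path */

static void disable_dc_states(void)
{
	enum dc old = dc_state;

	wl_refcount++;			/* intel_dmc_wl_get_noreg() */
	dc_state = DC_OFF;		/* gen9_set_dc_state(DC_STATE_DISABLE) */
	wl_refcount--;			/* intel_dmc_wl_put_noreg() */

	if (old == DC5 || old == DC6)
		wl_refcount = 0;	/* intel_dmc_wl_disable() */
}

int main(void)
{
	wl_refcount = 1;		/* taken when DC6 was enabled */
	disable_dc_states();
	assert(dc_state == DC_OFF && wl_refcount == 0);
	return 0;
}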
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
gen9_disable_dc_states(display);
}
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
if (!intel_dmc_has_payload(display))
@@ -1051,63 +1045,58 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
}
}
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+static void i9xx_power_well_sync_hw_noop(struct intel_display *display,
struct i915_power_well *power_well)
{
}
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+static void i9xx_always_on_power_well_noop(struct intel_display *display,
struct i915_power_well *power_well)
{
}
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static bool i9xx_always_on_power_well_enabled(struct intel_display *display,
+ struct i915_power_well *power_well)
{
return true;
}
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
- if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0)
+ if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(display, PIPE_A);
- if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0)
+ if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(display, PIPE_B);
}
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
i830_disable_pipe(display, PIPE_B);
i830_disable_pipe(display, PIPE_A);
}
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool i830_pipes_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
- return intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE &&
- intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE &&
+ intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
}
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
if (intel_power_well_refcount(power_well) > 0)
- i830_pipes_power_well_enable(dev_priv, power_well);
+ i830_pipes_power_well_enable(display, power_well);
else
- i830_pipes_power_well_disable(dev_priv, power_well);
+ i830_pipes_power_well_disable(display, power_well);
}
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+static void vlv_set_power_well(struct intel_display *display,
struct i915_power_well *power_well, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
u32 mask;
u32 state;
@@ -1131,7 +1120,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
@@ -1142,21 +1131,22 @@ out:
vlv_punit_put(dev_priv);
}
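vlv_set_power_well() drives a sideband handshake: read-modify-write the per-well control field in PUNIT_REG_PWRGT_CTRL, then poll PUNIT_REG_PWRGT_STATUS until it reports the requested state (or time out after 100 ms, as above). A standalone model with fake registers — the 2-bit field encoding here is illustrative, not VLV's actual one:

#include <assert.h>
#include <stdint.h>

#define PWRGT_MASK(idx)	(0x3u << ((idx) * 2))	/* 2 control bits per well */
#define PWR_ON(idx)	(0x0u << ((idx) * 2))
#define PWR_GATE(idx)	(0x2u << ((idx) * 2))

static uint32_t ctrl_reg, status_reg;	/* fake Punit registers */

static void punit_model_tick(void)
{
	status_reg = ctrl_reg;	/* fake hardware: status follows control */
}

static int set_power_well(int idx, int enable)
{
	uint32_t mask = PWRGT_MASK(idx);
	uint32_t state = enable ? PWR_ON(idx) : PWR_GATE(idx);

	if ((status_reg & mask) == state)
		return 0;			/* already there, nothing to do */

	ctrl_reg = (ctrl_reg & ~mask) | state;	/* read-modify-write one field */
	punit_model_tick();			/* the driver polls with wait_for() */
	return (status_reg & mask) == state ? 0 : -1;
}

int main(void)
{
	ctrl_reg = status_reg = PWR_GATE(0) | PWR_GATE(1);
	assert(set_power_well(1, 1) == 0);
	assert((status_reg & PWRGT_MASK(0)) == PWR_GATE(0));	/* well 0 untouched */
	return 0;
}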
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
}
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool vlv_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
bool enabled = false;
u32 mask;
@@ -1173,7 +1163,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1183,14 +1173,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- drm_WARN_ON(&dev_priv->drm, ctrl != state);
+ drm_WARN_ON(display->drm, ctrl != state);
vlv_punit_put(dev_priv);
return enabled;
}
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+static void vlv_init_display_clock_gating(struct intel_display *display)
{
/*
* On driver load, a pipe may be active and driving a DSI display.
@@ -1198,25 +1188,25 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
- intel_de_write(dev_priv, MI_ARB_VLV,
+ intel_de_write(display, MI_ARB_VLV,
MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- intel_de_write(dev_priv, CBR1_VLV, 0);
+ intel_de_write(display, CBR1_VLV, 0);
- drm_WARN_ON(&dev_priv->drm, DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
- intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq,
+ drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
+ intel_de_write(display, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq,
1000));
}
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1228,17 +1218,17 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(dev_priv, pipe) {
- u32 val = intel_de_read(dev_priv, DPLL(dev_priv, pipe));
+ for_each_pipe(display, pipe) {
+ u32 val = intel_de_read(display, DPLL(display, pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
+ intel_de_write(display, DPLL(display, pipe), val);
}
- vlv_init_display_clock_gating(dev_priv);
+ vlv_init_display_clock_gating(display);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
@@ -1248,14 +1238,14 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
- if (dev_priv->display.power.domains.initializing)
+ if (display->power.domains.initializing)
return;
intel_hpd_init(dev_priv);
intel_hpd_poll_disable(dev_priv);
/* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_ANALOG)
intel_crt_reset(&encoder->base);
}
@@ -1265,9 +1255,9 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(display);
}
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_deinit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
@@ -1279,33 +1269,33 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
vlv_pps_reset_all(display);
/* Prevent us from re-enabling polling by accident in late suspend */
- if (!dev_priv->drm.dev->power.is_suspended)
+ if (!display->drm->dev->power.is_suspended)
intel_hpd_poll_enable(dev_priv);
}
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
- vlv_display_power_well_init(dev_priv);
+ vlv_display_power_well_init(display);
}
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_display_power_well_deinit(dev_priv);
+ vlv_display_power_well_deinit(display);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
@@ -1318,32 +1308,32 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
+ intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST);
}
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
+ intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
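BITS_SET() is an all-of test, not an any-of test — it only passes when every bit in the mask is present, which is what the PHY status cross-check below relies on. A minimal standalone check:

#include <assert.h>

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

int main(void)
{
	assert(BITS_SET(0xf0, 0x30));	/* every requested bit present */
	assert(!BITS_SET(0x40, 0x60));	/* one of the requested bits missing */
	return 0;
}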
static void assert_chv_phy_status(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
u32 phy_control = display->power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
@@ -1368,7 +1358,7 @@ static void assert_chv_phy_status(struct intel_display *display)
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
- if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
+ if (intel_power_well_is_enabled(display, cmn_bc)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY0);
/* this assumes override is only used to enable lanes */
@@ -1409,7 +1399,7 @@ static void assert_chv_phy_status(struct intel_display *display)
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
}
- if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
+ if (intel_power_well_is_enabled(display, cmn_d)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY1);
/* this assumes override is only used to enable lanes */
@@ -1444,10 +1434,10 @@ static void assert_chv_phy_status(struct intel_display *display)
#undef BITS_SET
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
u32 tmp;
@@ -1463,7 +1453,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
/* Poll for phypwrgood signal */
if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
@@ -1507,10 +1497,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
assert_chv_phy_status(display);
}
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
@@ -1531,7 +1521,7 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
intel_de_write(display, DISPLAY_PHY_CONTROL,
display->power.chv_phy_control);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
drm_dbg_kms(display->drm,
"Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
@@ -1543,9 +1533,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_chv_phy_status(display);
}
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, val, expected, actual;
/*
@@ -1555,7 +1546,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->display.power.chv_phy_assert[phy])
+ if (!display->power.chv_phy_assert[phy])
return;
if (ch == DPIO_CH0)
@@ -1598,7 +1589,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
DPIO_ALLDL_POWERDOWN_CH1, val);
- drm_WARN(&dev_priv->drm, actual != expected,
+ drm_WARN(display->drm, actual != expected,
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
!!(actual & DPIO_ALLDL_POWERDOWN),
!!(actual & DPIO_ANYDL_POWERDOWN),
@@ -1607,10 +1598,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
reg, val);
}
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
bool was_override;
@@ -1645,7 +1635,6 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &display->power.domains;
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
@@ -1669,14 +1658,15 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
assert_chv_phy_status(display);
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+ assert_chv_phy_powergate(display, phy, ch, override, mask);
mutex_unlock(&power_domains->lock);
}
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool chv_pipe_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
@@ -1688,7 +1678,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
@@ -1697,17 +1687,18 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
+ drm_WARN_ON(display->drm, ctrl << 16 != state);
vlv_punit_put(dev_priv);
return enabled;
}
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+static void chv_set_pipe_power_well(struct intel_display *display,
struct i915_power_well *power_well,
bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
@@ -1728,7 +1719,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
@@ -1739,32 +1730,33 @@ out:
vlv_punit_put(dev_priv);
}
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->display.power.chv_phy_control);
+ intel_de_write(display, DISPLAY_PHY_CONTROL,
+ display->power.chv_phy_control);
}
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- chv_set_pipe_power_well(dev_priv, power_well, true);
+ chv_set_pipe_power_well(display, power_well, true);
- vlv_display_power_well_init(dev_priv);
+ vlv_display_power_well_init(display);
}
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_display_power_well_deinit(dev_priv);
+ vlv_display_power_well_deinit(display);
- chv_set_pipe_power_well(dev_priv, power_well, false);
+ chv_set_pipe_power_well(display, power_well, false);
}
static void
-tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+tgl_tc_cold_request(struct intel_display *display, bool block)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u8 tries = 0;
int ret;
@@ -1805,31 +1797,31 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
}
static void
-tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- tgl_tc_cold_request(i915, true);
+ tgl_tc_cold_request(display, true);
}
static void
-tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- tgl_tc_cold_request(i915, false);
+ tgl_tc_cold_request(display, false);
}
static void
-tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
if (intel_power_well_refcount(power_well) > 0)
- tgl_tc_cold_off_power_well_enable(i915, power_well);
+ tgl_tc_cold_off_power_well_enable(display, power_well);
else
- tgl_tc_cold_off_power_well_disable(i915, power_well);
+ tgl_tc_cold_off_power_well_disable(display, power_well);
}
static bool
-tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
/*
@@ -1839,17 +1831,18 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
return intel_power_well_refcount(power_well);
}
-static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
+static void xelpdp_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- icl_tc_port_assert_ref_held(dev_priv, power_well,
- aux_ch_to_digital_port(dev_priv, aux_ch));
+ icl_tc_port_assert_ref_held(display, power_well,
+ aux_ch_to_digital_port(display, aux_ch));
- intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
+ intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
@@ -1862,57 +1855,57 @@ static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
usleep_range(600, 1200);
}
-static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
+static void xelpdp_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
+ intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
usleep_range(10, 30);
}
-static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch)) &
+ return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) &
XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
}
-static void xe2lpd_pica_power_well_enable(struct drm_i915_private *dev_priv,
+static void xe2lpd_pica_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL,
+ intel_de_write(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_REQUEST);
- if (intel_de_wait_for_set(dev_priv, XE2LPD_PICA_PW_CTL,
+ if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
- drm_dbg_kms(&dev_priv->drm, "pica power well enable timeout\n");
+ drm_dbg_kms(display->drm, "pica power well enable timeout\n");
- drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when enabled");
+ drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
}
}
-static void xe2lpd_pica_power_well_disable(struct drm_i915_private *dev_priv,
+static void xe2lpd_pica_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL, 0);
+ intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);
- if (intel_de_wait_for_clear(dev_priv, XE2LPD_PICA_PW_CTL,
+ if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
- drm_dbg_kms(&dev_priv->drm, "pica power well disable timeout\n");
+ drm_dbg_kms(display->drm, "pica power well disable timeout\n");
- drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when disabled");
+ drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
}
}
-static bool xe2lpd_pica_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool xe2lpd_pica_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, XE2LPD_PICA_PW_CTL) &
+ return intel_de_read(display, XE2LPD_PICA_PW_CTL) &
XE2LPD_PICA_CTL_POWER_STATUS;
}
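
The pattern in the hunks above is uniform: each power well hook now takes struct intel_display and derives the legacy drm_i915_private pointer only where not-yet-converted interfaces (punit sideband, TC PHY queries) still need it. A minimal sketch of the idiom, with a hypothetical hook name:

	static void example_power_well_enable(struct intel_display *display,
					      struct i915_power_well *power_well)
	{
		/* Legacy pointer, derived only for unconverted interfaces. */
		struct drm_i915_private *dev_priv = to_i915(display->drm);

		/* Display MMIO helpers take the new pointer directly. */
		intel_de_write(display, DISPLAY_PHY_CONTROL,
			       display->power.chv_phy_control);

		/* Punit sideband accessors still expect the i915 pointer. */
		vlv_punit_get(dev_priv);
		vlv_punit_put(dev_priv);
	}
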
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 93559f7c6100..338379dae44c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -10,21 +10,20 @@
#include "intel_display_power.h"
#include "intel_dpio_phy.h"
-struct drm_i915_private;
struct i915_power_well_ops;
struct intel_display;
struct intel_encoder;
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
- (__power_well) - (__dev_priv)->display.power.domains.power_wells < \
- (__dev_priv)->display.power.domains.power_well_count; \
+#define for_each_power_well(___display, __power_well) \
+ for ((__power_well) = (___display)->power.domains.power_wells; \
+ (__power_well) - (___display)->power.domains.power_wells < \
+ (___display)->power.domains.power_well_count; \
(__power_well)++)
-#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \
- (__dev_priv)->display.power.domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \
+#define for_each_power_well_reverse(___display, __power_well) \
+ for ((__power_well) = (___display)->power.domains.power_wells + \
+ (___display)->power.domains.power_well_count - 1; \
+ (__power_well) - (___display)->power.domains.power_wells >= 0; \
(__power_well)--)
/*
@@ -127,23 +126,23 @@ struct i915_power_well {
u8 instance_idx;
};
-struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
+struct i915_power_well *lookup_power_well(struct intel_display *display,
enum i915_power_well_id id);
-void intel_power_well_enable(struct drm_i915_private *i915,
+void intel_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_disable(struct drm_i915_private *i915,
+void intel_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_sync_hw(struct drm_i915_private *i915,
+void intel_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_get(struct drm_i915_private *i915,
+void intel_power_well_get(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_put(struct drm_i915_private *i915,
+void intel_power_well_put(struct intel_display *display,
struct i915_power_well *power_well);
-bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+bool intel_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well);
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well);
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id);
bool intel_power_well_is_always_on(struct i915_power_well *power_well);
const char *intel_power_well_name(struct i915_power_well *power_well);
@@ -152,7 +151,7 @@ int intel_power_well_refcount(struct i915_power_well *power_well);
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override);
void gen9_enable_dc5(struct intel_display *display);
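
With this header change the iterators and accessors are display-based end to end; a sketch of a caller, assuming an intel_display pointer in scope:

	struct i915_power_well *power_well;

	for_each_power_well(display, power_well)
		intel_power_well_sync_hw(display, power_well);
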
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 49e2e650ebcd..093b386c95e8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -114,11 +114,11 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(display);
- intel_display_driver_init_hw(i915);
+ intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_hpd_init(i915);
- ret = __intel_display_driver_resume(i915, state, ctx);
+ ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index 030c4f873da1..25ba043cbb65 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -3,7 +3,9 @@
#include <linux/slab.h>
-#include "i915_drv.h"
+#include <drm/drm_drv.h>
+
+#include "intel_display_core.h"
#include "intel_display_device.h"
#include "intel_display_params.h"
#include "intel_display_snapshot.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 9bd8f1e505b0..338b9f7b20b8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -14,8 +14,8 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include "i915_drv.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ff6eb93337e0..8271e50e3644 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -45,6 +45,7 @@
#include "i915_vma_types.h"
#include "intel_bios.h"
#include "intel_display.h"
+#include "intel_display_conversion.h"
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
@@ -301,6 +302,15 @@ struct intel_panel_bl_funcs {
u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
};
+/* in 100us units */
+struct intel_pps_delays {
+ u16 power_up; /* eDP: T1+T3, LVDS: T1+T2 */
+ u16 backlight_on; /* eDP: T8, LVDS: T5 */
+ u16 backlight_off; /* eDP: T9, LVDS: T6/TX */
+ u16 power_down; /* eDP: T10, LVDS: T3 */
+ u16 power_cycle; /* eDP: T11+T12, LVDS: T7+T4 */
+};
+
enum drrs_type {
DRRS_TYPE_NONE,
DRRS_TYPE_STATIC,
@@ -328,7 +338,7 @@ struct intel_vbt_panel_data {
int preemphasis;
int vswing;
int bpp;
- struct edp_power_seq pps;
+ struct intel_pps_delays pps;
u8 drrs_msa_timing_delay;
bool low_vswing;
bool hobl;
@@ -587,6 +597,8 @@ struct intel_atomic_state {
bool skip_intermediate_wm;
bool rps_interactive;
+
+ struct work_struct cleanup_work;
};
struct intel_plane_state {
@@ -697,8 +709,8 @@ struct intel_initial_plane_config {
};
struct intel_scaler {
- int in_use;
u32 mode;
+ bool in_use;
};
struct intel_crtc_scaler_state {
@@ -769,6 +781,7 @@ struct skl_wm_level {
u8 lines;
bool enable;
bool ignore_lines;
+ bool auto_min_alloc_wm_enable;
bool can_sagv;
};
@@ -863,6 +876,13 @@ struct intel_crtc_wm_state {
struct skl_ddb_entry plane_ddb[I915_MAX_PLANES];
/* pre-icl: for planar Y */
struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+
+ /*
+ * xe3: Minimum number of display blocks and minimum
+ * SAGV allocation required for async flip
+ */
+ u16 plane_min_ddb[I915_MAX_PLANES];
+ u16 plane_interim_ddb[I915_MAX_PLANES];
} skl;
struct {
@@ -1140,8 +1160,6 @@ struct intel_crtc_state {
bool double_wide;
- int pbn;
-
struct intel_crtc_scaler_state scaler_state;
/* w/a for waiting 2 vblanks during crtc enable */
@@ -1235,7 +1253,7 @@ struct intel_crtc_state {
/* Display Stream compression state */
struct {
bool compression_enable;
- bool dsc_split;
+ int num_streams;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
u16 compressed_bpp_x16;
u8 slice_count;
@@ -1568,8 +1586,8 @@ struct intel_pps {
* requiring a reinitialization. Only relevant on BXT+.
*/
bool bxt_pps_reset;
- struct edp_power_seq pps_delays;
- struct edp_power_seq bios_pps_delays;
+ struct intel_pps_delays pps_delays;
+ struct intel_pps_delays bios_pps_delays;
};
struct intel_psr {
@@ -1803,11 +1821,13 @@ struct intel_lspcon {
struct intel_digital_port {
struct intel_encoder base;
- u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+
+ bool lane_reversal;
+ bool ddi_a_4_lanes;
bool release_cl2_override;
u8 max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
@@ -1946,6 +1966,19 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
}
}
+static inline bool intel_encoder_is_hdmi(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_HDMI:
+ return true;
+ case INTEL_OUTPUT_DDI:
+ /* See if the HDMI encoder is valid. */
+ return i915_mmio_reg_valid(enc_to_intel_hdmi(encoder)->hdmi_reg);
+ default:
+ return false;
+ }
+}
+
static inline struct intel_lspcon *
enc_to_intel_lspcon(struct intel_encoder *encoder)
{
@@ -2086,7 +2119,7 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
* intel_display pointer.
*/
#define __drm_device_to_intel_display(p) \
- ((p) ? &to_i915(p)->display : NULL)
+ ((p) ? __drm_to_display(p) : NULL)
#define __device_to_intel_display(p) \
__drm_device_to_intel_display(dev_get_drvdata(p))
#define __pci_dev_to_intel_display(p) \
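
Among the intel_display_types.h changes, struct intel_pps_delays replaces struct edp_power_seq with plain u16 fields in 100us units. A hypothetical consumer would scale before sleeping:

	/* Sketch only; assumes a populated delays pointer. */
	static void example_wait_panel_power_up(const struct intel_pps_delays *delays)
	{
		unsigned int us = delays->power_up * 100;	/* eDP T1+T3 */

		usleep_range(us, us + 1000);
	}
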
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 87bdacfd9edf..221d3abda791 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -638,8 +638,6 @@ void intel_dmc_disable_program(struct intel_display *display)
pipedmc_clock_gating_wa(display, true);
disable_all_event_handlers(display);
pipedmc_clock_gating_wa(display, false);
-
- intel_dmc_wl_disable(display);
}
void assert_dmc_loaded(struct intel_display *display)
@@ -1146,8 +1144,6 @@ void intel_dmc_suspend(struct intel_display *display)
if (dmc)
flush_work(&dmc->work);
- intel_dmc_wl_disable(display);
-
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(display))
intel_dmc_runtime_pm_put(display);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 5634ff07269d..02de3ae15074 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -5,6 +5,10 @@
#include <linux/kernel.h>
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
@@ -39,7 +43,11 @@
* potential future use.
*/
-#define DMC_WAKELOCK_CTL_TIMEOUT 5
+/*
+ * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use the
+ * atomic variant of the MMIO wait helpers.
+ */
+#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50
struct intel_dmc_wl_range {
@@ -47,8 +55,90 @@ struct intel_dmc_wl_range {
u32 end;
};
-static struct intel_dmc_wl_range lnl_wl_range[] = {
+static const struct intel_dmc_wl_range powered_off_ranges[] = {
{ .start = 0x60000, .end = 0x7ffff },
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
+ { .start = 0x45500 }, /* DC_STATE_SEL */
+ { .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
+ { .start = 0x45504 }, /* DC_STATE_EN */
+ { .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
+ { .start = 0x454f0 }, /* RETENTION_CTRL */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+
+ /* TRANS_CMTG_CTL_* */
+ { .start = 0x6fa88 },
+ { .start = 0x6fb88 },
+
+ { .start = 0x46430 }, /* CHICKEN_DCPR_1 */
+ { .start = 0x46434 }, /* CHICKEN_DCPR_2 */
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+ { .start = 0x42084 }, /* CHICKEN_MISC_2 */
+ { .start = 0x42088 }, /* CHICKEN_MISC_3 */
+ { .start = 0x46160 }, /* CMTG_CLK_SEL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+
+ { .start = 0x45504 }, /* DC_STATE_EN */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+
+ /* Scanline registers */
+ { .start = 0x70000 },
+ { .start = 0x70004 },
+ { .start = 0x70014 },
+ { .start = 0x70018 },
+ { .start = 0x71000 },
+ { .start = 0x71004 },
+ { .start = 0x71014 },
+ { .start = 0x71018 },
+ { .start = 0x72000 },
+ { .start = 0x72004 },
+ { .start = 0x72014 },
+ { .start = 0x72018 },
+ { .start = 0x73000 },
+ { .start = 0x73004 },
+ { .start = 0x73014 },
+ { .start = 0x73018 },
+ { .start = 0x7b000 },
+ { .start = 0x7b004 },
+ { .start = 0x7b014 },
+ { .start = 0x7b018 },
+ { .start = 0x7c000 },
+ { .start = 0x7c004 },
+ { .start = 0x7c014 },
+ { .start = 0x7c018 },
+
+ {},
};
static void __intel_dmc_wl_release(struct intel_display *display)
@@ -72,15 +162,18 @@ static void intel_dmc_wl_work(struct work_struct *work)
spin_lock_irqsave(&wl->lock, flags);
- /* Bail out if refcount reached zero while waiting for the spinlock */
- if (!refcount_read(&wl->refcount))
+ /*
+ * Bail out if refcount became non-zero while waiting for the spinlock,
+ * meaning that the lock is now taken again.
+ */
+ if (refcount_read(&wl->refcount))
goto out_unlock;
__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK, 0,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
WARN_RATELIMIT(1, "DMC wakelock release timed out");
goto out_unlock;
}
@@ -91,38 +184,110 @@ out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-static bool intel_dmc_wl_check_range(u32 address)
+static void __intel_dmc_wl_take(struct intel_display *display)
{
- int i;
- bool wl_needed = false;
-
- for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
- if (address >= lnl_wl_range[i].start &&
- address <= lnl_wl_range[i].end) {
- wl_needed = true;
- break;
- }
+ struct intel_dmc_wl *wl = &display->wl;
+
+ /*
+ * Only try to take the wakelock if it's not marked as taken
+ * yet. It may already be taken at this point if we have
+ * already released the last reference, but the work has not
+ * run yet.
+ */
+ if (wl->taken)
+ return;
+
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
+ DMC_WAKELOCK_CTL_REQ);
+
+ /*
+ * We need to use the atomic variant of the waiting routine
+ * because the DMC wakelock is also taken in atomic context.
+ */
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ WARN_RATELIMIT(1, "DMC wakelock ack timed out");
+ return;
}
- return wl_needed;
+ wl->taken = true;
+}
+
+static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
+ const struct intel_dmc_wl_range ranges[])
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+
+ for (int i = 0; ranges[i].start; i++) {
+ u32 end = ranges[i].end ?: ranges[i].start;
+
+ if (ranges[i].start <= offset && offset <= end)
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
+{
+ const struct intel_dmc_wl_range *ranges;
+
+ /*
+ * Check that the offset is in one of the ranges for which
+ * registers are powered off during DC states.
+ */
+ if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
+ return true;
+
+ /*
+ * Check that the offset is for a register that is touched by
+ * the DMC and requires a DC exit for proper access.
+ */
+ switch (dc_state) {
+ case DC_STATE_EN_DC3CO:
+ ranges = xe3lpd_dc3co_dmc_ranges;
+ break;
+ case DC_STATE_EN_UPTO_DC5:
+ case DC_STATE_EN_UPTO_DC6:
+ ranges = xe3lpd_dc5_dc6_dmc_ranges;
+ break;
+ default:
+ ranges = NULL;
+ }
+
+ if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
+ return true;
+
+ return false;
}
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
- if (DISPLAY_VER(display) < 20 ||
- !intel_dmc_has_payload(display) ||
- !display->params.enable_dmc_wl)
- return false;
+ return display->params.enable_dmc_wl && intel_dmc_has_payload(display);
+}
- return true;
+static void intel_dmc_wl_sanitize_param(struct intel_display *display)
+{
+ if (!HAS_DMC_WAKELOCK(display))
+ display->params.enable_dmc_wl = 0;
+ else if (display->params.enable_dmc_wl >= 0)
+ display->params.enable_dmc_wl = !!display->params.enable_dmc_wl;
+ else
+ display->params.enable_dmc_wl = DISPLAY_VER(display) >= 30;
+
+ drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d\n",
+ display->params.enable_dmc_wl);
}
void intel_dmc_wl_init(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
- if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
+ intel_dmc_wl_sanitize_param(display);
+
+ if (!display->params.enable_dmc_wl)
return;
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
@@ -130,7 +295,8 @@ void intel_dmc_wl_init(struct intel_display *display)
refcount_set(&wl->refcount, 0);
}
-void intel_dmc_wl_enable(struct intel_display *display)
+/* Must only be called as part of enabling dynamic DC states. */
+void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
struct intel_dmc_wl *wl = &display->wl;
unsigned long flags;
@@ -140,7 +306,9 @@ void intel_dmc_wl_enable(struct intel_display *display)
spin_lock_irqsave(&wl->lock, flags);
- if (wl->enabled)
+ wl->dc_state = dc_state;
+
+ if (drm_WARN_ON(display->drm, wl->enabled))
goto out_unlock;
/*
@@ -151,12 +319,29 @@ void intel_dmc_wl_enable(struct intel_display *display)
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
wl->enabled = true;
- wl->taken = false;
+
+ /*
+ * This would be racy in the following scenario:
+ *
+ * 1. Function A calls intel_dmc_wl_get();
+ * 2. Some function calls intel_dmc_wl_disable();
+ * 3. Some function calls intel_dmc_wl_enable();
+ * 4. Concurrently with (3), function A performs the MMIO in between
+ * setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
+ * __intel_dmc_wl_take().
+ *
+ * TODO: Check with the hardware team whether it is safe to assert the
+ * hardware lock before enabling to avoid such a scenario. Otherwise, we
+ * would need to deal with it via software synchronization.
+ */
+ if (refcount_read(&wl->refcount))
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
@@ -165,40 +350,63 @@ void intel_dmc_wl_disable(struct intel_display *display)
if (!__intel_dmc_wl_supported(display))
return;
- flush_delayed_work(&wl->work);
+ intel_dmc_wl_flush_release_work(display);
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (drm_WARN_ON(display->drm, !wl->enabled))
goto out_unlock;
/* Disable wakelock in DMC */
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
- refcount_set(&wl->refcount, 0);
wl->enabled = false;
+
+ /*
+ * The spec is not explicit about the expectation of existing
+ * lock users at the moment of disabling, but it does say that we must
+ * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
+ * disable with existing lock users.
+ *
+ * TODO: Get the correct expectation from the hardware team.
+ */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+
wl->taken = false;
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- unsigned long flags;
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
+ flush_delayed_work(&wl->work);
+}
+
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
return;
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
goto out_unlock;
+ if (!wl->enabled) {
+ if (!refcount_inc_not_zero(&wl->refcount))
+ refcount_set(&wl->refcount, 1);
+ goto out_unlock;
+ }
+
cancel_delayed_work(&wl->work);
if (refcount_inc_not_zero(&wl->refcount))
@@ -206,26 +414,7 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
refcount_set(&wl->refcount, 1);
- /*
- * Only try to take the wakelock if it's not marked as taken
- * yet. It may be already taken at this point if we have
- * already released the last reference, but the work has not
- * run yet.
- */
- if (!wl->taken) {
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
- DMC_WAKELOCK_CTL_REQ);
-
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
- WARN_RATELIMIT(1, "DMC wakelock ack timed out");
- goto out_unlock;
- }
-
- wl->taken = true;
- }
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
@@ -239,12 +428,9 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
- return;
-
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
goto out_unlock;
if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
@@ -252,6 +438,9 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
goto out_unlock;
if (refcount_dec_and_test(&wl->refcount)) {
+ if (!wl->enabled)
+ goto out_unlock;
+
__intel_dmc_wl_release(display);
goto out_unlock;
@@ -260,3 +449,13 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+
+void intel_dmc_wl_get_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_get(display, INVALID_MMIO_REG);
+}
+
+void intel_dmc_wl_put_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_put(display, INVALID_MMIO_REG);
+}
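
In the new range tables above, an entry with .end left at zero covers just .start (the `ranges[i].end ?: ranges[i].start` fallback), and the empty {} entry is the sentinel that terminates the walk. A table mixing single registers and spans therefore looks like:

	static const struct intel_dmc_wl_range example_ranges[] = {
		{ .start = 0x45504 },			/* single register */
		{ .start = 0x8f000, .end = 0x8ffff },	/* whole span */
		{},					/* sentinel */
	};
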
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.h b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
index adab51208d0a..5488fbdf29b8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
@@ -15,17 +15,27 @@
struct intel_display;
struct intel_dmc_wl {
- spinlock_t lock; /* protects enabled, taken and refcount */
+ spinlock_t lock; /* protects enabled, taken, dc_state and refcount */
bool enabled;
bool taken;
refcount_t refcount;
+ /*
+ * We are keeping a copy of the enabled DC state because
+ * intel_display.power.domains is protected by a mutex and we do
+ * not want to call mutex_lock() in atomic context, where some of
+ * the tracked MMIO operations happen.
+ */
+ u32 dc_state;
struct delayed_work work;
};
void intel_dmc_wl_init(struct intel_display *display);
-void intel_dmc_wl_enable(struct intel_display *display);
+void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state);
void intel_dmc_wl_disable(struct intel_display *display);
+void intel_dmc_wl_flush_release_work(struct intel_display *display);
void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg);
void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg);
+void intel_dmc_wl_get_noreg(struct intel_display *display);
+void intel_dmc_wl_put_noreg(struct intel_display *display);
#endif /* __INTEL_WAKELOCK_H__ */
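
Because intel_dmc_wl_get()/intel_dmc_wl_put() skip the range check when handed INVALID_MMIO_REG, the new _noreg variants hold the wakelock unconditionally across a sequence of accesses. A hypothetical caller:

	intel_dmc_wl_get_noreg(display);
	/* ... several tracked MMIO accesses ... */
	intel_dmc_wl_put_noreg(display);
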
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ff5ba7b3035f..f1f3b1bb1e89 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
+#include <linux/seq_buf.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>
@@ -93,8 +94,6 @@
#include "intel_vrr.h"
#include "intel_crtc_state_dump.h"
-#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -109,10 +108,19 @@
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
-/* With Single pipe configuration, HW is capable of supporting maximum
- * of 4 slices per line.
+/*
+ * With Single pipe configuration, HW is capable of supporting maximum of:
+ * 2 slices per line for ICL, BMG
+ * 4 slices per line for other platforms.
+ * For now, consider a max of 2 slices per line, which works for all
+ * platforms. With this we can have a max of 4 DSC slices per pipe.
+ *
+ * Only for higher resolutions, where 12-slice support is required with
+ * the ultrajoiner, can each pipe support 3 slices.
+ *
+ * #TODO Split this better to use 4 slices/dsc engine where supported.
*/
-static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+static const u8 valid_dsc_slicecount[] = {1, 2, 3, 4};
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -257,6 +265,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -266,7 +275,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
if (intel_dp->num_sink_rates)
return;
- drm_err(&dp_to_i915(intel_dp)->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -281,6 +290,7 @@ static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -294,7 +304,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
return;
}
- drm_err(&dp_to_i915(intel_dp)->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
@@ -327,7 +337,9 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
- if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (drm_WARN_ON(display->drm,
index < 0 || index >= intel_dp->num_common_rates))
return 162000;
@@ -454,16 +466,16 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
bool intel_dp_has_joiner(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* eDP MSO is not compatible with joiner */
if (intel_dp->mso_link_count)
return false;
- return DISPLAY_VER(dev_priv) >= 12 ||
- (DISPLAY_VER(dev_priv) == 11 &&
+ return DISPLAY_VER(display) >= 12 ||
+ (DISPLAY_VER(display) == 11 &&
encoder->port != PORT_A);
}
@@ -492,12 +504,13 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (intel_encoder_is_c10phy(encoder))
return 810000;
- if (DISPLAY_VERx100(to_i915(encoder->base.dev)) == 1401)
+ if (DISPLAY_VERx100(display) == 1401)
return 1350000;
return 2000000;
@@ -551,17 +564,16 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
static const int g4x_rates[] = {
162000, 270000
};
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const int *source_rates;
int size, max_rate = 0, vbt_max_rate;
/* This should only be done once */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->source_rates || intel_dp->num_source_rates);
- if (DISPLAY_VER(dev_priv) >= 14) {
- if (IS_BATTLEMAGE(dev_priv)) {
+ if (DISPLAY_VER(display) >= 14) {
+ if (display->platform.battlemage) {
source_rates = bmg_rates;
size = ARRAY_SIZE(bmg_rates);
} else {
@@ -569,26 +581,26 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(mtl_rates);
}
max_rate = mtl_max_source_rate(intel_dp);
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
source_rates = icl_rates;
size = ARRAY_SIZE(icl_rates);
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
max_rate = dg2_max_source_rate(intel_dp);
- else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.alderlake_p || display->platform.alderlake_s ||
+ display->platform.dg1 || display->platform.rocketlake)
max_rate = 810000;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
- } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
- IS_BROADWELL(dev_priv)) {
+ } else if ((display->platform.haswell && !display->platform.haswell_ulx) ||
+ display->platform.broadwell) {
source_rates = hsw_rates;
size = ARRAY_SIZE(hsw_rates);
} else {
@@ -679,18 +691,18 @@ static int link_config_cmp_by_bw(const void *a, const void *b, const void *p)
static void intel_dp_link_config_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_dp_link_config *lc;
int num_common_lane_configs;
int i;
int j;
- if (drm_WARN_ON(&i915->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
+ if (drm_WARN_ON(display->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
return;
num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1;
- if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates * num_common_lane_configs >
+ if (drm_WARN_ON(display->drm, intel_dp->num_common_rates * num_common_lane_configs >
ARRAY_SIZE(intel_dp->link.configs)))
return;
@@ -714,10 +726,10 @@ static void intel_dp_link_config_init(struct intel_dp *intel_dp)
void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct intel_dp_link_config *lc;
- if (drm_WARN_ON(&i915->drm, idx < 0 || idx >= intel_dp->link.num_configs))
+ if (drm_WARN_ON(display->drm, idx < 0 || idx >= intel_dp->link.num_configs))
idx = 0;
lc = &intel_dp->link.configs[idx];
@@ -746,9 +758,9 @@ int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lan
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
@@ -758,7 +770,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates);
/* Paranoia, there should always be something in common. */
- if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
+ if (drm_WARN_ON(display->drm, intel_dp->num_common_rates == 0)) {
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
@@ -806,30 +818,30 @@ int intel_dp_bw_fec_overhead(bool fec_enabled)
}
static int
-small_joiner_ram_size_bits(struct drm_i915_private *i915)
+small_joiner_ram_size_bits(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 17280 * 8;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return 7680 * 8;
else
return 6144 * 8;
}
-u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
+u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
{
u32 bits_per_pixel = bpp;
int i;
/* Error out if the max bpp is less than smallest allowed valid bpp */
if (bits_per_pixel < valid_dsc_bpp[0]) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ drm_dbg_kms(display->drm, "Unsupported BPP %u, min %u\n",
bits_per_pixel, valid_dsc_bpp[0]);
return 0;
}
/* From XE_LPD onwards we support from bpc up to uncompressed bpp-1 BPPs */
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
/*
@@ -841,7 +853,8 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
* DSC enabled.
*/
if (bits_per_pixel < 8) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
+ drm_dbg_kms(display->drm,
+ "Unsupported BPP %u, min 8\n",
bits_per_pixel);
return 0;
}
@@ -852,7 +865,7 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
if (bits_per_pixel < valid_dsc_bpp[i + 1])
break;
}
- drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
+ drm_dbg_kms(display->drm, "Set dsc bpp from %d to VESA %d\n",
bits_per_pixel, valid_dsc_bpp[i]);
bits_per_pixel = valid_dsc_bpp[i];
@@ -887,11 +900,10 @@ static u32 small_joiner_ram_max_bpp(struct intel_display *display,
u32 mode_hdisplay,
int num_joined_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 max_bpp;
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp = small_joiner_ram_size_bits(i915) / mode_hdisplay;
+ max_bpp = small_joiner_ram_size_bits(display) / mode_hdisplay;
max_bpp *= num_joined_pipes;
@@ -909,11 +921,10 @@ static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay)
}
static
-u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
+u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
u32 mode_clock, u32 mode_hdisplay,
int num_joined_pipes)
{
- struct intel_display *display = to_intel_display(&i915->drm);
u32 max_bpp = small_joiner_ram_max_bpp(display, mode_hdisplay, num_joined_pipes);
if (num_joined_pipes > 1)
@@ -925,7 +936,7 @@ u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
return max_bpp;
}
-u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
+u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
int num_joined_pipes,
@@ -967,17 +978,17 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
bits_per_pixel = min_t(u32, bits_per_pixel, 31);
- drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
+ drm_dbg_kms(display->drm, "Max link bpp is %u for %u timeslots "
"total bw %u pixel clock %u\n",
bits_per_pixel, timeslots,
(link_clock * lane_count * 8),
intel_dp_mode_to_fec_clock(mode_clock));
- joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock,
+ joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, mode_clock,
mode_hdisplay, num_joined_pipes);
bits_per_pixel = min(bits_per_pixel, joiner_max_bpp);
- bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);
+ bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(display, bits_per_pixel, pipe_bpp);
return bits_per_pixel;
}
@@ -986,7 +997,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
int num_joined_pipes)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u8 min_slice_count, i;
int max_slice_width;
@@ -1001,12 +1012,12 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
* Due to some DSC engine BW limitations, we need to enable a second
* slice and VDSC engine whenever we approach close enough to the max CDCLK
*/
- if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
+ if (mode_clock >= ((display->cdclk.max_cdclk_freq * 85) / 100))
min_slice_count = max_t(u8, min_slice_count, 2);
max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported slice width %d by DP DSC Sink device\n",
max_slice_width);
return 0;
@@ -1020,6 +1031,13 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
+ /*
+ * 3 DSC slices per pipe need 3 DSC engines, which is supported only
+ * with the ultrajoiner.
+ */
+ if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4)
+ continue;
+
if (test_slice_count >
drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
break;
@@ -1032,11 +1050,14 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2)
continue;
+ if (mode_hdisplay % test_slice_count)
+ continue;
+
if (min_slice_count <= test_slice_count)
return test_slice_count;
}
- drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+ drm_dbg_kms(display->drm, "Unsupported Slice Count %d\n",
min_slice_count);
return 0;
}
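
To make the new checks concrete with hypothetical numbers: with num_joined_pipes == 4, the new valid_dsc_slicecount[] entry 3 yields test_slice_count = 12, which is accepted only if the sink supports 12 slices and hdisplay divides evenly (e.g. 15360 % 12 == 0); with fewer than 4 joined pipes the 3-slice entry is skipped outright.
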
@@ -1044,7 +1065,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
static bool source_can_output(struct intel_dp *intel_dp,
enum intel_output_format format)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
switch (format) {
case INTEL_OUTPUT_FORMAT_RGB:
@@ -1056,11 +1077,11 @@ static bool source_can_output(struct intel_dp *intel_dp,
* Also, ILK doesn't seem capable of DP YCbCr output.
* The displayed image is severely corrupted. SNB+ is fine.
*/
- return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);
+ return !HAS_GMCH(display) && !display->platform.ironlake;
case INTEL_OUTPUT_FORMAT_YCBCR420:
/* Platform < Gen 11 cannot output YCbCr420 format */
- return DISPLAY_VER(i915) >= 11;
+ return DISPLAY_VER(display) >= 11;
default:
MISSING_CASE(format);
@@ -1120,8 +1141,8 @@ static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
enum intel_output_format sink_format)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
enum intel_output_format force_dsc_output_format =
intel_dp->force_dsc_output_format;
enum intel_output_format output_format;
@@ -1132,7 +1153,7 @@ intel_dp_output_format(struct intel_connector *connector,
dfp_can_convert(intel_dp, force_dsc_output_format, sink_format)))
return force_dsc_output_format;
- drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n");
+ drm_dbg_kms(display->drm, "Cannot force DSC output format\n");
}
if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
@@ -1146,7 +1167,7 @@ intel_dp_output_format(struct intel_connector *connector,
else
output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));
+ drm_WARN_ON(display->drm, !source_can_output(intel_dp, output_format));
return output_format;
}
@@ -1197,7 +1218,7 @@ intel_dp_mode_min_output_bpp(struct intel_connector *connector,
return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}
-static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+static bool intel_dp_hdisplay_bad(struct intel_display *display,
int hdisplay)
{
/*
@@ -1213,7 +1234,7 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
*
* TODO: confirm the behaviour on HSW+
*/
- return hdisplay == 4096 && !HAS_DDI(dev_priv);
+ return hdisplay == 4096 && !HAS_DDI(display);
}
static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
@@ -1314,7 +1335,7 @@ bool intel_dp_needs_joiner(struct intel_dp *intel_dp,
int hdisplay, int clock,
int num_joined_pipes)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int hdisplay_limit;
if (!intel_dp_has_joiner(intel_dp))
@@ -1322,9 +1343,9 @@ bool intel_dp_needs_joiner(struct intel_dp *intel_dp,
num_joined_pipes /= 2;
- hdisplay_limit = DISPLAY_VER(i915) >= 30 ? 6144 : 5120;
+ hdisplay_limit = DISPLAY_VER(display) >= 30 ? 6144 : 5120;
- return clock > num_joined_pipes * i915->display.cdclk.max_dotclk_freq ||
+ return clock > num_joined_pipes * display->cdclk.max_dotclk_freq ||
hdisplay > num_joined_pipes * hdisplay_limit;
}
@@ -1333,16 +1354,15 @@ int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
int hdisplay, int clock)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (connector->force_joined_pipes)
return connector->force_joined_pipes;
- if (HAS_ULTRAJOINER(i915) &&
+ if (HAS_ULTRAJOINER(display) &&
intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 4))
return 4;
- if ((HAS_BIGJOINER(i915) || HAS_UNCOMPRESSED_JOINER(i915)) &&
+ if ((HAS_BIGJOINER(display) || HAS_UNCOMPRESSED_JOINER(display)) &&
intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 2))
return 2;
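
Worked through with hypothetical numbers on a DISPLAY_VER >= 30 part (hdisplay_limit 6144): a 7680-wide mode exceeds 1 * 6144, so intel_dp_needs_joiner(..., 2) is true and two joined pipes are used; only beyond 2 * 6144 = 12288 (or beyond twice the max dot clock) would the 4-pipe ultrajoiner path be taken.
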
@@ -1351,12 +1371,12 @@ int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
bool intel_dp_has_dsc(const struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- if (!HAS_DSC(i915))
+ if (!HAS_DSC(display))
return false;
- if (connector->mst_port && !HAS_DSC_MST(i915))
+ if (connector->mst_port && !HAS_DSC_MST(display))
return false;
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
@@ -1373,13 +1393,14 @@ static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
enum drm_mode_status status;
@@ -1412,7 +1433,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ if (intel_dp_hdisplay_bad(display, mode->hdisplay))
return MODE_H_ILLEGAL;
max_link_clock = intel_dp_max_link_rate(intel_dp);
@@ -1447,7 +1468,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
true);
} else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
- intel_dp_dsc_get_max_compressed_bpp(dev_priv,
+ intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
max_lanes,
target_clock,
@@ -1465,7 +1486,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- if (intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc)
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc)
@@ -1478,51 +1499,43 @@ intel_dp_mode_valid(struct drm_connector *_connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
}
-bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
+bool intel_dp_source_supports_tps3(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
+ return DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell;
}
-bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
+bool intel_dp_source_supports_tps4(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 10;
+ return DISPLAY_VER(display) >= 10;
}
-static void snprintf_int_array(char *str, size_t len,
- const int *array, int nelem)
+static void seq_buf_print_array(struct seq_buf *s, const int *array, int nelem)
{
int i;
- str[0] = '\0';
-
- for (i = 0; i < nelem; i++) {
- int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
- if (r >= len)
- return;
- str += r;
- len -= r;
- }
+ for (i = 0; i < nelem; i++)
+ seq_buf_printf(s, "%s%d", i ? ", " : "", array[i]);
}
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- char str[128]; /* FIXME: too big for stack? */
+ struct intel_display *display = to_intel_display(intel_dp);
+ DECLARE_SEQ_BUF(s, 128); /* FIXME: too big for stack? */
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- snprintf_int_array(str, sizeof(str),
- intel_dp->source_rates, intel_dp->num_source_rates);
- drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
+ seq_buf_print_array(&s, intel_dp->source_rates, intel_dp->num_source_rates);
+ drm_dbg_kms(display->drm, "source rates: %s\n", seq_buf_str(&s));
- snprintf_int_array(str, sizeof(str),
- intel_dp->sink_rates, intel_dp->num_sink_rates);
- drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
+ seq_buf_clear(&s);
+ seq_buf_print_array(&s, intel_dp->sink_rates, intel_dp->num_sink_rates);
+ drm_dbg_kms(display->drm, "sink rates: %s\n", seq_buf_str(&s));
- snprintf_int_array(str, sizeof(str),
- intel_dp->common_rates, intel_dp->num_common_rates);
- drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
+ seq_buf_clear(&s);
+ seq_buf_print_array(&s, intel_dp->common_rates, intel_dp->num_common_rates);
+ drm_dbg_kms(display->drm, "common rates: %s\n", seq_buf_str(&s));
}
static int forced_link_rate(struct intel_dp *intel_dp)
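
The seq_buf conversion above replaces the hand-rolled snprintf loop; the helpers come from <linux/seq_buf.h> and truncate safely when the buffer fills. A minimal sketch of the pattern:

	DECLARE_SEQ_BUF(s, 32);

	seq_buf_printf(&s, "%d, %d", 162000, 270000);
	drm_dbg_kms(display->drm, "rates: %s\n", seq_buf_str(&s));
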
@@ -1559,11 +1572,11 @@ intel_dp_min_link_rate(struct intel_dp *intel_dp)
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int i = intel_dp_rate_index(intel_dp->sink_rates,
intel_dp->num_sink_rates, rate);
- if (drm_WARN_ON(&i915->drm, i < 0))
+ if (drm_WARN_ON(display->drm, i < 0))
i = 0;
return i;
@@ -1593,13 +1606,13 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ if (DISPLAY_VER(display) == 11 && encoder->port != PORT_A &&
!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
@@ -1614,13 +1627,15 @@ bool intel_dp_supports_fec(struct intel_dp *intel_dp,
drm_dp_sink_supports_fec(connector->dp.fec_capability);
}
-bool intel_dp_supports_dsc(const struct intel_connector *connector,
+bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
if (!intel_dp_has_dsc(connector))
return false;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
return false;
return intel_dsc_source_support(crtc_state);
@@ -1662,8 +1677,8 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
int bpp, bpc;
bpc = crtc_state->pipe_bpp / 3;
@@ -1685,13 +1700,13 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
bpp = bpc * 3;
if (intel_dp_is_edp(intel_dp)) {
 /* Get bpp from VBT only for panels that don't have bpp in EDID */
- if (intel_connector->base.display_info.bpc == 0 &&
- intel_connector->panel.vbt.edp.bpp &&
- intel_connector->panel.vbt.edp.bpp < bpp) {
- drm_dbg_kms(&dev_priv->drm,
+ if (connector->base.display_info.bpc == 0 &&
+ connector->panel.vbt.edp.bpp &&
+ connector->panel.vbt.edp.bpp < bpp) {
+ drm_dbg_kms(display->drm,
"clamping bpp for eDP panel to BIOS-provided %i\n",
- intel_connector->panel.vbt.edp.bpp);
- bpp = intel_connector->panel.vbt.edp.bpp;
+ connector->panel.vbt.edp.bpp);
+ bpp = connector->panel.vbt.edp.bpp;
}
}
@@ -1700,13 +1715,13 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
static bool has_seamless_m_n(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* Seamless M/N reprogramming only implemented
* for BDW+ double buffered M/N registers so far.
*/
- return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+ return HAS_DOUBLE_BUFFERED_M_N(display) &&
intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}
@@ -1768,13 +1783,12 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-static
-u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915)
+int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
{
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return 12;
- if (DISPLAY_VER(i915) == 11)
+ if (DISPLAY_VER(display) == 11)
return 10;
return 0;
@@ -1783,17 +1797,17 @@ u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915)
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 max_req_bpc)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int i, num_bpc;
u8 dsc_bpc[3] = {};
- u8 dsc_max_bpc;
+ int dsc_max_bpc;
- dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
+ dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
if (!dsc_max_bpc)
return dsc_max_bpc;
- dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
+ dsc_max_bpc = min(dsc_max_bpc, max_req_bpc);
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
dsc_bpc);
@@ -1805,9 +1819,9 @@ int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
return 0;
}
-static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915)
+static int intel_dp_source_dsc_version_minor(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 14 ? 2 : 1;
+ return DISPLAY_VER(display) >= 14 ? 2 : 1;
}
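
The negotiated DSC minor version is the min() of the source and sink values, as used in intel_dp_dsc_compute_params() below: for example, a display ver 14+ source (minor 2, i.e. DSC 1.2) paired with a sink advertising minor 1 negotiates DSC 1.1.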
static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
@@ -1841,7 +1855,7 @@ static int intel_dp_get_slice_height(int vactive)
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
int ret;
@@ -1864,7 +1878,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
vdsc_cfg->dsc_version_minor =
- min(intel_dp_source_dsc_version_minor(i915),
+ min(intel_dp_source_dsc_version_minor(display),
intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
if (vdsc_cfg->convert_rgb)
vdsc_cfg->convert_rgb =
@@ -1874,7 +1888,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
if (!vdsc_cfg->line_buf_depth) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
@@ -1889,7 +1903,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
enum intel_output_format output_format)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u8 sink_dsc_format;
switch (output_format) {
@@ -1900,7 +1914,7 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
sink_dsc_format = DP_DSC_YCbCr444;
break;
case INTEL_OUTPUT_FORMAT_YCBCR420:
- if (min(intel_dp_source_dsc_version_minor(i915),
+ if (min(intel_dp_source_dsc_version_minor(display),
intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2)
return false;
sink_dsc_format = DP_DSC_YCbCr420_Native;
@@ -1961,7 +1975,7 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
static
u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc)
{
u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd);
@@ -1986,7 +2000,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connec
return 0;
}
-int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
+int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_config)
{
/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
switch (pipe_config->output_format) {
@@ -2004,7 +2018,7 @@ int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
}
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc)
{
return intel_dp_dsc_max_sink_compressed_bppx16(connector,
@@ -2019,13 +2033,22 @@ static int dsc_src_min_compressed_bpp(void)
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /*
+ * Forcing DSC and using the platform's max compressed bpp has been seen
+ * to cause underruns. Since DSC isn't actually needed in these cases,
+ * limit the max compressed bpp to 18, a value that is safe across
+ * platforms with different pipe bpps.
+ */
+ if (intel_dp->force_dsc_en)
+ return 18;
/*
 * Max compressed bpp for Gen 13+ is 27 bpp.
 * For earlier platforms it is 23 bpp. (Bspec: 49259).
*/
- if (DISPLAY_VER(i915) < 13)
+ if (DISPLAY_VER(display) < 13)
return 23;
else
return 27;
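
A worked example of the forced-DSC cap above, with assumed pipe bpp values: for a 36 bpp (12 bpc) pipe, the platform maximum of 27 bpp is only a 36/27 ≈ 1.33:1 compression ratio, while the 18 bpp cap gives 2:1; the deeper compression keeps link utilization low enough to sidestep the underruns seen when forcing DSC at the platform maximum.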
@@ -2086,13 +2109,13 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
+ struct intel_display *display = to_intel_display(intel_dp);
u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 compressed_bppx16;
u8 bppx16_step;
int ret;
- if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
+ if (DISPLAY_VER(display) < 14 || bppx16_incr <= 1)
bppx16_step = 16;
else
bppx16_step = 16 / bppx16_incr;
@@ -2116,7 +2139,8 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
if (intel_dp->force_dsc_fractional_bpp_en &&
fxp_q4_to_frac(compressed_bppx16))
- drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
+ drm_dbg_kms(display->drm,
+ "Forcing DSC fractional bpp\n");
return 0;
}
@@ -2131,68 +2155,46 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
- int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int dsc_min_bpp;
+ int dsc_max_bpp;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
- dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- pipe_config,
- pipe_bpp / 3);
- dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
-
- dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
+ dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, adjusted_mode->clock,
adjusted_mode->hdisplay,
num_joined_pipes);
- dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
- dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
}
-static
-u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915)
+int intel_dp_dsc_min_src_input_bpc(void)
{
/* Min DSC Input BPC for ICL+ is 8 */
- return HAS_DSC(i915) ? 8 : 0;
+ return 8;
}
static
-bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
+bool is_dsc_pipe_bpp_sufficient(struct link_config_limits *limits,
int pipe_bpp)
{
- u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp;
-
- dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc);
- dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
-
- dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
- dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
-
- return pipe_bpp >= dsc_min_pipe_bpp &&
- pipe_bpp <= dsc_max_pipe_bpp;
+ return pipe_bpp >= limits->pipe.min_bpp &&
+ pipe_bpp <= limits->pipe.max_bpp;
}
static
int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
- struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int forced_bpp;
if (!intel_dp->force_dsc_bpc)
@@ -2200,12 +2202,14 @@ int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
forced_bpp = intel_dp->force_dsc_bpc * 3;
- if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) {
- drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc);
+ if (is_dsc_pipe_bpp_sufficient(limits, forced_bpp)) {
+ drm_dbg_kms(display->drm, "Input DSC BPC forced to %d\n",
+ intel_dp->force_dsc_bpc);
return forced_bpp;
}
- drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
+ drm_dbg_kms(display->drm,
+ "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
intel_dp->force_dsc_bpc);
return 0;
@@ -2217,17 +2221,15 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct link_config_limits *limits,
int timeslots)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- u8 max_req_bpc = conn_state->max_requested_bpc;
- u8 dsc_max_bpc, dsc_max_bpp;
- u8 dsc_min_bpc, dsc_min_bpp;
+ int dsc_max_bpp;
+ int dsc_min_bpp;
u8 dsc_bpc[3] = {};
int forced_bpp, pipe_bpp;
int num_bpc, i, ret;
- forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);
+ forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
@@ -2238,15 +2240,8 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
}
}
- dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
- if (!dsc_max_bpc)
- return -EINVAL;
-
- dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
- dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
-
- dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
- dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
+ dsc_max_bpp = limits->pipe.max_bpp;
+ dsc_min_bpp = limits->pipe.min_bpp;
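+ /*
+ * Note: limits->pipe.{min,max}_bpp have already been clamped to the
+ * source's DSC input bpc range by intel_dp_dsc_compute_pipe_bpp_limits(),
+ * which is why the explicit bpc bounds that used to be applied here
+ * are gone.
+ */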
/*
* Get the maximum DSC bpc that will be supported by any valid
@@ -2275,24 +2270,24 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int pipe_bpp, forced_bpp;
- int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
- int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int dsc_min_bpp;
+ int dsc_max_bpp;
- forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);
+ forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
pipe_bpp = forced_bpp;
} else {
- int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);
+ int max_bpc = limits->pipe.max_bpp / 3;
/* For eDP use max bpp that can be supported with DSC. */
pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
- if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
- drm_dbg_kms(&i915->drm,
+ if (!is_dsc_pipe_bpp_sufficient(limits, pipe_bpp)) {
+ drm_dbg_kms(display->drm,
"Computed BPC is not in DSC BPC limits\n");
return -EINVAL;
}
@@ -2300,17 +2295,9 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
pipe_config->port_clock = limits->max_rate;
pipe_config->lane_count = limits->max_lane_count;
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
- dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- pipe_config,
- pipe_bpp / 3);
- dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
- dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
@@ -2323,6 +2310,26 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
return 0;
}
+static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->fec_enable)
+ return;
+
+ /*
+ * Though eDP v1.5 supports FEC with DSC, it is optional there,
+ * unlike DP. Since FEC is a bandwidth overhead, keep it disabled
+ * for eDP until there is a good reason to enable it.
+ */
+ if (intel_dp_is_edp(intel_dp))
+ return;
+
+ if (intel_dp_is_uhbr(crtc_state))
+ return;
+
+ crtc_state->fec_enable = true;
+}
+
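
The helper above centralizes the FEC decision that was previously folded into a single boolean expression in intel_dp_dsc_compute_config(); note that the intel_dp_supports_fec() check itself moved into intel_dp_supports_dsc(). The early returns encode, in order: FEC already enabled by an earlier pass, eDP (where FEC is optional and pure bandwidth overhead), and UHBR links (whose 128b/132b channel coding has forward error correction built in, so the separate DP 1.4-style FEC block is not used). A sketch of the equivalent single expression, for comparison only:

	crtc_state->fec_enable = crtc_state->fec_enable ||
		(!intel_dp_is_edp(intel_dp) && !intel_dp_is_uhbr(crtc_state));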
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -2330,8 +2337,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int timeslots,
bool compute_pipe_bpp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
@@ -2339,18 +2345,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
int ret;
- /*
- * Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional.
- * Since, FEC is a bandwidth overhead, continue to not enable it for
- * eDP. Until, there is a good reason to do so.
- */
- pipe_config->fec_enable = pipe_config->fec_enable ||
- (!intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, connector, pipe_config) &&
- !intel_dp_is_uhbr(pipe_config));
-
- if (!intel_dp_supports_dsc(connector, pipe_config))
- return -EINVAL;
+ intel_dp_fec_compute_config(intel_dp, pipe_config);
if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
return -EINVAL;
@@ -2369,7 +2364,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
conn_state, limits, timeslots);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No Valid pipe bpp for given mode ret = %d\n", ret);
return ret;
}
@@ -2381,7 +2376,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
true);
if (!pipe_config->dsc.slice_count) {
- drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+ drm_dbg_kms(display->drm,
+ "Unsupported Slice Count %d\n",
pipe_config->dsc.slice_count);
return -EINVAL;
}
@@ -2394,7 +2390,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_hdisplay,
num_joined_pipes);
if (!dsc_dp_slice_count) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Compressed Slice Count not supported\n");
return -EINVAL;
}
@@ -2405,13 +2401,20 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
+ * With the ultrajoiner and 12 slices we need to use 3 VDSC
+ * instances.
*/
- if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
- pipe_config->dsc.dsc_split = true;
+ if (pipe_config->joiner_pipes && num_joined_pipes == 4 &&
+ pipe_config->dsc.slice_count == 12)
+ pipe_config->dsc.num_streams = 3;
+ else if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
+ pipe_config->dsc.num_streams = 2;
+ else
+ pipe_config->dsc.num_streams = 1;
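+ /*
+ * Example: a 4-pipe ultrajoiner config with 12 slices runs 3 VDSC
+ * engines; any other joined config, or a single pipe carrying more
+ * than one slice, runs 2; a single pipe with one slice runs 1.
+ */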
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Cannot compute valid DSC parameters for Input Bpp = %d"
"Compressed BPP = " FXP_Q4_FMT "\n",
pipe_config->pipe_bpp,
@@ -2420,7 +2423,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
}
pipe_config->dsc.compression_enable = true;
- drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+ drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d "
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
@@ -2429,25 +2432,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
-/**
- * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
- * @intel_dp: intel DP
- * @crtc_state: crtc state
- * @dsc: DSC compression mode
- * @limits: link configuration limits
- *
- * Calculates the output link min, max bpp values in @limits based on the
- * pipe bpp range, @crtc_state and @dsc mode.
- *
- * Returns %true in case of success.
+/*
+ * Calculate the output link min, max bpp values in limits based on the pipe bpp
+ * range, crtc_state and dsc mode. Return true on success.
*/
-bool
+static bool
intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2465,17 +2461,27 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
} else {
- /*
- * TODO: set the DSC link limits already here, atm these are
- * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
- * intel_dp_dsc_compute_pipe_bpp()
- */
- limits->link.min_bpp_x16 = 0;
+ int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
+ int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+
+ dsc_src_min_bpp = dsc_src_min_compressed_bpp();
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
+ dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
+ limits->link.min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
+
+ dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ crtc_state,
+ limits->pipe.max_bpp / 3);
+ dsc_max_bpp = dsc_sink_max_bpp ?
+ min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
+
+ max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));
}
limits->link.max_bpp_x16 = max_link_bpp_x16;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n",
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
@@ -2489,29 +2495,62 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
return true;
}
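
In the DSC branch added above, the link bpp floor is the larger of the source and sink minimum compressed bpp, and the ceiling is the smaller of the two maxima (the sink maximum only participates when the sink reports one). A sketch with assumed values:

	int dsc_min_bpp = max(8 /* src */, 8 /* sink, RGB */);		/* -> 8 */
	int dsc_max_bpp = min(24 /* sink */, 27 /* src, ver 13+ */);	/* -> 24 */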
-static bool
+static void
+intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
+ struct link_config_limits *limits)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int dsc_min_bpc = intel_dp_dsc_min_src_input_bpc();
+ int dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
+
+ limits->pipe.max_bpp = clamp(limits->pipe.max_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
+ limits->pipe.min_bpp = clamp(limits->pipe.min_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
+}
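
The new helper pins the pipe bpp search range to what the source DSC engine can ingest before any link computation runs. Worked numbers, assuming a TGL+ platform where dsc_min_bpc = 8 and dsc_max_bpc = 12: both limits are clamped into [24, 36] bpp, so a pipe.max_bpp of 48 becomes 36 and a pipe.min_bpp of 18 becomes 24.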
+
+bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
struct link_config_limits *limits)
{
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
- /* FIXME 128b/132b SST support missing */
- limits->max_rate = min(limits->max_rate, 810000);
+ /* FIXME 128b/132b SST+DSC support missing */
+ if (!is_mst && dsc)
+ limits->max_rate = min(limits->max_rate, 810000);
limits->min_rate = min(limits->min_rate, limits->max_rate);
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
- limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
- respect_downstream_limits);
+ if (is_mst) {
+ /*
+ * FIXME: If all the streams can't fit into the link with their
+ * current pipe_bpp we should reduce pipe_bpp across the board
+ * until things start to fit. Until then we limit to <= 8bpc
+ * since that's what was hardcoded for all MST streams
+ * previously. This hack should be removed once we have the
+ * proper retry logic in place.
+ */
+ limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
+ } else {
+ limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
+ respect_downstream_limits);
+ }
- if (intel_dp->use_max_params) {
+ if (dsc)
+ intel_dp_dsc_compute_pipe_bpp_limits(intel_dp, limits);
+
+ if (is_mst || intel_dp->use_max_params) {
/*
+ * For MST we always configure max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ *
* Use the maximum clock and number of lanes the eDP panel
 * advertises being capable of in case the initial fast
* optimal params failed us. The panels are generally
@@ -2526,6 +2565,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_test_compute_config(intel_dp, crtc_state, limits);
return intel_dp_compute_config_link_bpp_limits(intel_dp,
+ intel_dp->attached_connector,
crtc_state,
dsc,
limits);
@@ -2542,7 +2582,7 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}
-bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915,
+bool intel_dp_joiner_needs_dsc(struct intel_display *display,
int num_joined_pipes)
{
/*
@@ -2551,7 +2591,7 @@ bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915,
* compression.
* Ultrajoiner always needs compression.
*/
- return (!HAS_UNCOMPRESSED_JOINER(i915) && num_joined_pipes == 2) ||
+ return (!HAS_UNCOMPRESSED_JOINER(display) && num_joined_pipes == 2) ||
num_joined_pipes == 4;
}
@@ -2561,7 +2601,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
@@ -2583,7 +2623,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (num_joined_pipes > 1)
pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
- joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, num_joined_pipes);
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_compute_config_limits(intel_dp, pipe_config,
@@ -2598,12 +2638,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
*/
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
conn_state, &limits);
+ if (!ret && intel_dp_is_uhbr(pipe_config))
+ ret = intel_dp_mtp_tu_compute_config(intel_dp,
+ pipe_config,
+ pipe_config->pipe_bpp,
+ pipe_config->pipe_bpp,
+ conn_state,
+ 0, false);
if (ret)
dsc_needed = true;
}
+ if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
+ drm_dbg_kms(display->drm, "DSC required but not available\n");
+ return -EINVAL;
+ }
+
if (dsc_needed) {
- drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ drm_dbg_kms(display->drm,
+ "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
@@ -2619,7 +2672,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
@@ -2665,12 +2718,11 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
}
}
-static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
- enum port port)
+static bool intel_dp_port_has_audio(struct intel_display *display, enum port port)
{
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
return false;
- if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
+ if (DISPLAY_VER(display) < 12 && port == PORT_A)
return false;
return true;
@@ -2680,8 +2732,7 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
const struct drm_connector_state *conn_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (crtc_state->has_panel_replay) {
/*
@@ -2758,7 +2809,7 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
vsc->bpc = crtc_state->pipe_bpp / 3;
/* only RGB pixelformat supports 6 bpc */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
/* all YCbCr are always limited range */
@@ -2848,8 +2899,8 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
int ret;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
if (!conn_state->hdr_output_metadata)
@@ -2858,7 +2909,8 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+ drm_dbg_kms(display->drm,
+ "couldn't set HDR metadata in infoframe\n");
return;
}
@@ -2900,6 +2952,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
int link_bpp_x16)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
@@ -2918,7 +2971,8 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
return;
}
- if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
+ if (display->platform.ironlake || display->platform.sandybridge ||
+ display->platform.ivybridge)
pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
pipe_config->has_drrs = true;
@@ -2940,13 +2994,13 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
static bool intel_dp_has_audio(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- if (!intel_dp_port_has_audio(i915, encoder->port))
+ if (!intel_dp_port_has_audio(display, encoder->port))
return false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
@@ -2961,7 +3015,7 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
const struct drm_display_info *info = &connector->base.display_info;
@@ -2972,7 +3026,7 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
} else {
@@ -3056,7 +3110,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3064,9 +3118,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *connector = intel_dp->attached_connector;
int ret = 0, link_bpp_x16;
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
- pipe_config->has_pch_encoder = true;
-
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
ret = intel_panel_compute_config(connector, adjusted_mode);
@@ -3084,7 +3135,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
- if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ if (intel_dp_hdisplay_bad(display, adjusted_mode->crtc_hdisplay))
return -EINVAL;
/*
@@ -3107,8 +3158,13 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- pipe_config->enhanced_framing =
- drm_dp_enhanced_frame_cap(intel_dp->dpcd);
+ if (intel_dp_is_uhbr(pipe_config)) {
+ /* 128b/132b SST also needs this */
+ pipe_config->mst_master_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ pipe_config->enhanced_framing =
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd);
+ }
if (pipe_config->dsc.compression_enable)
link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
@@ -3124,7 +3180,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->splitter.link_count = n;
pipe_config->splitter.pixel_overlap = overlap;
- drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
+ drm_dbg_kms(display->drm,
+ "MSO link count %d, pixel overlap %d\n",
n, overlap);
adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
@@ -3138,20 +3195,19 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_link_compute_m_n(link_bpp_x16,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- intel_dp_bw_fec_overhead(pipe_config->fec_enable),
- &pipe_config->dp_m_n);
+ if (!intel_dp_is_uhbr(pipe_config)) {
+ intel_link_compute_m_n(link_bpp_x16,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m_n);
+ }
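+ /*
+ * For UHBR (128b/132b) links the bandwidth math was already done by
+ * intel_dp_mtp_tu_compute_config() during link config, so the legacy
+ * data M/N ratio (stream bandwidth relative to total link bandwidth)
+ * is not programmed here.
+ */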
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
- if (!HAS_DDI(dev_priv))
- g4x_dp_set_clock(encoder, pipe_config);
-
intel_vrr_compute_config(pipe_config, conn_state);
intel_dp_compute_as_sdp(intel_dp, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
@@ -3188,13 +3244,13 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp)
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_backlight_enable(crtc_state, conn_state);
intel_pps_backlight_on(intel_dp);
@@ -3204,12 +3260,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_pps_backlight_off(intel_dp);
intel_backlight_disable(old_conn_state);
@@ -3252,11 +3308,11 @@ static void
intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
DP_DECOMPRESSION_EN, enable) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s sink decompression state\n",
str_enable_disable(enable));
}
@@ -3265,7 +3321,7 @@ static void
intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dp_aux *aux = connector->port ?
connector->port->passthrough_aux : NULL;
@@ -3274,7 +3330,7 @@ intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
if (write_dsc_decompression_flag(aux,
DP_DSC_PASSTHROUGH_EN, enable) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s sink compression passthrough state\n",
str_enable_disable(enable));
}
@@ -3283,7 +3339,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
const struct intel_connector *connector,
bool for_get_ref)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector *_connector_iter;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
@@ -3308,7 +3364,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
if (!connector_iter->dp.dsc_decompression_enabled)
continue;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(for_get_ref && !new_conn_state->crtc) ||
(!for_get_ref && !old_conn_state->crtc));
@@ -3355,12 +3411,12 @@ void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
struct intel_connector *connector,
const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (!new_crtc_state->dsc.compression_enable)
return;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!connector->dp.dsc_decompression_aux ||
connector->dp.dsc_decompression_enabled))
return;
@@ -3386,12 +3442,12 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
struct intel_connector *connector,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (!old_crtc_state->dsc.compression_enable)
return;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!connector->dp.dsc_decompression_aux ||
!connector->dp.dsc_decompression_enabled))
return;
@@ -3406,7 +3462,7 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
static void
intel_dp_init_source_oui(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 oui[] = { 0x00, 0xaa, 0x01 };
u8 buf[3] = {};
@@ -3420,7 +3476,7 @@ intel_dp_init_source_oui(struct intel_dp *intel_dp)
* already set to what we want, so as to avoid clearing any state by accident
*/
if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
- drm_err(&i915->drm, "Failed to read source OUI\n");
+ drm_dbg_kms(display->drm, "Failed to read source OUI\n");
if (memcmp(oui, buf, sizeof(oui)) == 0) {
/* Assume the OUI was written now. */
@@ -3429,7 +3485,7 @@ intel_dp_init_source_oui(struct intel_dp *intel_dp)
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) {
- drm_info(&i915->drm, "Failed to write source OUI\n");
+ drm_dbg_kms(display->drm, "Failed to write source OUI\n");
WRITE_ONCE(intel_dp->oui_valid, false);
}
@@ -3443,10 +3499,11 @@ void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp)
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
connector->base.base.id, connector->base.name,
connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
@@ -3457,8 +3514,8 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int ret, i;
/* Should have a valid DPCD by this point */
@@ -3494,7 +3551,8 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
}
if (ret != 1)
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Set power to %s failed\n",
encoder->base.base.id, encoder->base.name,
mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
@@ -3537,7 +3595,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool fastset = true;
@@ -3547,7 +3605,8 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
*/
if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
crtc_state->port_clock) < 0) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.connectors_changed = true;
fastset = false;
@@ -3561,14 +3620,15 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
* Remove once we have readout for DSC.
*/
if (crtc_state->dsc.compression_enable) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
}
if (CAN_PANEL_REPLAY(intel_dp)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
@@ -3580,7 +3640,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* Clear the cached register set to avoid using stale values */
@@ -3589,10 +3649,10 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
intel_dp->pcon_dsc_dpcd,
sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
- drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ drm_err(display->drm, "Failed to read DPCD register 0x%x\n",
DP_PCON_DSC_ENCODER);
- drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ drm_dbg_kms(display->drm, "PCON ENCODER DSC DPCD: %*ph\n",
(int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}
@@ -3630,19 +3690,19 @@ static int intel_dp_pcon_set_frl_mask(int max_frl)
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
int max_frl_rate;
int max_lanes, rate_per_lane;
int max_dsc_lanes, dsc_rate_per_lane;
- max_lanes = connector->display_info.hdmi.max_lanes;
- rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_lanes = info->hdmi.max_lanes;
+ rate_per_lane = info->hdmi.max_frl_rate_per_lane;
max_frl_rate = max_lanes * rate_per_lane;
- if (connector->display_info.hdmi.dsc_cap.v_1p2) {
- max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
- dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (info->hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = info->hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = info->hdmi.dsc_cap.max_frl_rate_per_lane;
if (max_dsc_lanes && dsc_rate_per_lane)
max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
}
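
Worked example for the helper above, with assumed EDID capabilities: 4 FRL lanes at 12 Gbps per lane give max_frl_rate = 48 Gbps; if the sink's HDMI DSC block reports 4 lanes at 10 Gbps per lane, the result is lowered to min(48, 4 * 10) = 40 Gbps.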
@@ -3664,19 +3724,19 @@ intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
-
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
u8 max_frl_bw_mask = 0, frl_trained_mask;
bool is_active;
max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
- drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+ drm_dbg(display->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
- drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+ drm_dbg(display->drm, "Sink max rate from EDID = %d Gbps\n",
+ max_edid_frl_bw);
max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
@@ -3684,7 +3744,7 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
return -EINVAL;
max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
- drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
+ drm_dbg(display->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
goto frl_trained;
@@ -3721,10 +3781,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
return -ETIMEDOUT;
frl_trained:
- drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
+ drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
intel_dp->frl.is_trained = true;
- drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+ drm_dbg(display->drm, "FRL trained with : %d Gbps\n",
+ intel_dp->frl.trained_rate_gbps);
return 0;
}
@@ -3763,7 +3824,7 @@ int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/*
* Always go for FRL training if:
@@ -3778,14 +3839,16 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
int ret, mode;
- drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
+ drm_dbg(display->drm,
+ "Couldn't set FRL mode, continuing with TMDS mode\n");
ret = intel_dp_pcon_set_tmds_mode(intel_dp);
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
- drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ drm_dbg(display->drm,
+ "Issue with PCON, cannot set TMDS mode\n");
} else {
- drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+ drm_dbg(display->drm, "FRL training Completed\n");
}
}
@@ -3801,10 +3864,10 @@ static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
- int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
- int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int hdmi_throughput = info->hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = info->hdmi.dsc_cap.max_slices;
int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
@@ -3818,13 +3881,13 @@ intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
int num_slices, int slice_width)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
int output_format = crtc_state->output_format;
- bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ bool hdmi_all_bpp = info->hdmi.dsc_cap.all_bpp;
int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
int hdmi_max_chunk_bytes =
- connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+ info->hdmi.dsc_cap.total_chunk_kbytes * 1024;
return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
num_slices, output_format, hdmi_all_bpp,
@@ -3835,24 +3898,26 @@ void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info;
u8 pps_param[6];
int slice_height;
int slice_width;
int num_slices;
int bits_per_pixel;
int ret;
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct drm_connector *connector;
bool hdmi_is_dsc_1_2;
if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
return;
- if (!intel_connector)
+ if (!connector)
return;
- connector = &intel_connector->base;
- hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ info = &connector->base.display_info;
+
+ hdmi_is_dsc_1_2 = info->hdmi.dsc_cap.v_1p2;
if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
!hdmi_is_dsc_1_2)
@@ -3883,13 +3948,13 @@ intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+ drm_dbg_kms(display->drm, "Failed to set pcon DSC\n");
}
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool ycbcr444_to_420 = false;
bool rgb_to_ycbcr = false;
u8 tmp;
@@ -3904,7 +3969,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
- drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
+ drm_dbg_kms(display->drm,
+ "Failed to %s protocol converter HDMI mode\n",
str_enable_disable(intel_dp_has_hdmi_sink(intel_dp)));
if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
@@ -3939,14 +4005,14 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s protocol converter RGB->YCbCr conversion mode\n",
str_enable_disable(tmp));
}
@@ -3979,7 +4045,7 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* Clear the cached register set to avoid using stale values
@@ -3998,11 +4064,11 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
&connector->dp.fec_capability) < 0) {
- drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
+ drm_err(display->drm, "Failed to read FEC DPCD register\n");
return;
}
- drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+ drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n",
connector->dp.fec_capability);
}
@@ -4017,10 +4083,10 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
static void
intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (!HAS_DSC(i915))
+ if (!HAS_DSC(display))
return;
if (intel_dp_is_edp(intel_dp))
@@ -4034,8 +4100,8 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int n = intel_dp->mso_link_count;
int overlap = intel_dp->mso_pixel_overlap;
@@ -4050,7 +4116,7 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
drm_mode_set_name(mode);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
connector->base.base.id, connector->base.name,
DRM_MODE_ARG(mode));
@@ -4058,7 +4124,7 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
@@ -4076,7 +4142,7 @@ void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
pipe_bpp, connector->panel.vbt.edp.bpp);
connector->panel.vbt.edp.bpp = pipe_bpp;
@@ -4085,7 +4151,7 @@ void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_display_info *info = &connector->base.display_info;
u8 mso;
@@ -4094,23 +4160,25 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
return;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
- drm_err(&i915->drm, "Failed to read MSO cap\n");
+ drm_err(display->drm, "Failed to read MSO cap\n");
return;
}
/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
- drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
+ drm_err(display->drm, "Invalid MSO link count cap %u\n", mso);
mso = 0;
}
if (mso) {
- drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
+ drm_dbg_kms(display->drm,
+ "Sink MSO %ux%u configuration, pixel overlap %u\n",
mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
info->mso_pixel_overlap);
- if (!HAS_MSO(i915)) {
- drm_err(&i915->drm, "No source MSO support, disabling\n");
+ if (!HAS_MSO(display)) {
+ drm_err(display->drm,
+ "No source MSO support, disabling\n");
mso = 0;
}
}
@@ -4161,11 +4229,10 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
/* this function is meant to be called only once */
- drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
+ drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
return false;
@@ -4189,7 +4256,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ drm_dbg_kms(display->drm, "eDP DPCD: %*ph\n",
(int)sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
@@ -4300,9 +4367,9 @@ static enum drm_dp_mst_mode
intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
enum drm_dp_mst_mode sink_mst_mode)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (!i915->display.params.enable_dp_mst)
+ if (!display->params.enable_dp_mst)
return DRM_DP_SST;
if (!intel_dp_mst_source_support(intel_dp))
@@ -4318,7 +4385,7 @@ intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
static enum drm_dp_mst_mode
intel_dp_mst_detect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum drm_dp_mst_mode sink_mst_mode;
enum drm_dp_mst_mode mst_detect;
@@ -4327,12 +4394,12 @@ intel_dp_mst_detect(struct intel_dp *intel_dp)
mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
intel_dp_mst_mode_str(sink_mst_mode),
- str_yes_no(i915->display.params.enable_dp_mst),
+ str_yes_no(display->params.enable_dp_mst),
intel_dp_mst_mode_str(mst_detect));
return mst_detect;
@@ -4358,12 +4425,13 @@ intel_dp_mst_configure(struct intel_dp *intel_dp)
static void
intel_dp_mst_disconnect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_dp->is_mst)
return;
- drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
+ drm_dbg_kms(display->drm,
+ "MST device may have disappeared %d vs %d\n",
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
@@ -4444,7 +4512,7 @@ static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
}
static ssize_t
-intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
+intel_dp_hdr_metadata_infoframe_sdp_pack(struct intel_display *display,
const struct hdmi_drm_infoframe *drm_infoframe,
struct dp_sdp *sdp,
size_t size)
@@ -4461,12 +4529,13 @@ intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
if (len < 0) {
- drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
+ drm_dbg_kms(display->drm,
+ "buffer size is smaller than hdr metadata infoframe\n");
return -ENOSPC;
}
if (len != infoframe_size) {
- drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
+ drm_dbg_kms(display->drm, "wrong static hdr metadata size\n");
return -ENOSPC;
}
@@ -4524,8 +4593,8 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
@@ -4538,7 +4607,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(display,
&crtc_state->infoframes.drm.drm,
&sdp, sizeof(sdp));
break;
@@ -4551,7 +4620,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
return;
}
- if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ if (drm_WARN_ON(display->drm, len < 0))
return;
dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
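
For reference, intel_write_dp_sdp() above reduces to a pack-then-write flow; a condensed sketch with error handling trimmed (all names taken from the hunk itself):

	struct dp_sdp sdp = {};
	ssize_t len;

	/* 1) pack the logical infoframe into raw SDP bytes */
	len = intel_dp_hdr_metadata_infoframe_sdp_pack(display,
						       &crtc_state->infoframes.drm.drm,
						       &sdp, sizeof(sdp));
	if (len < 0)
		return;

	/* 2) hand the bytes to the port's data island packet hardware */
	dig_port->write_infoframe(encoder, crtc_state,
				  HDMI_PACKET_TYPE_GAMUT_METADATA, &sdp, len);
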
@@ -4562,20 +4631,19 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv,
- crtc_state->cpu_transcoder);
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display, crtc_state->cpu_transcoder);
u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
- if (HAS_AS_SDP(dev_priv))
+ if (HAS_AS_SDP(display))
dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;
- u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
+ u32 val = intel_de_read(display, reg) & ~dip_enable;
/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
- if (!enable && HAS_DSC(dev_priv))
+ if (!enable && HAS_DSC(display))
val &= ~VDIP_ENABLE_PPS;
/*
@@ -4585,8 +4653,8 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable || !crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
if (!enable)
return;
@@ -4707,8 +4775,8 @@ intel_read_dp_as_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_as_sdp *as_sdp)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_ADAPTIVE_SYNC;
struct dp_sdp sdp = {};
int ret;
@@ -4722,7 +4790,7 @@ intel_read_dp_as_sdp(struct intel_encoder *encoder,
ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
+ drm_dbg_kms(display->drm, "Failed to unpack DP AS SDP\n");
}
static int
@@ -4775,8 +4843,8 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_vsc_sdp *vsc)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_VSC;
struct dp_sdp sdp = {};
int ret;
@@ -4790,15 +4858,15 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
+ drm_dbg_kms(display->drm, "Failed to unpack DP VSC SDP\n");
}
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct hdmi_drm_infoframe *drm_infoframe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
struct dp_sdp sdp = {};
int ret;
@@ -4814,7 +4882,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to unpack DP HDR Metadata Infoframe SDP\n");
}
@@ -4844,8 +4912,8 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
static bool intel_dp_link_ok(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE])
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool uhbr = intel_dp->link_rate >= 1000000;
bool ok;
@@ -4859,7 +4927,7 @@ static bool intel_dp_link_ok(struct intel_dp *intel_dp,
return true;
intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s link not ok, retraining\n",
encoder->base.base.id, encoder->base.name,
uhbr ? "128b/132b" : "8b/10b");
@@ -4882,14 +4950,14 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 link_status[DP_LINK_STATUS_SIZE] = {};
const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
esi_link_status_size) != esi_link_status_size) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] Failed to read link status\n",
encoder->base.base.id, encoder->base.name);
return false;
@@ -4915,27 +4983,27 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
+ drm_WARN_ON_ONCE(display->drm, intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"failed to get ESI - device may have failed\n");
link_ok = false;
break;
}
- drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
+ drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
if (intel_dp->active_mst_links > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
@@ -4947,7 +5015,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
if (esi[3] & DP_TUNNELING_IRQ) {
- if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ if (drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
ack[3] |= DP_TUNNELING_IRQ;
@@ -4957,7 +5025,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
break;
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
- drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
+ drm_dbg_kms(display->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
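
The loop above is the standard MST ESI service pattern: read the event status indicators, service each asserted IRQ while collecting the bits to acknowledge, and stop once a pass services nothing. Skeleton of that pattern, with hypothetical helper names standing in for the inline logic:

	for (;;) {
		u8 esi[4] = {}, ack[4] = {};

		if (!read_esi(intel_dp, esi))		/* sink vanished */
			break;
		service_irqs(intel_dp, esi, ack);	/* sets the bits it handled */
		if (!memchr_inv(ack, 0, sizeof(ack)))
			break;				/* nothing serviced: done */
		ack_esi(intel_dp, ack);			/* clear them in the sink */
	}
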
@@ -5045,7 +5113,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -5058,7 +5126,7 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
return true;
/* MST */
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
encoder = &intel_dp->mst_encoders[pipe]->base;
if (conn_state->best_encoder == &encoder->base)
return true;
@@ -5086,14 +5154,14 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
u8 *pipe_mask)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
*pipe_mask = 0;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state =
connector->base.state;
@@ -5113,7 +5181,8 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+ drm_WARN_ON(display->drm,
+ !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
continue;
@@ -5143,6 +5212,7 @@ static bool intel_dp_is_connected(struct intel_dp *intel_dp)
static int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 pipe_mask;
@@ -5151,7 +5221,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
if (!intel_dp_is_connected(intel_dp))
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -5169,7 +5239,8 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link (forced %s)\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] retraining link (forced %s)\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp->link.force_retrain));
@@ -5180,7 +5251,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_dp->link.force_retrain = false;
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] link retraining failed: %pe\n",
encoder->base.base.id, encoder->base.name,
ERR_PTR(ret));
@@ -5213,7 +5284,7 @@ void intel_dp_check_link_state(struct intel_dp *intel_dp)
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
@@ -5232,12 +5303,12 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
- drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
+ drm_dbg_kms(display->drm, "Sink specific irq unhandled\n");
}
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool reprobe_needed = false;
u8 val;
@@ -5249,7 +5320,7 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
return false;
if ((val & DP_TUNNELING_IRQ) &&
- drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
@@ -5318,12 +5389,12 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u8 *dpcd = intel_dp->dpcd;
u8 type;
- if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
+ if (drm_WARN_ON(display->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
lspcon_resume(dig_port);
@@ -5366,7 +5437,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
}
/* Anything else is out of spec, warn and ignore */
- drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
+ drm_dbg_kms(display->drm, "Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
@@ -5461,7 +5532,7 @@ static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
const struct drm_edid *drm_edid)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
intel_dp->dfp.max_bpc =
@@ -5485,7 +5556,7 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
intel_dp->downstream_ports);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
connector->base.base.id, connector->base.name,
intel_dp->dfp.max_bpc,
@@ -5518,7 +5589,7 @@ intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
intel_dp->dfp.ycbcr420_passthrough =
@@ -5536,7 +5607,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
@@ -5547,7 +5618,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
const struct drm_edid *drm_edid;
bool vrr_capable;
@@ -5560,7 +5631,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
drm_edid_connector_update(&connector->base, drm_edid);
vrr_capable = intel_vrr_is_capable(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
@@ -5597,38 +5668,37 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
static void
intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- intel_dp->as_sdp_supported = HAS_AS_SDP(i915) &&
+ intel_dp->as_sdp_supported = HAS_AS_SDP(display) &&
drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}
static int
-intel_dp_detect(struct drm_connector *connector,
+intel_dp_detect(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ struct intel_display *display = to_intel_display(_connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
int ret;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
- return connector->status;
+ if (!intel_display_driver_check_access(display))
+ return connector->base.status;
- intel_dp_flush_connector_commits(intel_connector);
+ intel_dp_flush_connector_commits(connector);
intel_pps_vdd_on(intel_dp);
@@ -5654,7 +5724,7 @@ intel_dp_detect(struct drm_connector *connector,
if (status == connector_status_disconnected) {
intel_dp_test_reset(intel_dp);
- memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
+ memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
intel_dp->psr.sink_panel_replay_su_support = false;
@@ -5675,12 +5745,12 @@ intel_dp_detect(struct drm_connector *connector,
}
if (ret == 1)
- intel_connector->base.epoch_counter++;
+ connector->base.epoch_counter++;
if (!intel_dp_is_edp(intel_dp))
intel_psr_init_dpcd(intel_dp);
- intel_dp_detect_dsc_caps(intel_dp, intel_connector);
+ intel_dp_detect_dsc_caps(intel_dp, connector);
intel_dp_detect_sdp_caps(intel_dp);
@@ -5723,8 +5793,7 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) ||
- to_intel_connector(connector)->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
intel_dp_check_device_service_irq(intel_dp);
@@ -5734,7 +5803,7 @@ out_unset_edid:
intel_dp_unset_edid(intel_dp);
if (!intel_dp_is_edp(intel_dp))
- drm_dp_set_subconnector_property(connector,
+ drm_dp_set_subconnector_property(&connector->base,
status,
intel_dp->dpcd,
intel_dp->downstream_ports);
@@ -5747,15 +5816,13 @@ out_vdd_off:
static void
intel_dp_force(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return;
intel_dp_unset_edid(intel_dp);
@@ -5766,30 +5833,31 @@ intel_dp_force(struct drm_connector *connector)
intel_dp_set_edid(intel_dp);
}
-static int intel_dp_get_modes(struct drm_connector *connector)
+static int intel_dp_get_modes(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_display *display = to_intel_display(_connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int num_modes;
/* drm_edid_connector_update() done in ->detect() or ->force() */
- num_modes = drm_edid_connector_add_modes(connector);
+ num_modes = drm_edid_connector_add_modes(&connector->base);
/* Also add fixed mode, which may or may not be present in EDID */
- if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
- num_modes += intel_panel_get_modes(intel_connector);
+ if (intel_dp_is_edp(intel_dp))
+ num_modes += intel_panel_get_modes(connector);
if (num_modes)
return num_modes;
- if (!intel_connector->detect_edid) {
- struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ if (!connector->detect_edid) {
struct drm_display_mode *mode;
- mode = drm_dp_downstream_mode(connector->dev,
+ mode = drm_dp_downstream_mode(display->drm,
intel_dp->dpcd,
intel_dp->downstream_ports);
if (mode) {
- drm_mode_probed_add(connector, mode);
+ drm_mode_probed_add(&connector->base, mode);
num_modes++;
}
}
@@ -5800,7 +5868,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -5810,7 +5878,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+ drm_dbg_kms(display->drm, "registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
@@ -5847,10 +5915,11 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_connector_sync_state(struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (crtc_state && crtc_state->dsc.compression_enable) {
- drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ drm_WARN_ON(display->drm,
+ !connector->dp.dsc_decompression_aux);
connector->dp.dsc_decompression_enabled = true;
} else {
connector->dp.dsc_decompression_enabled = false;
@@ -5880,18 +5949,18 @@ void intel_dp_encoder_flush_work(struct drm_encoder *_encoder)
intel_dp_aux_fini(intel_dp);
}
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_pps_vdd_off_sync(intel_dp);
intel_dp_tunnel_suspend(intel_dp);
}
-void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_shutdown(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_pps_wait_power_cycle(intel_dp);
}
@@ -5899,12 +5968,12 @@ void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
static int intel_modeset_tile_group(struct intel_atomic_state *state,
int tile_group_id)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
int ret = 0;
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
@@ -5940,13 +6009,13 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
if (transcoders == 0)
return 0;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5973,7 +6042,7 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
transcoders &= ~BIT(crtc_state->cpu_transcoder);
}
- drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+ drm_WARN_ON(display->drm, transcoders != 0);
return 0;
}
@@ -6007,7 +6076,7 @@ static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *_state)
{
- struct drm_i915_private *dev_priv = to_i915(conn->dev);
+ struct intel_display *display = to_intel_display(conn->dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
struct intel_connector *intel_conn = to_intel_connector(conn);
@@ -6037,7 +6106,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (conn->has_tile) {
@@ -6052,6 +6121,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
enum drm_connector_status hpd_state)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(connector->dev);
bool hpd_high = hpd_state == connector_status_connected;
@@ -6059,10 +6129,12 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
bool need_work = false;
spin_lock_irq(&i915->irq_lock);
- if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
- i915->display.hotplug.event_bits |= BIT(hpd_pin);
+ if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
+ display->hotplug.event_bits |= BIT(hpd_pin);
- __assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
+ __assign_bit(hpd_pin,
+ &display->hotplug.oob_hotplug_last_state,
+ hpd_high);
need_work = true;
}
spin_unlock_irq(&i915->irq_lock);
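
A side note on the hunk above: __assign_bit() is the non-atomic set-or-clear helper, safe here because i915->irq_lock serializes access to the hotplug state. It is shorthand for:

	if (hpd_high)
		__set_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state);
	else
		__clear_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state);
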
@@ -6094,6 +6166,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -6108,7 +6181,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
* would end up in an endless cycle of
* "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
*/
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
long_hpd ? "long" : "short",
dig_port->base.base.base.id,
@@ -6116,7 +6189,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
- drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+ drm_dbg_kms(display->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
dig_port->base.base.base.id,
dig_port->base.base.name,
long_hpd ? "long" : "short");
@@ -6149,7 +6222,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
-static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
+static bool _intel_dp_is_port_edp(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -6157,41 +6230,40 @@ static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
* eDP not supported on g4x. so bail out early just
* for a bit extra safety in case the VBT is bonkers.
*/
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
return false;
- if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
+ if (DISPLAY_VER(display) < 9 && port == PORT_A)
return true;
return devdata && intel_bios_encoder_supports_edp(devdata);
}
-bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
+bool intel_dp_is_port_edp(struct intel_display *display, enum port port)
{
- struct intel_display *display = &i915->display;
const struct intel_bios_encoder_data *devdata =
intel_bios_encoder_data_lookup(display, port);
- return _intel_dp_is_port_edp(i915, devdata, port);
+ return _intel_dp_is_port_edp(display, devdata, port);
}
bool
intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
if (intel_bios_encoder_is_lspcon(encoder->devdata))
return false;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return true;
if (port == PORT_A)
return false;
- if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
- DISPLAY_VER(i915) >= 9)
+ if (display->platform.haswell || display->platform.broadwell ||
+ DISPLAY_VER(display) >= 9)
return true;
return false;
@@ -6200,19 +6272,19 @@ intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!intel_dp_is_edp(intel_dp))
drm_connector_attach_dp_subconnector_property(connector);
- if (!IS_G4X(dev_priv) && port != PORT_A)
+ if (!display->platform.g4x && port != PORT_A)
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
drm_connector_attach_max_bpc_property(connector, 6, 10);
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
@@ -6226,22 +6298,22 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
drm_connector_attach_hdr_output_metadata_property(connector);
- if (HAS_VRR(dev_priv))
+ if (HAS_VRR(display))
drm_connector_attach_vrr_capable_property(connector);
}
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
intel_attach_scaling_mode_property(&connector->base);
drm_connector_set_panel_orientation_with_quirk(&connector->base,
- i915->display.vbt.orientation,
+ display->vbt.orientation,
fixed_mode->hdisplay,
fixed_mode->vdisplay);
}
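
A second mechanical pattern runs through the hunks above: platform checks move from the IS_*() macros, which require a drm_i915_private, to the display->platform flags. Before/after shape, taken from the property-setup code:

	/* before */
	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	/* after */
	if (!display->platform.g4x && port != PORT_A)
		intel_attach_force_audio_property(connector);
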
@@ -6249,21 +6321,20 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum pipe pipe = INVALID_PIPE;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
pipe = vlv_pps_backlight_initial_pipe(intel_dp);
intel_backlight_setup(connector, pipe);
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector)
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct drm_connector *connector = &intel_connector->base;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
@@ -6279,19 +6350,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"LVDS was detected, not registering eDP\n");
return false;
}
- intel_bios_init_panel_early(display, &intel_connector->panel,
+ intel_bios_init_panel_early(display, &connector->panel,
encoder->devdata);
if (!intel_pps_init(intel_dp)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
/*
@@ -6314,11 +6385,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_alpm_init_dpcd(intel_dp);
/* Cache DPCD and EDID for edp. */
- has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector);
+ has_dpcd = intel_edp_init_dpcd(intel_dp, connector);
if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
@@ -6341,7 +6412,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* DPCD read? Would need sort out the VDD handling...
*/
if (!intel_digital_port_connected(encoder)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
@@ -6353,30 +6424,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* back to checking for a VGA branch device. Only do this
* on known affected platforms to minimize false positives.
*/
- if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
+ if (DISPLAY_VER(display) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
DP_DWN_STRM_PORT_TYPE_ANALOG) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
}
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_edid = drm_edid_read_ddc(connector, connector->ddc);
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_edid = drm_edid_read_ddc(&connector->base, connector->base.ddc);
if (!drm_edid) {
/* Fallback to EDID from ACPI OpRegion, if any */
- drm_edid = intel_opregion_get_edid(intel_connector);
+ drm_edid = intel_opregion_get_edid(connector);
if (drm_edid)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using OpRegion EDID\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
}
if (drm_edid) {
- if (drm_edid_connector_update(connector, drm_edid) ||
- !drm_edid_connector_add_modes(connector)) {
- drm_edid_connector_update(connector, NULL);
+ if (drm_edid_connector_update(&connector->base, drm_edid) ||
+ !drm_edid_connector_add_modes(&connector->base)) {
+ drm_edid_connector_update(&connector->base, NULL);
drm_edid_free(drm_edid);
drm_edid = ERR_PTR(-EINVAL);
}
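
The eDP connector-init hunks here encode a fallback chain for finding the panel's fixed mode; condensed from the surrounding code:

	/* 1) EDID over DDC */
	drm_edid = drm_edid_read_ddc(&connector->base, connector->base.ddc);
	/* 2) EDID from the ACPI OpRegion if the DDC read failed */
	if (!drm_edid)
		drm_edid = intel_opregion_get_edid(connector);
	/* 3) finally, a VBT-provided fixed mode */
	if (!intel_panel_preferred_fixed_mode(connector))
		intel_panel_add_vbt_lfp_fixed_mode(connector);
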
@@ -6384,34 +6455,34 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata,
+ intel_bios_init_panel_late(display, &connector->panel, encoder->devdata,
IS_ERR(drm_edid) ? NULL : drm_edid);
- intel_panel_add_edid_fixed_modes(intel_connector, true);
+ intel_panel_add_edid_fixed_modes(connector, true);
/* MSO requires information from the EDID */
intel_edp_mso_init(intel_dp);
/* multiply the mode clock and horizontal timings for MSO */
- list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
- intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head)
+ intel_edp_mso_mode_fixup(connector, fixed_mode);
/* fallback to VBT if available for eDP */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
- if (!intel_panel_preferred_fixed_mode(intel_connector)) {
- drm_info(&dev_priv->drm,
+ if (!intel_panel_preferred_fixed_mode(connector)) {
+ drm_info(display->drm,
"[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
- intel_panel_init(intel_connector, drm_edid);
+ intel_panel_init(connector, drm_edid);
- intel_edp_backlight_setup(intel_dp, intel_connector);
+ intel_edp_backlight_setup(intel_dp, connector);
intel_edp_add_properties(intel_dp);
@@ -6421,34 +6492,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
out_vdd_off:
intel_pps_vdd_off_sync(intel_dp);
- intel_bios_fini_panel(&intel_connector->panel);
+ intel_bios_fini_panel(&connector->panel);
return false;
}
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_connector *connector = container_of(work, typeof(*connector),
+ modeset_retry_work);
+ struct intel_display *display = to_intel_display(connector);
- intel_connector = container_of(work, typeof(*intel_connector),
- modeset_retry_work);
- connector = &intel_connector->base;
- drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id,
+ connector->base.name);
/* Grab the locks before changing connector property*/
- mutex_lock(&connector->dev->mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
/* Set connector link status to BAD and send a Uevent to notify
* userspace to do a modeset.
*/
- drm_connector_set_link_status_property(connector,
+ drm_connector_set_link_status_property(&connector->base,
DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
- drm_kms_helper_connector_hotplug_event(connector);
+ drm_kms_helper_connector_hotplug_event(&connector->base);
- drm_connector_put(connector);
+ drm_connector_put(&connector->base);
}
void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
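
The rewritten retry worker above also shows the standard container_of() idiom for deferred work: recover the embedding object from the work_struct member. In general form (illustrative):

	static void foo_work_fn(struct work_struct *work)
	{
		/* 'work' is embedded in the connector as modeset_retry_work,
		 * so step back to the containing structure */
		struct intel_connector *connector =
			container_of(work, typeof(*connector), modeset_retry_work);

		/* ... operate on connector ... */
	}
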
@@ -6459,45 +6528,44 @@ void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
- struct intel_connector *intel_connector)
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = &dig_port->dp;
- struct intel_encoder *intel_encoder = &dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_encoder->port;
+ enum port port = encoder->port;
int type;
/* Initialize the work for modeset in case of link train failure */
- intel_dp_init_modeset_retry_work(intel_connector);
+ intel_dp_init_modeset_retry_work(connector);
if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ dig_port->max_lanes, encoder->base.base.id,
+ encoder->base.name))
return false;
intel_dp->reset_link_params = true;
/* Preserve the current hw state. */
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
- intel_dp->attached_connector = intel_connector;
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
+ intel_dp->attached_connector = connector;
- if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
+ if (_intel_dp_is_port_edp(display, encoder->devdata, port)) {
/*
* Currently we don't support eDP on TypeC ports for DISPLAY_VER < 30,
* although in theory it could work on TypeC legacy ports.
*/
- drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder) &&
- DISPLAY_VER(dev_priv) < 30);
+ drm_WARN_ON(dev, intel_encoder_is_tc(encoder) &&
+ DISPLAY_VER(display) < 30);
type = DRM_MODE_CONNECTOR_eDP;
- intel_encoder->type = INTEL_OUTPUT_EDP;
+ encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) &&
+ if (drm_WARN_ON(dev, (display->platform.valleyview ||
+ display->platform.cherryview) &&
port != PORT_B && port != PORT_C))
return false;
} else {
@@ -6507,37 +6575,37 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_default_sink_rates(intel_dp);
intel_dp_set_default_max_sink_lane_count(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_pipe_init(intel_dp);
intel_dp_aux_init(intel_dp);
- intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
+ connector->dp.dsc_decompression_aux = &intel_dp->aux;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ encoder->base.base.id, encoder->base.name);
- drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs,
+ drm_connector_init_with_ddc(dev, &connector->base, &intel_dp_connector_funcs,
type, &intel_dp->aux.ddc);
- drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_dp_connector_helper_funcs);
- if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
- connector->interlace_allowed = true;
+ if (!HAS_GMCH(display) && DISPLAY_VER(display) < 12)
+ connector->base.interlace_allowed = true;
if (type != DRM_MODE_CONNECTOR_eDP)
- intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_connector->base.polled = intel_connector->polled;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->base.polled = connector->polled;
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(connector, encoder);
- if (HAS_DDI(dev_priv))
- intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ if (HAS_DDI(display))
+ connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
- intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->sync_state = intel_dp_connector_sync_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
+ connector->sync_state = intel_dp_connector_sync_state;
- if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+ if (!intel_edp_init_connector(intel_dp, connector)) {
intel_dp_aux_fini(intel_dp);
goto fail;
}
@@ -6547,15 +6615,14 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_reset_link_params(intel_dp);
/* init MST on ports that can support it */
- intel_dp_mst_encoder_init(dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(dig_port, connector->base.base.id);
- intel_dp_add_properties(intel_dp, connector);
+ intel_dp_add_properties(intel_dp, &connector->base);
if (is_hdcp_supported(display, port) && !intel_dp_is_edp(intel_dp)) {
- int ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ int ret = intel_dp_hdcp_init(dig_port, connector);
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDCP init failed, skipping.\n");
}
@@ -6568,19 +6635,19 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
fail:
intel_display_power_flush_work(dev_priv);
- drm_connector_cleanup(connector);
+ drm_connector_cleanup(&connector->base);
return false;
}
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
+void intel_dp_mst_suspend(struct intel_display *display)
{
struct intel_encoder *encoder;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_dp *intel_dp;
if (encoder->type != INTEL_OUTPUT_DDI)
@@ -6596,14 +6663,14 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
}
}
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
+void intel_dp_mst_resume(struct intel_display *display)
{
struct intel_encoder *encoder;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_dp *intel_dp;
int ret;
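
intel_dp_mst_suspend() and intel_dp_mst_resume() share an iteration pattern: walk every encoder, skip non-DDI ones, and operate on the DP state behind each digital port. Skeleton condensed from the two functions:

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);
		/* ... suspend or resume the MST topology manager ... */
	}
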
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 48f10876be65..ca49f0a05da5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -12,14 +12,14 @@ enum intel_output_format;
enum pipe;
enum port;
struct drm_connector_state;
+struct drm_dp_vsc_sdp;
struct drm_encoder;
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
-struct drm_dp_vsc_sdp;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -87,15 +87,15 @@ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_has_dsc(const struct intel_connector *connector);
int intel_dp_link_symbol_size(int rate);
int intel_dp_link_symbol_clock(int rate);
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_dp_is_port_edp(struct intel_display *display, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+void intel_dp_mst_suspend(struct intel_display *display);
+void intel_dp_mst_resume(struct intel_display *display);
int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
@@ -112,15 +112,15 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_tps3(struct drm_i915_private *i915);
-bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
+bool intel_dp_source_supports_tps3(struct intel_display *display);
+bool intel_dp_source_supports_tps4(struct intel_display *display);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes);
-bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915,
+bool intel_dp_joiner_needs_dsc(struct intel_display *display,
int num_joined_pipes);
bool intel_dp_has_joiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
@@ -137,16 +137,16 @@ bool intel_digital_port_connected(struct intel_encoder *encoder);
bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
-u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
+u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
int num_joined_pipes,
enum intel_output_format output_format,
u32 pipe_bpp,
u32 timeslots);
-int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config);
+int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_config);
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
@@ -170,10 +170,11 @@ bool intel_dp_supports_fec(struct intel_dp *intel_dp,
const struct intel_connector *connector,
const struct intel_crtc_state *pipe_config);
-bool intel_dp_supports_dsc(const struct intel_connector *connector,
+bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state);
-u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
+u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -193,11 +194,11 @@ void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp);
void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
-bool
-intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool dsc,
- struct link_config_limits *limits);
+bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ bool respect_downstream_limits,
+ bool dsc,
+ struct link_config_limits *limits);
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector);
bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder);
@@ -206,5 +207,7 @@ bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
u8 lane_count);
bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
+int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
+int intel_dp_dsc_min_src_input_bpc(void);
#endif /* __INTEL_DP_H__ */
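
Note how the header change stays cheap: intel_dp.h only ever names struct intel_display through pointers, so a forward declaration suffices and no new #include is pulled in. The idiom:

	struct intel_display;	/* opaque here; fully defined elsewhere */

	bool intel_dp_is_port_edp(struct intel_display *display, enum port port);
	void intel_dp_mst_suspend(struct intel_display *display);
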
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 04a7acd7f73c..40c697476b72 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,8 +5,6 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_trace.h"
-#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -15,6 +13,7 @@
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_tc.h"
+#include "intel_uncore_trace.h"
#define AUX_CH_NAME_BUFSIZE 6
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 33f72db99b58..c846ef4acf5b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,8 +34,9 @@
* for some reason.
*/
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux_backlight.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 397cc4ebae52..8b1977cfec50 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -25,7 +25,8 @@
#include <drm/display/drm_dp_helper.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -221,7 +222,6 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp_is_edp(intel_dp))
return 0;
@@ -230,7 +230,7 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))
+ if (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)
if (drm_dp_dpcd_probe(&intel_dp->aux,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
@@ -262,7 +262,6 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
int lttpr_count = 0;
/*
@@ -270,7 +269,7 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (!intel_dp_is_edp(intel_dp) &&
- (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) {
+ (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
@@ -391,10 +390,9 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(display) >= 10 || IS_BROXTON(i915);
+ DISPLAY_VER(display) >= 10 || display->platform.broxton;
}
/* 128b/132b */
@@ -898,7 +896,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- usleep_range(delay_us, 2 * delay_us);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
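
Replacing usleep_range(delay_us, 2 * delay_us) with fsleep(delay_us) here and in the channel-equalization loop below delegates the sleep-mechanism choice to the kernel, which is safer now that the DPCD-provided training delays span a wide range. A rough sketch of fsleep()'s behavior, modeled on its definition in include/linux/delay.h (exact thresholds may differ across kernel versions):

/* Sketch of fsleep(): pick a delay primitive suited to the magnitude */
static inline void fsleep_sketch(unsigned long usecs)
{
	if (usecs <= 10)			/* too short to schedule: spin */
		udelay(usecs);
	else if (usecs <= 20000)		/* hrtimer sleep with 2x slack */
		usleep_range(usecs, 2 * usecs);
	else					/* long waits: jiffies sleep */
		msleep(DIV_ROUND_UP(usecs, 1000));
}
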
@@ -959,7 +957,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/* UHBR+ use separate 128b/132b TPS2 */
@@ -972,7 +969,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
* LTTPRs must support TPS4.
*/
- source_tps4 = intel_dp_source_supports_tps4(i915);
+ source_tps4 = intel_dp_source_supports_tps4(display);
sink_tps4 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps4_supported(intel_dp->dpcd);
if (source_tps4 && sink_tps4) {
@@ -990,7 +987,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* TPS3 support is mandatory for downstream devices that
* support HBR2. However, not all sinks follow the spec.
*/
- source_tps3 = intel_dp_source_supports_tps3(i915);
+ source_tps3 = intel_dp_source_supports_tps3(display);
sink_tps3 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
@@ -1040,7 +1037,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
}
for (tries = 0; tries < 5; tries++) {
- usleep_range(delay_us, 2 * delay_us);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
@@ -1414,16 +1411,10 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
}
/* Time budget for the LANEx_EQ_DONE Sequence */
- deadline = jiffies + msecs_to_jiffies_timeout(400);
+ deadline = jiffies + msecs_to_jiffies_timeout(450);
for (try = 0; try < max_tries; try++) {
- usleep_range(delay_us, 2 * delay_us);
-
- /*
- * The delay may get updated. The transmitter shall read the
- * delay before link status during link training.
- */
- delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
@@ -1451,8 +1442,15 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
if (time_after(jiffies, deadline))
timeout = true; /* try one last time after deadline */
- /* Update signal levels and training set as requested. */
+ /*
+ * During LT, Tx shall read AUX_RD_INTERVAL just before writing the new FFE
+ * presets.
+ */
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+
+ /* Update signal levels and training set as requested. */
if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
return false;
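
Two changes land in this hunk: the LANEx_EQ_DONE budget grows from 400 ms to 450 ms, and the AUX_RD_INTERVAL read moves from the top of the loop to just before the new FFE presets are written, per the DP 2.x rule quoted in the added comment. A condensed sketch of the resulting loop, with deadline and EQ-done handling elided:

/* Condensed sketch of the 128b/132b lane-EQ loop after this change */
for (try = 0; try < max_tries; try++) {
	fsleep(delay_us);

	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0)
		return false;

	/* ... LANEx_CHANNEL_EQ_DONE and deadline checks ... */

	/* re-read the (possibly updated) interval just before the FFE write */
	delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);

	if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX))
		return false;
}
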
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 5bba078c00d8..0c44fc7dd86c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -53,14 +53,64 @@
#include "intel_vdsc.h"
#include "skl_scaler.h"
+/*
+ * DP MST (DisplayPort Multi-Stream Transport)
+ *
+ * MST support on the source depends on the platform and port. DP initialization
+ * sets up MST for each MST capable encoder. This will become the primary
+ * encoder for the port.
+ *
+ * MST initialization of each primary encoder creates MST stream encoders, one
+ * per pipe, and initializes the MST topology manager. The MST stream encoders
+ * are sometimes called "fake encoders", because they're virtual, not
+ * physical. Thus there are (number of MST capable ports) x (number of pipes)
+ * MST stream encoders in total.
+ *
+ * Decision to use MST for a sink happens at detect on the connector attached to
+ * the primary encoder, and this will not change while the sink is connected. We
+ * always use MST when possible, including for SST sinks with sideband messaging
+ * support.
+ *
+ * The connectors for the MST streams are added and removed dynamically by the
+ * topology manager. Their connection status is also determined by the topology
+ * manager.
+ *
+ * On hardware, each transcoder may be associated with a single DDI
+ * port. Multiple transcoders may be associated with the same DDI port only if
+ * the port is in MST mode.
+ *
+ * On TGL+, all the transcoders streaming on the same DDI port indicate one of
+ * them as the primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS
+ * registers are relevant only on that primary transcoder. Prior to TGL, they
+ * are port registers.
+ */
+
+/* From fake MST stream encoder to primary encoder */
+static struct intel_encoder *to_primary_encoder(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ return &dig_port->base;
+}
+
+/* From fake MST stream encoder to primary DP */
+static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ return &dig_port->dp;
+}
+
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
+ if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
return INT_MAX;
/*
@@ -89,7 +139,6 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
}
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
- const struct intel_connector *connector,
bool ssc, int dsc_slice_count, int bpp_x16)
{
const struct drm_display_mode *adjusted_mode =
@@ -118,7 +167,6 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
}
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
- const struct intel_connector *connector,
int overhead,
int bpp_x16,
struct intel_link_m_n *m_n)
@@ -161,35 +209,22 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
-static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- int max_bpp,
- int min_bpp,
- struct link_config_limits *limits,
- struct drm_connector_state *conn_state,
- int step,
- bool dsc)
+int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct drm_dp_mst_topology_state *mst_state;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ fixed20_12 pbn_div;
int bpp, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp;
- int ret = 0;
-
- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
- if (IS_ERR(mst_state))
- return PTR_ERR(mst_state);
-
- crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_rate;
if (dsc) {
if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
@@ -198,24 +233,23 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
}
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
- crtc_state->port_clock,
- crtc_state->lane_count);
+ pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
if (max_bpp > max_dpt_bpp) {
- drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
+ drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
max_bpp, max_dpt_bpp);
max_bpp = max_dpt_bpp;
}
- drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
if (dsc) {
dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
if (!dsc_slice_count) {
- drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n");
+ drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");
return -ENOSPC;
}
@@ -223,149 +257,169 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
int local_bw_overhead;
- int remote_bw_overhead;
int link_bpp_x16;
- int remote_tu;
- fixed20_12 pbn;
- drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
+ drm_dbg_kms(display->drm, "Trying bpp %d\n", bpp);
link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));
- local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
- remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
- true, dsc_slice_count, link_bpp_x16);
-
- intel_dp_mst_compute_m_n(crtc_state, connector,
+ intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
&crtc_state->dp_m_n);
- /*
- * The TU size programmed to the HW determines which slots in
- * an MTP frame are used for this stream, which needs to match
- * the payload size programmed to the first downstream branch
- * device's payload table.
- *
- * Note that atm the payload's PBN value DRM core sends via
- * the ALLOCATE_PAYLOAD side-band message matches the payload
- * size (which it calculates from the PBN value) it programs
- * to the first branch device's payload table. The allocation
- * in the payload table could be reduced though (to
- * crtc_state->dp_m_n.tu), provided that the driver doesn't
- * enable SSC on the corresponding link.
- */
- pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
- link_bpp_x16,
- remote_bw_overhead));
- remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
-
- /*
- * Aligning the TUs ensures that symbols consisting of multiple
- * (4) symbol cycles don't get split between two consecutive
- * MTPs, as required by Bspec.
- * TODO: remove the alignment restriction for 128b/132b links
- * on some platforms, where Bspec allows this.
- */
- remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
-
- /*
- * Also align PBNs accordingly, since MST core will derive its
- * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
- * The above comment about the difference between the PBN
- * allocated for the whole path and the TUs allocated for the
- * first branch device's link also applies here.
- */
- pbn.full = remote_tu * mst_state->pbn_div.full;
- crtc_state->pbn = dfixed_trunc(pbn);
-
- drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
- crtc_state->dp_m_n.tu = remote_tu;
+ if (intel_dp->is_mst) {
+ int remote_bw_overhead;
+ int remote_tu;
+ fixed20_12 pbn;
+
+ remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
+ true, dsc_slice_count, link_bpp_x16);
+
+ /*
+ * The TU size programmed to the HW determines which slots in
+ * an MTP frame are used for this stream, which needs to match
+ * the payload size programmed to the first downstream branch
+ * device's payload table.
+ *
+ * Note that atm the payload's PBN value DRM core sends via
+ * the ALLOCATE_PAYLOAD side-band message matches the payload
+ * size (which it calculates from the PBN value) it programs
+ * to the first branch device's payload table. The allocation
+ * in the payload table could be reduced though (to
+ * crtc_state->dp_m_n.tu), provided that the driver doesn't
+ * enable SSC on the corresponding link.
+ */
+ pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
+ link_bpp_x16,
+ remote_bw_overhead));
+ remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);
+
+ /*
+ * Aligning the TUs ensures that symbols consisting of multiple
+ * (4) symbol cycles don't get split between two consecutive
+ * MTPs, as required by Bspec.
+ * TODO: remove the alignment restriction for 128b/132b links
+ * on some platforms, where Bspec allows this.
+ */
+ remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
+
+ /*
+ * Also align PBNs accordingly, since MST core will derive its
+ * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
+ * The above comment about the difference between the PBN
+ * allocated for the whole path and the TUs allocated for the
+ * first branch device's link also applies here.
+ */
+ pbn.full = remote_tu * pbn_div.full;
+
+ drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
+ crtc_state->dp_m_n.tu = remote_tu;
+
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+ connector->port,
+ dfixed_trunc(pbn));
+ } else {
+ /* Same as above for remote_tu */
+ crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
+ 4 / crtc_state->lane_count);
+
+ if (crtc_state->dp_m_n.tu <= 64)
+ slots = crtc_state->dp_m_n.tu;
+ else
+ slots = -EINVAL;
+ }
- slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
- connector->port,
- crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
if (slots >= 0) {
- drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
+ drm_WARN_ON(display->drm, slots != crtc_state->dp_m_n.tu);
break;
}
}
- /* We failed to find a proper bpp/timeslots, return error */
- if (ret)
- slots = ret;
-
if (slots < 0) {
- drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+ drm_dbg_kms(display->drm, "failed finding vcpi slots:%d\n",
slots);
- } else {
- if (!dsc)
- crtc_state->pipe_bpp = bpp;
- else
- crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
- drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
+ return slots;
}
- return slots;
+ if (!dsc)
+ crtc_state->pipe_bpp = bpp;
+ else
+ crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
+
+ drm_dbg_kms(display->drm, "Got %d slots for pipe bpp %d dsc %d\n",
+ slots, bpp, dsc);
+
+ return 0;
}
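
intel_dp_mtp_tu_compute_config() now serves both branches: with an MST hub it derives the remote TU and PBN from the path bandwidth, while for 128b/132b SST it only aligns the locally computed TU and checks that it fits the 64 slots of an MTP. A hypothetical worked example of the MST-side fixed-point math, where fixed20_12 is 20.12 fixed point (dfixed_const(x) == x << 12) and all numbers are invented:

/* Invented values: 1000 PBN payload on a 2-lane link, 32 PBN per slot */
fixed20_12 pbn, pbn_div;
int lane_count = 2;
int remote_tu;

pbn.full = dfixed_const(1000);
pbn_div.full = dfixed_const(32);

remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);	/* 31.25 -> 32 slots */

/* keep 4-cycle symbols within one MTP: ALIGN(32, 4 / 2) == 32 */
remote_tu = ALIGN(remote_tu, 4 / lane_count);

pbn.full = remote_tu * pbn_div.full;	/* re-derive PBN from the aligned TU */
/* dfixed_trunc(pbn) == 1024, the value handed to the MST core */
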
-static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int mst_stream_find_vcpi_slots_for_bpp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct link_config_limits *limits,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc)
{
- int slots = -EINVAL;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_dp_mst_topology_state *mst_state;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
+ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state,
+ max_bpp, min_bpp,
+ conn_state, step, dsc);
+}
+
+static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
* YUV420 is only half of the pipe bpp value.
*/
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- fxp_q4_to_int(limits->link.max_bpp_x16),
- fxp_q4_to_int(limits->link.min_bpp_x16),
- limits,
- conn_state, 2 * 3, false);
-
- if (slots < 0)
- return slots;
-
- return 0;
+ return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state,
+ fxp_q4_to_int(limits->link.max_bpp_x16),
+ fxp_q4_to_int(limits->link.min_bpp_x16),
+ limits,
+ conn_state, 2 * 3, false);
}
-static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
{
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int slots = -EINVAL;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
- u8 dsc_max_bpc;
int min_compressed_bpp, max_compressed_bpp;
- /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (DISPLAY_VER(i915) >= 12)
- dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
- else
- dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);
-
- max_bpp = min_t(u8, dsc_max_bpc * 3, limits->pipe.max_bpp);
+ max_bpp = limits->pipe.max_bpp;
min_bpp = limits->pipe.min_bpp;
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
dsc_bpc);
- drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Source supported min bpp %d max bpp %d\n",
min_bpp, max_bpp);
sink_max_bpp = dsc_bpc[0] * 3;
@@ -378,7 +432,7 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
sink_max_bpp = dsc_bpc[i] * 3;
}
- drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Sink supported min bpp %d max bpp %d\n",
sink_min_bpp, sink_max_bpp);
if (min_bpp < sink_min_bpp)
@@ -389,41 +443,28 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state->pipe_bpp = max_bpp;
- max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- crtc_state,
- max_bpp / 3);
- max_compressed_bpp = min(max_compressed_bpp,
- fxp_q4_to_int(limits->link.max_bpp_x16));
-
- min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
- min_compressed_bpp = max(min_compressed_bpp,
- fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
+ min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
min_compressed_bpp, max_compressed_bpp);
/* Align compressed bpps according to our own constraints */
- max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
+ max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
crtc_state->pipe_bpp);
- min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
+ min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
crtc_state->pipe_bpp);
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
- min_compressed_bpp, limits,
- conn_state, 1, true);
-
- if (slots < 0)
- return slots;
-
- return 0;
+ return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state, max_compressed_bpp,
+ min_compressed_bpp, limits,
+ conn_state, 1, true);
}
-static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+
+static int mst_stream_update_slots(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_topology_state *topology_state;
u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
@@ -431,7 +472,7 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
if (IS_ERR(topology_state)) {
- drm_dbg_kms(&i915->drm, "slot update failed\n");
+ drm_dbg_kms(display->drm, "slot update failed\n");
return PTR_ERR(topology_state);
}
@@ -474,12 +515,13 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
}
static bool
-adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
+adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
struct link_config_limits *limits,
bool dsc)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;
@@ -487,15 +529,15 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
return true;
if (!dsc) {
- if (intel_dp_supports_dsc(connector, crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
return false;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
@@ -508,7 +550,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
return true;
}
- drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
+ drm_WARN_ON(display->drm, limits->min_rate != limits->max_rate);
if (limits->max_rate < 540000)
min_bpp_x16 = fxp_q4_from_int(13);
@@ -518,7 +560,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
if (limits->link.min_bpp_x16 >= min_bpp_x16)
return true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
@@ -533,56 +575,31 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
}
static bool
-intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- struct intel_crtc_state *crtc_state,
- bool dsc,
- struct link_config_limits *limits)
-{
- /*
- * for MST we always configure max link bw - the spec doesn't
- * seem to suggest we should do otherwise.
- */
- limits->min_rate = limits->max_rate =
- intel_dp_max_link_rate(intel_dp);
-
- limits->min_lane_count = limits->max_lane_count =
- intel_dp_max_lane_count(intel_dp);
-
- limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
- /*
- * FIXME: If all the streams can't fit into the link with
- * their current pipe_bpp we should reduce pipe_bpp across
- * the board until things start to fit. Until then we
- * limit to <= 8bpc since that's what was hardcoded for all
- * MST streams previously. This hack should be removed once
- * we have the proper retry logic in place.
- */
- limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
-
- intel_dp_test_compute_config(intel_dp, crtc_state, limits);
-
- if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
- crtc_state,
- dsc,
- limits))
+mst_stream_compute_config_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ struct intel_crtc_state *crtc_state,
+ bool dsc,
+ struct link_config_limits *limits)
+{
+ if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
+ limits))
return false;
- return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
+ return adjust_limits_for_dsc_hblank_expansion_quirk(intel_dp,
+ connector,
crtc_state,
limits,
dsc);
}
-static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
@@ -609,18 +626,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes);
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_mst_compute_config_limits(intel_dp,
- connector,
- pipe_config,
- false,
- &limits);
+ !mst_stream_compute_config_limits(intel_dp, connector,
+ pipe_config, false, &limits);
if (!dsc_needed) {
- ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
- conn_state, &limits);
+ ret = mst_stream_compute_link_config(intel_dp, pipe_config,
+ conn_state, &limits);
if (ret == -EDEADLK)
return ret;
@@ -629,35 +643,37 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
dsc_needed = true;
}
+ if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
+ drm_dbg_kms(display->drm, "DSC required but not available\n");
+ return -EINVAL;
+ }
+
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
- drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_supports_dsc(connector, pipe_config))
- return -EINVAL;
- if (!intel_dp_mst_compute_config_limits(intel_dp,
- connector,
- pipe_config,
- true,
- &limits))
+ if (!mst_stream_compute_config_limits(intel_dp, connector,
+ pipe_config, true,
+ &limits))
return -EINVAL;
/*
* FIXME: As bpc is hardcoded to 8, as mentioned above,
* WARN and ignore the debug flag force_dsc_bpc for now.
*/
- drm_WARN(&dev_priv->drm, intel_dp->force_dsc_bpc, "Cannot Force BPC for MST\n");
+ drm_WARN(display->drm, intel_dp->force_dsc_bpc,
+ "Cannot Force BPC for MST\n");
/*
* Try to get at least some timeslots and then see, if
* we can fit there with DSC.
*/
- drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");
+ drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");
- ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
- conn_state, &limits);
+ ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
+ conn_state, &limits);
if (ret < 0)
return ret;
@@ -669,14 +685,14 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
+ ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
if (ret)
return ret;
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -698,13 +714,13 @@ static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
struct intel_dp *mst_port)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_digital_connector_state *conn_state;
struct intel_connector *connector;
u8 transcoders = 0;
int i;
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
@@ -758,7 +774,7 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
u8 fec_pipe_mask = 0;
@@ -766,12 +782,12 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mst_pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
/* Atomic connector check should've added all the MST CRTCs. */
- if (drm_WARN_ON(&i915->drm, !crtc_state))
+ if (drm_WARN_ON(display->drm, !crtc_state))
return -EINVAL;
if (crtc_state->fec_enable)
@@ -850,13 +866,12 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
return 0;
}
-static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
/* lowest numbered transcoder will be designated master */
crtc_state->mst_master_transcoder =
@@ -879,10 +894,10 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
* recomputation of the corresponding CRTC states.
*/
static int
-intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
- struct intel_atomic_state *state)
+mst_connector_atomic_topology_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
@@ -890,7 +905,7 @@ intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
- drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+ drm_connector_list_iter_begin(display->drm, &connector_list_iter);
for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
struct intel_digital_connector_state *conn_iter_state;
struct intel_crtc_state *crtc_state;
@@ -928,8 +943,8 @@ intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
}
static int
-intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *_state)
+mst_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_connector *intel_connector =
@@ -940,7 +955,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
+ ret = mst_connector_atomic_topology_check(intel_connector, state);
if (ret)
return ret;
@@ -957,42 +972,18 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
intel_connector->port);
}
-static void clear_act_sent(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void mst_stream_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT);
-}
-
-static void wait_for_act_sent(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
-
- if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT, 1))
- drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
-
- drm_dp_check_act_status(&intel_dp->mst_mgr);
-}
-
-static void intel_mst_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_dbg_kms(&i915->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
if (intel_dp->active_mst_links == 1)
@@ -1003,15 +994,15 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
-static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_dp_mst_topology_state *old_mst_state =
@@ -1022,15 +1013,13 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_crtc *pipe_crtc;
bool last_mst_stream;
int i;
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -1044,13 +1033,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
- clear_act_sent(encoder, old_crtc_state);
+ intel_ddi_clear_act_sent(encoder, old_crtc_state);
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, old_crtc_state->cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
- wait_for_act_sent(encoder, old_crtc_state);
+ intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
old_payload, new_payload);
@@ -1063,7 +1053,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -1080,8 +1070,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* BSpec 4287: disable DIP after the transcoder is disabled and before
* the transcoder clock select is set to none.
*/
- intel_dp_set_infoframes(&dig_port->base, false,
- old_crtc_state, NULL);
+ intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
@@ -1089,51 +1078,49 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* From older GENs spec: "Configure Transcoder Clock Select to direct
* no clock to the transcoder"
*/
- if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
+ if (DISPLAY_VER(display) < 12 || !last_mst_stream)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
if (last_mst_stream)
- dig_port->base.post_disable(state, &dig_port->base,
- old_crtc_state, NULL);
+ primary_encoder->post_disable(state, primary_encoder,
+ old_crtc_state, NULL);
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
}
-static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
if (intel_dp->active_mst_links == 0 &&
- dig_port->base.post_pll_disable)
- dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
+ primary_encoder->post_pll_disable)
+ primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
-static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
if (intel_dp->active_mst_links == 0)
- dig_port->base.pre_pll_enable(state, &dig_port->base,
- pipe_config, NULL);
+ primary_encoder->pre_pll_enable(state, primary_encoder,
+ pipe_config, NULL);
else
/*
* The port PLL state needs to get updated for secondary
* streams as for the primary stream.
*/
- intel_ddi_update_active_dpll(state, &dig_port->base,
+ intel_ddi_update_active_dpll(state, primary_encoder,
to_intel_crtc(pipe_config->uapi.crtc));
}
@@ -1164,15 +1151,15 @@ static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
crtc_state->port_clock, crtc_state->lane_count);
}
-static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
@@ -1186,11 +1173,10 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
if (first_mst_stream)
@@ -1201,8 +1187,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_sink_enable_decompression(state, connector, pipe_config);
if (first_mst_stream) {
- dig_port->base.pre_enable(state, &dig_port->base,
- pipe_config, NULL);
+ primary_encoder->pre_enable(state, primary_encoder,
+ pipe_config, NULL);
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
@@ -1212,24 +1198,28 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (ret < 0)
- intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
+ intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
/*
* Before Gen 12 this is not done as part of
- * dig_port->base.pre_enable() and should be done here. For
+ * primary_encoder->pre_enable() and should be done here. For
* Gen 12+ the step in which this should be done is different for the
* first MST stream, so it's done on the DDI for the first stream and
* here for the following ones.
*/
- if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
+ if (DISPLAY_VER(display) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
- intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
+ if (DISPLAY_VER(display) >= 13 && !first_mst_stream)
+ intel_ddi_config_transcoder_func(encoder, pipe_config);
+
+ intel_dsc_dp_pps_write(primary_encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 clear = 0;
u32 set = 0;
@@ -1237,7 +1227,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
if (!IS_ALDERLAKE_P(i915))
return;
- if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
+ if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
return;
/* Wa_14013163432:adlp */
@@ -1245,7 +1235,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
/* Wa_14014143976:adlp */
- if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
+ if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) {
if (intel_dp_is_uhbr(crtc_state))
set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
else if (crtc_state->fec_enable)
@@ -1258,20 +1248,18 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
if (!clear && !set)
return;
- intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
+ intel_de_rmw(display, CHICKEN_MISC_3, clear, set);
}
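
intel_de_rmw(), now called with struct intel_display rather than drm_i915_private, is the display read-modify-write helper used for these chicken-bit workarounds. A simplified sketch of its semantics (assumed from intel_de.h; the real helper also deals with uncore plumbing):

/* Simplified sketch; returns the pre-modification register value */
static u32 intel_de_rmw_sketch(struct intel_display *display,
			       i915_reg_t reg, u32 clear, u32 set)
{
	u32 old = intel_de_read(display, reg);
	u32 val = (old & ~clear) | set;

	if (val != old)
		intel_de_write(display, reg, val);

	return old;
}
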
-static void intel_mst_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
@@ -1279,16 +1267,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_crtc *pipe_crtc;
int ret, i;
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+ drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
if (intel_dp_is_uhbr(pipe_config)) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
- intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
+ intel_de_write(display, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
- intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
+ intel_de_write(display, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
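
For UHBR rates the pixel clock is handed to the transcoder in Hz, split 24 bits at a time across TRANS_DP2_VFREQHIGH and TRANS_DP2_VFREQLOW. A hypothetical example with a 600 MHz pixel clock:

/* Invented numbers illustrating the 24-bit split above */
u64 crtc_clock_hz = KHz(600000);		/* 600,000,000 Hz */
u32 vfreq_high = crtc_clock_hz >> 24;		/* 35 (0x23) */
u32 vfreq_low = crtc_clock_hz & 0xffffff;	/* 12,797,440 (0xc34600) */
/* (35ULL << 24) + 12797440 == 600000000 */
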
@@ -1296,15 +1284,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, pipe_config);
- clear_act_sent(encoder, pipe_config);
+ intel_ddi_clear_act_sent(encoder, pipe_config);
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, trans), 0,
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
- wait_for_act_sent(encoder, pipe_config);
+ intel_ddi_wait_for_act_sent(encoder, pipe_config);
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
@@ -1313,10 +1302,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_atomic_get_mst_payload_state(mst_state,
connector->port));
if (ret < 0)
- intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
+ intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
+ if (DISPLAY_VER(display) >= 12)
+ intel_de_rmw(display, CHICKEN_TRANS(display, trans),
FECSTALL_DIS_DPTSTREAM_DPTTG,
pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
@@ -1334,8 +1323,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
-static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
+static bool mst_stream_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
*pipe = intel_mst->pipe;
@@ -1344,28 +1333,26 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
return false;
}
-static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void mst_stream_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
- dig_port->base.get_config(&dig_port->base, pipe_config);
+ primary_encoder->get_config(primary_encoder, pipe_config);
}
-static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
- return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
+ return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
}
-static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+static int mst_connector_get_ddc_modes(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
struct intel_dp *intel_dp = intel_connector->mst_port;
const struct drm_edid *drm_edid;
int ret;
@@ -1373,7 +1360,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
@@ -1386,7 +1373,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}
static int
-intel_dp_mst_connector_late_register(struct drm_connector *connector)
+mst_connector_late_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
int ret;
@@ -1405,7 +1392,7 @@ intel_dp_mst_connector_late_register(struct drm_connector *connector)
}
static void
-intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+mst_connector_early_unregister(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1414,35 +1401,36 @@ intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
intel_connector->port);
}
-static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+static const struct drm_connector_funcs mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_dp_mst_connector_late_register,
- .early_unregister = intel_dp_mst_connector_early_unregister,
+ .late_register = mst_connector_late_register,
+ .early_unregister = mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_dp_mst_get_modes(struct drm_connector *connector)
+static int mst_connector_get_modes(struct drm_connector *connector)
{
- return intel_dp_mst_get_ddc_modes(connector);
+ return mst_connector_get_ddc_modes(connector);
}
static int
-intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx,
- enum drm_mode_status *status)
+mst_connector_mode_valid_ctx(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_port *port = intel_connector->port;
const int min_bpp = 18;
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
bool dsc = false;
@@ -1512,7 +1500,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
- intel_dp_dsc_get_max_compressed_bpp(dev_priv,
+ intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
max_lanes,
target_clock,
@@ -1530,7 +1518,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- if (intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc) {
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
@@ -1544,8 +1532,9 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
-static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_atomic_state *state)
+static struct drm_encoder *
+mst_connector_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
@@ -1557,20 +1546,20 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
}
static int
-intel_dp_mst_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx, bool force)
+mst_connector_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
intel_dp_flush_connector_commits(intel_connector);
@@ -1579,15 +1568,15 @@ intel_dp_mst_detect(struct drm_connector *connector,
intel_connector->port);
}
-static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
- .get_modes = intel_dp_mst_get_modes,
- .mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
- .atomic_best_encoder = intel_mst_atomic_best_encoder,
- .atomic_check = intel_dp_mst_atomic_check,
- .detect_ctx = intel_dp_mst_detect,
+static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
+ .get_modes = mst_connector_get_modes,
+ .mode_valid_ctx = mst_connector_mode_valid_ctx,
+ .atomic_best_encoder = mst_connector_atomic_best_encoder,
+ .atomic_check = mst_connector_atomic_check,
+ .detect_ctx = mst_connector_detect_ctx,
};
-static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void mst_stream_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
@@ -1595,31 +1584,32 @@ static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_mst);
}
-static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
- .destroy = intel_dp_mst_encoder_destroy,
+static const struct drm_encoder_funcs mst_stream_encoder_funcs = {
+ .destroy = mst_stream_encoder_destroy,
};
-static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+static bool mst_connector_get_hw_state(struct intel_connector *connector)
{
- if (intel_attached_encoder(connector) && connector->base.state->crtc) {
- enum pipe pipe;
- if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
- return false;
- return true;
- }
- return false;
+ /* This is the MST stream encoder set in ->pre_enable, if any */
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ enum pipe pipe;
+
+ if (!encoder || !connector->base.state->crtc)
+ return false;
+
+ return encoder->get_hw_state(encoder, &pipe);
}
-static int intel_dp_mst_add_properties(struct intel_dp *intel_dp,
- struct drm_connector *connector,
- const char *pathprop)
+static int mst_topology_add_connector_properties(struct intel_dp *intel_dp,
+ struct drm_connector *connector,
+ const char *pathprop)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.path_property, 0);
+ display->drm->mode_config.path_property, 0);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tile_property, 0);
+ display->drm->mode_config.tile_property, 0);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
@@ -1653,7 +1643,7 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
struct drm_dp_desc desc;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1691,21 +1681,21 @@ static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *conn
!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
return false;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
connector->base.base.id, connector->base.name);
return true;
}
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- const char *pathprop)
+static struct drm_connector *
+mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
enum pipe pipe;
@@ -1715,7 +1705,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (!intel_connector)
return NULL;
- intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ connector = &intel_connector->base;
+
+ intel_connector->get_hw_state = mst_connector_get_hw_state;
intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
@@ -1723,23 +1715,22 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_init_modeset_retry_work(intel_connector);
- intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
- intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
- intel_connector->dp.dsc_hblank_expansion_quirk =
- detect_dsc_hblank_expansion_quirk(intel_connector);
-
- connector = &intel_connector->base;
- ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_dynamic_init(display->drm, connector, &mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
- drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+ intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
+ intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
+ intel_connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(intel_connector);
+
+ drm_connector_helper_add(connector, &mst_connector_helper_funcs);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
@@ -1748,13 +1739,13 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
goto err;
}
- ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop);
+ ret = mst_topology_add_connector_properties(intel_dp, connector, pathprop);
if (ret)
goto err;
ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
- drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
connector->name, connector->base.id);
return connector;
@@ -1765,24 +1756,26 @@ err:
}
static void
-intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}
-static const struct drm_dp_mst_topology_cbs mst_cbs = {
- .add_connector = intel_dp_add_mst_connector,
- .poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
+static const struct drm_dp_mst_topology_cbs mst_topology_cbs = {
+ .add_connector = mst_topology_add_connector,
+ .poll_hpd_irq = mst_topology_poll_hpd_irq,
};
+/* Create a fake encoder for an individual MST stream */
static struct intel_dp_mst_encoder *
-intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
+mst_stream_encoder_create(struct intel_digital_port *dig_port, enum pipe pipe)
{
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_encoder *primary_encoder = &dig_port->base;
struct intel_dp_mst_encoder *intel_mst;
- struct intel_encoder *intel_encoder;
- struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_encoder *encoder;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
@@ -1790,16 +1783,16 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
return NULL;
intel_mst->pipe = pipe;
- intel_encoder = &intel_mst->base;
+ encoder = &intel_mst->base;
intel_mst->primary = dig_port;
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &mst_stream_encoder_funcs,
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
- intel_encoder->type = INTEL_OUTPUT_DP_MST;
- intel_encoder->power_domain = dig_port->base.power_domain;
- intel_encoder->port = dig_port->base.port;
- intel_encoder->cloneable = 0;
+ encoder->type = INTEL_OUTPUT_DP_MST;
+ encoder->power_domain = primary_encoder->power_domain;
+ encoder->port = primary_encoder->port;
+ encoder->cloneable = 0;
/*
* This is wrong, but broken userspace uses the intersection
* of possible_crtcs of all the encoders of a given connector
@@ -1808,36 +1801,37 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
* To keep such userspace functioning we must misconfigure
* this to make sure the intersection is not empty :(
*/
- intel_encoder->pipe_mask = ~0;
-
- intel_encoder->compute_config = intel_dp_mst_compute_config;
- intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
- intel_encoder->disable = intel_mst_disable_dp;
- intel_encoder->post_disable = intel_mst_post_disable_dp;
- intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
- intel_encoder->update_pipe = intel_ddi_update_pipe;
- intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
- intel_encoder->pre_enable = intel_mst_pre_enable_dp;
- intel_encoder->enable = intel_mst_enable_dp;
- intel_encoder->audio_enable = intel_audio_codec_enable;
- intel_encoder->audio_disable = intel_audio_codec_disable;
- intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
- intel_encoder->get_config = intel_dp_mst_enc_get_config;
- intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
+ encoder->pipe_mask = ~0;
+
+ encoder->compute_config = mst_stream_compute_config;
+ encoder->compute_config_late = mst_stream_compute_config_late;
+ encoder->disable = mst_stream_disable;
+ encoder->post_disable = mst_stream_post_disable;
+ encoder->post_pll_disable = mst_stream_post_pll_disable;
+ encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->pre_pll_enable = mst_stream_pre_pll_enable;
+ encoder->pre_enable = mst_stream_pre_enable;
+ encoder->enable = mst_stream_enable;
+ encoder->audio_enable = intel_audio_codec_enable;
+ encoder->audio_disable = intel_audio_codec_disable;
+ encoder->get_hw_state = mst_stream_get_hw_state;
+ encoder->get_config = mst_stream_get_config;
+ encoder->initial_fastset_check = mst_stream_initial_fastset_check;
return intel_mst;
}
+/* Create the fake encoders for MST streams */
static bool
-intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
+mst_stream_encoders_create(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
- intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
+ for_each_pipe(display, pipe)
+ intel_dp->mst_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
return true;
}
@@ -1850,25 +1844,25 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
- if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp))
return 0;
- if (DISPLAY_VER(i915) < 12 && port == PORT_A)
+ if (DISPLAY_VER(display) < 12 && port == PORT_A)
return 0;
- if (DISPLAY_VER(i915) < 11 && port == PORT_E)
+ if (DISPLAY_VER(display) < 11 && port == PORT_E)
return 0;
- intel_dp->mst_mgr.cbs = &mst_cbs;
+ intel_dp->mst_mgr.cbs = &mst_topology_cbs;
/* create encoders */
- intel_dp_create_fake_mst_encoders(dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
+ mst_stream_encoders_create(dig_port);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
&intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
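The conversions in this file all follow one mechanical pattern: derive a struct intel_display pointer from the nearest context object via to_intel_display() and pass display (or display->drm) where a drm_i915_private was threaded through before. A minimal sketch of the idiom, not part of the patch itself, assuming the existing to_intel_display() overloads for encoders and connectors:

/*
 * Sketch of the dev_priv -> display conversion idiom used above.
 */
static void example_dbg(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s]\n",
		    encoder->base.base.id, encoder->base.name);
}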
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 8343804ce3f8..c6bdc1d190a4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_connector_state;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -30,4 +31,10 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp);
bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
+int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc);
+
#endif /* __INTEL_DP_MST_H__ */
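intel_dp_mtp_tu_compute_config() is now exported for use outside intel_dp_mst.c. Judging by the parameters, it searches the [min_bpp, max_bpp] range in decrements of step for a link-bandwidth-feasible MTP/TU configuration; the sketch below is a hypothetical call site and assumes the usual 0/-errno return convention and an uncompressed bpp step of 2 * 3:

/* Hypothetical caller; see intel_dp_mst.c for the real semantics. */
int ret = intel_dp_mtp_tu_compute_config(intel_dp, crtc_state,
					 max_bpp, min_bpp, conn_state,
					 2 * 3 /* assumed step */, false);
if (ret)
	return ret;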
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index e05819300d77..380b359b0420 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -8,7 +8,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 94198bc04939..589872babdd7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -3,11 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
-
#include <drm/display/drm_dp_tunnel.h>
#include "intel_atomic.h"
+#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 0f12f2c3467c..52a36a2281e6 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -22,6 +22,7 @@
*/
#include "bxt_dpio_phy_regs.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -855,6 +856,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -871,7 +873,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dig_port->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+ !chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, lane_mask);
@@ -1013,11 +1015,11 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (dig_port->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, false);
dig_port->release_cl2_override = false;
}
}
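Worth noting while converting the chv_phy_powergate_ch() callers: the CL2 override taken in chv_phy_pre_pll_enable() is only dropped by the encoder that actually acquired it, tracked via release_cl2_override. Summarized from the hunks above:

/*
 * CL2 override lifecycle on CHV PHY0, as seen in the hunks above:
 *
 *   chv_phy_pre_pll_enable() (CH0 driving pipe B):
 *       dig_port->release_cl2_override =
 *           !chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, true);
 *
 *   chv_phy_release_cl2_override():
 *       if (dig_port->release_cl2_override)
 *           chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, false);
 *
 * Only the encoder that took the override releases it, keeping the
 * powergate enable/disable calls balanced.
 */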
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 198ceda790d2..3256b1293f7f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index e60497bb8a94..d86cc9ffd4ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -25,6 +25,7 @@
#include <linux/string_helpers.h>
#include "bxt_dpio_phy_regs.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index ce8c76e44e6a..8b1f0e92a11c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -205,7 +205,7 @@ void intel_dpt_resume(struct drm_i915_private *i915)
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
- i915_ggtt_resume_vm(fb->dpt_vm);
+ i915_ggtt_resume_vm(fb->dpt_vm, true);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
}
@@ -233,7 +233,7 @@ void intel_dpt_suspend(struct drm_i915_private *i915)
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
- i915_ggtt_suspend_vm(fb->dpt_vm);
+ i915_ggtt_suspend_vm(fb->dpt_vm, true);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index 573f72068899..d2dede0a5229 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index bb39eb96e812..0fec01b79b23 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -68,7 +68,9 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
- if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ struct intel_display *display = &i915->display;
+
+ if (HAS_DOUBLE_BUFFERED_M_N(display))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 4d3785f5cb52..e6f8fc743fb4 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -256,15 +256,6 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}
-static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
-{
- /* only full byte-enables can be converted to indexed writes */
- return intel_dsb_prev_ins_is_write(dsb,
- DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT |
- DSB_BYTE_EN << DSB_BYTE_EN_SHIFT,
- reg);
-}
-
static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
{
return intel_dsb_prev_ins_is_write(dsb,
@@ -273,7 +264,7 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
}
/**
- * intel_dsb_reg_write_indexed() - Emit register wriite to the DSB context
+ * intel_dsb_reg_write_indexed() - Emit indexed register write to the DSB context
* @dsb: DSB context
* @reg: register address.
* @val: value.
@@ -304,44 +295,23 @@ void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
* we are writing odd no of dwords, Zeros will be added in the end for
* padding.
*/
- if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
- !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
- intel_dsb_emit(dsb, val,
- (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
- (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ if (!intel_dsb_prev_ins_is_indexed_write(dsb, reg))
+ intel_dsb_emit(dsb, 0, /* count */
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
i915_mmio_reg_offset(reg));
- } else {
- if (!assert_dsb_has_room(dsb))
- return;
-
- /* convert to indexed write? */
- if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
- u32 prev_val = dsb->ins[0];
- dsb->ins[0] = 1; /* count */
- dsb->ins[1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
-
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
- dsb->ins[0]);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
- dsb->ins[1]);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2,
- prev_val);
-
- dsb->free_pos++;
- }
+ if (!assert_dsb_has_room(dsb))
+ return;
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
- /* Update the count */
- dsb->ins[0]++;
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
- dsb->ins[0]);
+ /* Update the count */
+ dsb->ins[0]++;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
+ dsb->ins[0]);
- /* if number of data words is odd, then the last dword should be 0.*/
- if (dsb->free_pos & 0x1)
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
- }
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
+ /* if number of data words is odd, then the last dword should be 0.*/
+ if (dsb->free_pos & 0x1)
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
}
void intel_dsb_reg_write(struct intel_dsb *dsb,
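With the MMIO-to-indexed conversion path removed, intel_dsb_reg_write_indexed() always emits an indexed-write instruction up front (count 0) and then bumps the count once per data dword. A worked example of the resulting DSB buffer contents, derived from the code above:

/*
 * Three back-to-back intel_dsb_reg_write_indexed(dsb, reg, v0..v2)
 * calls now produce a single instruction (dword indices relative to
 * ins_start_offset):
 *
 *   +0: 3                                   count, rewritten per write
 *   +1: DSB_OPCODE_INDEXED_WRITE | offset   emitted once, while count was 0
 *   +2: v0
 *   +3: v1
 *   +4: v2
 *   +5: 0                                   pad, odd number of data dwords
 *
 * The pad dword is written without advancing free_pos, so the next
 * data dword simply overwrites it.
 */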
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e8129a720210..b2b78f39cfd3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -745,6 +745,23 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
+static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
+{
+ switch (format) {
+ case PIXEL_FORMAT_RGB888:
+ return MIPI_DSI_FMT_RGB888;
+ case PIXEL_FORMAT_RGB666_LOOSELY_PACKED:
+ return MIPI_DSI_FMT_RGB666;
+ case PIXEL_FORMAT_RGB666:
+ return MIPI_DSI_FMT_RGB666_PACKED;
+ case PIXEL_FORMAT_RGB565:
+ return MIPI_DSI_FMT_RGB565;
+ default:
+ MISSING_CASE(format);
+ return MIPI_DSI_FMT_RGB666;
+ }
+}
+
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -762,8 +779,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format =
- pixel_format_from_register_bits(
- mipi_config->videomode_color_format << 7);
+ vbt_to_dsi_pixel_format(mipi_config->videomode_color_format);
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
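vbt_to_dsi_pixel_format() replaces the old trick of shifting the VBT value into register-bit positions and decoding those; the mapping is now explicit. One detail that is easy to misread: drm's MIPI_DSI_FMT_RGB666 is the loosely packed 18 bpp variant, so it pairs with VBT's PIXEL_FORMAT_RGB666_LOOSELY_PACKED, while VBT's plain RGB666 becomes MIPI_DSI_FMT_RGB666_PACKED. For example:

/* Example mapping through the new helper: */
enum mipi_dsi_pixel_format fmt =
	vbt_to_dsi_pixel_format(PIXEL_FORMAT_RGB666_LOOSELY_PACKED);
/* fmt == MIPI_DSI_FMT_RGB666 */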
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 2d5ffb37eac9..abf19dfd6d9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -318,6 +318,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *_connector, bool force)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
@@ -325,10 +326,10 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->base.status;
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
@@ -336,11 +337,11 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *_connector)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int num_modes;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(&connector->base);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 6a7060889f40..223c4218c019 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1694,7 +1694,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
+ if (IS_ALIGNED(fb->base.offsets[i], 64))
continue;
else
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 98e1a3606227..37cdfa9c692a 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -7,6 +7,7 @@
#include <drm/drm_fixed.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index cbcd1e91b7be..8a49e2bb37fa 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -75,7 +75,7 @@ intel_atomic_global_state_get(struct intel_global_state *obj_state)
return obj_state;
}
-void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
const struct intel_global_state_funcs *funcs)
@@ -88,26 +88,26 @@ void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
obj->state = state;
obj->funcs = funcs;
- list_add_tail(&obj->head, &dev_priv->display.global.obj_list);
+ list_add_tail(&obj->head, &display->global.obj_list);
}
-void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+void intel_atomic_global_obj_cleanup(struct intel_display *display)
{
struct intel_global_obj *obj, *next;
- list_for_each_entry_safe(obj, next, &dev_priv->display.global.obj_list, head) {
+ list_for_each_entry_safe(obj, next, &display->global.obj_list, head) {
list_del(&obj->head);
- drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
+ drm_WARN_ON(display->drm, kref_read(&obj->state->ref) != 1);
intel_atomic_global_state_put(obj->state);
}
}
-static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+static void assert_global_state_write_locked(struct intel_display *display)
{
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
drm_modeset_lock_assert_held(&crtc->base.mutex);
}
@@ -126,23 +126,23 @@ static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (modeset_lock_is_held(ctx, &crtc->base.mutex))
return;
}
- drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
+ drm_WARN(display->drm, 1, "Global state not read locked\n");
}
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_global_obj *obj)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
int index, num_objs, i;
size_t size;
struct __intel_global_objs_state *arr;
@@ -184,7 +184,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
state->num_global_objs = num_objs;
- drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+ drm_dbg_atomic(display->drm, "Added new global object %p state %p to %p\n",
obj, obj_state, state);
return obj_state;
@@ -218,14 +218,14 @@ intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *old_obj_state, *new_obj_state;
struct intel_global_obj *obj;
int i;
for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
- drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
+ drm_WARN_ON(display->drm, obj->state != old_obj_state);
/*
* If the new state wasn't modified (and properly
@@ -234,7 +234,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
if (!new_obj_state->changed)
continue;
- assert_global_state_write_locked(dev_priv);
+ assert_global_state_write_locked(display);
old_obj_state->state = state;
new_obj_state->state = NULL;
@@ -265,10 +265,10 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state)
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
struct intel_atomic_state *state = obj_state->state;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
int ret;
ret = drm_modeset_lock(&crtc->base.mutex,
@@ -298,10 +298,10 @@ int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
bool
intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
if (!intel_atomic_get_new_crtc_state(state, crtc))
return false;
return true;
@@ -344,7 +344,7 @@ intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
int
intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_global_state *old_obj_state;
struct intel_global_obj *obj;
int i;
@@ -358,7 +358,7 @@ intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state
ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
if (ret == 0) {
- drm_err(&i915->drm, "global state timed out\n");
+ drm_err(display->drm, "global state timed out\n");
return -ETIMEDOUT;
}
}
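The conversion leaves the global state locking rules untouched; they can be read straight out of the asserts above:

/*
 * Global state locking rules, as encoded by the asserts above:
 *
 *   read access:  at least one crtc->base.mutex held
 *                 (assert_global_state_read_locked)
 *   write access: every crtc->base.mutex held
 *                 (assert_global_state_write_locked)
 *   serialized:   every CRTC has a crtc_state in this atomic state
 *                 (intel_atomic_global_state_is_serialized)
 */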
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index 6506a8e32972..d42fb2547ee9 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -9,8 +9,8 @@
#include <linux/kref.h>
#include <linux/list.h>
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_display;
struct intel_global_obj;
struct intel_global_state;
@@ -69,11 +69,11 @@ struct __intel_global_objs_state {
struct intel_global_state *state, *old_state, *new_state;
};
-void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
const struct intel_global_state_funcs *funcs);
-void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+void intel_atomic_global_obj_cleanup(struct intel_display *display);
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
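For callers, only the first argument of the init/cleanup pair changes. A sketch of a registration under the new signature; the object, state, and funcs names here are hypothetical:

/* Hypothetical global object registration with the new signature. */
static const struct intel_global_state_funcs example_funcs = {
	.atomic_duplicate_state = example_duplicate_state,
	.atomic_destroy_state = example_destroy_state,
};

static int example_init(struct intel_display *display)
{
	struct example_state *state;	/* embeds struct intel_global_state base */

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->example_obj,
				     &state->base, &example_funcs);

	return 0;
}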
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index e3d938c7f83e..807cf606e7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -496,14 +496,13 @@ static int
gmbus_xfer_read(struct intel_display *display, struct i2c_msg *msg,
u32 gmbus0_reg, u32 gmbus1_index)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u8 *buf = msg->buf;
unsigned int rx_size = msg->len;
unsigned int len;
int ret;
do {
- if (HAS_GMBUS_BURST_READ(i915))
+ if (HAS_GMBUS_BURST_READ(display))
len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
else
len = min(rx_size, gmbus_max_xfer_size(display));
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 8fee26d791f4..7464b44c8bb3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -31,27 +31,33 @@
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
-/* WA: 16022217614 */
static void
-intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
- struct intel_hdcp *hdcp)
+intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
+ struct intel_hdcp *hdcp,
+ bool enable)
{
struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t rekey_reg;
+ u32 rekey_bit = 0;
/* Here we assume HDMI is in TMDS mode of operation */
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
- if (DISPLAY_VER(display) >= 14) {
- if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER))
- intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
- 0, HDCP_LINE_REKEY_DISABLE);
- else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
- IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER))
- intel_de_rmw(display,
- TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder),
- 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
+ if (DISPLAY_VER(display) >= 30) {
+ rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
+ rekey_bit = XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
+ } else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
+ IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) {
+ rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
+ rekey_bit = TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
+ } else if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) {
+ rekey_reg = CHICKEN_TRANS(display, hdcp->cpu_transcoder);
+ rekey_bit = HDCP_LINE_REKEY_DISABLE;
}
+
+ if (rekey_bit)
+ intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit);
}
static int intel_conn_to_vcpi(struct intel_atomic_state *state,
@@ -343,7 +349,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
/* PG1 (power well #1) needs to be enabled */
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- enabled = intel_display_power_well_is_enabled(i915, id);
+ enabled = intel_display_power_well_is_enabled(display, id);
/*
* Another req for hdcp key loadability is enabled state of pll for
@@ -1048,6 +1054,8 @@ static int intel_hdcp1_enable(struct intel_connector *connector)
return ret;
}
+ intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, true);
+
/* Incase of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
ret = intel_hdcp_auth(connector);
@@ -2075,7 +2083,7 @@ static int _intel_hdcp2_enable(struct intel_atomic_state *state,
connector->base.base.id, connector->base.name,
hdcp->content_type);
- intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);
+ intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, false);
ret = hdcp2_authenticate_and_encrypt(state, connector);
if (ret) {
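The rename from "disable" to "adjust" reflects that the workaround is now applied in both directions: the HDCP 1.4 path re-enables line rekeying before authentication, while the HDCP 2.x path still disables it. Since the selected bit is a disable bit, the rmw inverts the flag:

/*
 * intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit);
 *
 *   enable == true  -> clear *_LINE_REKEY_DISABLE -> rekeying on
 *   enable == false -> set   *_LINE_REKEY_DISABLE -> rekeying off
 *
 * rekey_bit stays 0 on platforms/steppings without the workaround,
 * which skips the rmw entirely.
 */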
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index c6ce6bb88d7c..ed29dd0ccef0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1600,14 +1600,12 @@ static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
- struct intel_display *display = to_intel_display(dig_port);
int retry;
for (retry = 0; retry < 3; retry++)
if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
return true;
- drm_err(display->drm, "Link check failed\n");
return false;
}
@@ -2556,10 +2554,10 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return connector->status;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
@@ -2586,12 +2584,11 @@ static void
intel_hdmi_force(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return;
intel_hdmi_unset_edid(connector);
@@ -3042,7 +3039,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
}
}
-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -3059,17 +3056,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_encoder->base.base.id, intel_encoder->base.name);
if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
- return;
+ return false;
if (drm_WARN(dev, dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
- return;
+ return false;
ddc_pin = intel_hdmi_ddc_pin(intel_encoder);
if (!ddc_pin)
- return;
+ return false;
drm_connector_init_with_ddc(dev, connector,
&intel_hdmi_connector_funcs,
@@ -3114,6 +3111,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
&conn_info);
if (!intel_hdmi->cec_notifier)
drm_dbg_kms(display->drm, "CEC notifier get failed\n");
+
+ return true;
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 466f48df8a74..38deaeb302a2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -22,7 +22,7 @@ struct intel_encoder;
struct intel_hdmi;
union hdmi_infoframe;
-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
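Returning bool lets callers distinguish a fully initialized HDMI connector from the early-return paths (port A on old platforms, too few lanes, no DDC pin). A hypothetical call site reacting to the new return value:

/* Hypothetical caller; real call sites live in the DDI init path. */
if (!intel_hdmi_init_connector(dig_port, intel_connector)) {
	intel_connector_free(intel_connector);
	return false;
}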
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index a013b0e0ef54..3adc791d3776 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -813,8 +813,10 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*/
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(dev_priv))
+ !intel_display_device_enabled(display))
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index cb64c6f0ad1b..476ac88087e0 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1457,7 +1457,11 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder)
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug)
+ if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
+ !i915->display.irq.vlv_display_irqs_enabled)
+ return;
+
+ if (i915->display.funcs.hotplug)
i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index 19d1f196d9fb..fb6b84f6a81d 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -3,7 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_device.h>
+
#include "intel_de.h"
#include "intel_display.h"
#include "intel_hti.h"
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index c87cd1d16d0a..29705c159119 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -5,10 +5,9 @@
#include <drm/drm_fixed.h>
-#include "i915_drv.h"
-
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index b457c69dc0be..86cc03a4413c 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -7,9 +7,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_load_detect.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f9db867fae89..d75dd17fad32 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -29,11 +29,12 @@
#include <drm/drm_edid.h>
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_lspcon.h"
#include "intel_hdmi.h"
+#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 6d7637ad980a..6ffd55c17445 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -57,12 +57,7 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds_pps {
- /* 100us units */
- int t1_t2;
- int t3;
- int t4;
- int t5;
- int tx;
+ struct intel_pps_delays delays;
int divider;
@@ -168,12 +163,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
- pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
- pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
+ pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+ pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
- pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
- pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
+ pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+ pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
@@ -186,25 +181,30 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (val)
val--;
/* Convert from 100ms to 100us units */
- pps->t4 = val * 1000;
+ pps->delays.power_cycle = val * 1000;
if (DISPLAY_VER(dev_priv) < 5 &&
- pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
+ pps->delays.power_up == 0 &&
+ pps->delays.backlight_on == 0 &&
+ pps->delays.power_down == 0 &&
+ pps->delays.backlight_off == 0) {
drm_dbg_kms(&dev_priv->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
- pps->t1_t2 = 40 * 10;
- pps->t5 = 200 * 10;
+ pps->delays.power_up = 40 * 10;
+ pps->delays.backlight_on = 200 * 10;
/* Set T3 to 35ms and Tx to 200ms in 100 usec units */
- pps->t3 = 35 * 10;
- pps->tx = 200 * 10;
+ pps->delays.power_down = 35 * 10;
+ pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ pps->delays.power_up, pps->delays.power_down,
+ pps->delays.power_cycle, pps->delays.backlight_on,
+ pps->delays.backlight_off, pps->divider,
+ pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -221,16 +221,17 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
- REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
- REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
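The ad-hoc t1_t2/t3/t4/t5/tx fields are folded into the shared struct intel_pps_delays, aligning LVDS with the eDP PPS naming. The shape of the struct below is inferred from its use in this diff (the authoritative definition lives elsewhere in the series); all values remain in 100 us units:

/* Inferred from usage above; field types assumed to stay int. */
struct intel_pps_delays {
	int power_up;		/* was t1_t2 */
	int power_down;		/* was t3 */
	int power_cycle;	/* was t4 */
	int backlight_on;	/* was t5 */
	int backlight_off;	/* was tx */
};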
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 2c8668b1ebae..9a2bea19f17b 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -116,6 +116,7 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_pmdemand_state *pmdemand_state =
to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
@@ -128,7 +129,7 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
continue;
/* Clear the corresponding bit in pmdemand active phys mask */
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state, false);
set_encoder_for_connector(connector, NULL);
@@ -152,6 +153,7 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(i915->display.bw.obj.state);
@@ -185,7 +187,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
- intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe, 0);
+ intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
/*
@@ -582,6 +584,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -613,7 +616,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.name);
/* Clear the corresponding bit in pmdemand active phys mask */
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state, false);
/*
@@ -770,11 +773,11 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
}
}
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state,
true);
} else {
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state,
false);
@@ -899,13 +902,13 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
- intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe,
+ intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
intel_bw_crtc_update(bw_state, crtc_state);
}
- intel_pmdemand_init_pmdemand_params(i915, pmdemand_state);
+ intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
}
static void
@@ -1024,5 +1027,5 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_sanitize_state(i915);
+ intel_power_domains_sanitize_state(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 2ec14096ba9c..ca30fff61876 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -183,7 +183,7 @@ struct overlay_registers {
};
struct intel_overlay {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct intel_context *context;
struct intel_crtc *crtc;
struct i915_vma *vma;
@@ -205,17 +205,17 @@ struct intel_overlay {
void (*flip_complete)(struct intel_overlay *ovl);
};
-static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+static void i830_overlay_clock_gating(struct intel_display *display,
bool enable)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u8 val;
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0);
+ intel_de_write(display, DSPCLK_GATE_D(display), 0);
else
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_write(display, DSPCLK_GATE_D(display),
OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
@@ -253,11 +253,11 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs;
- drm_WARN_ON(&dev_priv->drm, overlay->active);
+ drm_WARN_ON(display->drm, overlay->active);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -271,8 +271,8 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
- if (IS_I830(dev_priv))
- i830_overlay_clock_gating(dev_priv, false);
+ if (display->platform.i830)
+ i830_overlay_clock_gating(display, false);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
*cs++ = overlay->flip_addr | OFC_UPDATE;
@@ -288,10 +288,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *frontbuffer = NULL;
- drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
+ drm_WARN_ON(display->drm, overlay->old_vma);
if (vma)
frontbuffer = intel_frontbuffer_get(intel_bo_to_drm_bo(vma->obj));
@@ -303,8 +305,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(overlay->i915,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip_prepare(i915, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
if (vma)
@@ -318,20 +319,20 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct i915_vma *vma,
bool load_polyphase_filter)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
- drm_WARN_ON(&dev_priv->drm, !overlay->active);
+ drm_WARN_ON(display->drm, !overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
/* check for underruns */
- tmp = intel_de_read(dev_priv, DOVSTA);
+ tmp = intel_de_read(display, DOVSTA);
if (tmp & (1 << 17))
- drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(display->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -355,14 +356,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
- if (drm_WARN_ON(&overlay->i915->drm, !vma))
+ if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(overlay->i915,
- INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
@@ -376,7 +378,7 @@ intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
intel_overlay_release_old_vma(overlay);
@@ -384,8 +386,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
overlay->crtc = NULL;
overlay->active = false;
- if (IS_I830(dev_priv))
- i830_overlay_clock_gating(dev_priv, true);
+ if (display->platform.i830)
+ i830_overlay_clock_gating(display, true);
}
static void intel_overlay_last_flip_retire(struct i915_active *active)
@@ -400,10 +402,11 @@ static void intel_overlay_last_flip_retire(struct i915_active *active)
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
- drm_WARN_ON(&overlay->i915->drm, !overlay->active);
+ drm_WARN_ON(display->drm, !overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -452,7 +455,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs;
@@ -463,7 +466,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ if (!(intel_de_read(display, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -487,9 +490,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return i915_active_wait(&overlay->last_flip);
}
-void intel_overlay_reset(struct drm_i915_private *dev_priv)
+void intel_overlay_reset(struct intel_display *display)
{
- struct intel_overlay *overlay = dev_priv->display.overlay;
+ struct intel_overlay *overlay = display->overlay;
if (!overlay)
return;
@@ -550,11 +553,11 @@ static int uv_vsubsampling(u32 format)
}
}
-static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
+static u32 calc_swidthsw(struct intel_display *display, u32 offset, u32 width)
{
u32 sw;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
sw = ALIGN((offset & 31) + width, 32);
else
sw = ALIGN((offset & 63) + width, 64);
@@ -789,16 +792,17 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct drm_intel_overlay_put_image *params)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct overlay_registers __iomem *regs = overlay->regs;
- struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
bool scale_changed = false;
struct i915_vma *vma;
int ret, tmp_width;
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -824,7 +828,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig |= OCONF_CC_OUT_8BIT;
if (crtc_state->gamma_enable)
oconfig |= OCONF_GAMMA2_ENABLE;
- if (DISPLAY_VER(dev_priv) == 4)
+ if (DISPLAY_VER(display) == 4)
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -845,7 +849,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
tmp_width = params->src_width;
swidth = params->src_width;
- swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
+ swidthsw = calc_swidthsw(display, params->offset_Y, tmp_width);
sheight = params->src_height;
iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -858,9 +862,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth |= (params->src_width / uv_hscale) << 16;
sheight |= (params->src_height / uv_vscale) << 16;
- tmp_U = calc_swidthsw(dev_priv, params->offset_U,
+ tmp_U = calc_swidthsw(display, params->offset_U,
params->src_width / uv_hscale);
- tmp_V = calc_swidthsw(dev_priv, params->offset_V,
+ tmp_V = calc_swidthsw(display, params->offset_V,
params->src_width / uv_hscale);
swidthsw |= max(tmp_U, tmp_V) << 16;
@@ -899,11 +903,11 @@ out_pin_section:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
int ret;
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -936,26 +940,24 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
- if (DISPLAY_VER(dev_priv) >= 4) {
- u32 tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
+ if (DISPLAY_VER(display) >= 4) {
+ u32 tmp = intel_de_read(display, PFIT_PGM_RATIOS(display));
/* on i965 use the PGM reg to read out the autoscaler values */
ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK_965, tmp);
} else {
u32 tmp;
- if (intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_VERT_AUTO_SCALE)
- tmp = intel_de_read(dev_priv,
- PFIT_AUTO_RATIOS(dev_priv));
+ if (intel_de_read(display, PFIT_CONTROL(display)) & PFIT_VERT_AUTO_SCALE)
+ tmp = intel_de_read(display, PFIT_AUTO_RATIOS(display));
else
- tmp = intel_de_read(dev_priv,
- PFIT_PGM_RATIOS(dev_priv));
+ tmp = intel_de_read(display, PFIT_PGM_RATIOS(display));
ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK, tmp);
}
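Note: update_pfit_vscale_ratio() leans on REG_FIELD_GET(), which extracts a bitfield from a register value given only its mask, deriving the shift from the mask's lowest set bit. A self-contained approximation of that helper — the kernel version adds compile-time mask validation, and the PFIT mask value below is a stand-in chosen for the demo, not the real register layout:

	#include <stdint.h>
	#include <stdio.h>

	/* Shift amount = index of the mask's lowest set bit. */
	#define FIELD_GET(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

	#define PFIT_VERT_SCALE_MASK 0x0000fff0u /* hypothetical layout for the demo */

	int main(void)
	{
		uint32_t tmp = 0x00012340;

		/* masks out bits 4..15 and right-justifies them: prints 564 */
		printf("ratio = %u\n", (unsigned)FIELD_GET(PFIT_VERT_SCALE_MASK, tmp));
		return 0;
	}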
@@ -1000,7 +1002,7 @@ static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
return 0;
}
-static int check_overlay_src(struct drm_i915_private *dev_priv,
+static int check_overlay_src(struct intel_display *display,
struct drm_intel_overlay_put_image *rec,
struct drm_i915_gem_object *new_bo)
{
@@ -1011,7 +1013,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
u32 tmp;
/* check src dimensions */
- if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
+ if (display->platform.i845g || display->platform.i830) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1063,14 +1065,14 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+ if (display->platform.i830 || display->platform.i845g)
stride_mask = 255;
else
stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (DISPLAY_VER(dev_priv) == 4 && rec->stride_Y < 512)
+ if (DISPLAY_VER(display) == 4 && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1114,17 +1116,17 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_intel_overlay_put_image *params = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
int ret;
- overlay = dev_priv->display.overlay;
+ overlay = display->overlay;
if (!overlay) {
- drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ drm_dbg(display->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1148,7 +1150,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
@@ -1197,7 +1199,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(dev_priv, params, new_bo);
+ ret = check_overlay_src(display, params, new_bo);
if (ret != 0)
goto out_unlock;
@@ -1277,14 +1279,14 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_intel_overlay_attrs *attrs = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
int ret;
- overlay = dev_priv->display.overlay;
+ overlay = display->overlay;
if (!overlay) {
- drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ drm_dbg(display->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1297,13 +1299,13 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (DISPLAY_VER(dev_priv) != 2) {
- attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
- attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
- attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
- attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
- attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
- attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
+ if (DISPLAY_VER(display) != 2) {
+ attrs->gamma0 = intel_de_read(display, OGAMC0);
+ attrs->gamma1 = intel_de_read(display, OGAMC1);
+ attrs->gamma2 = intel_de_read(display, OGAMC2);
+ attrs->gamma3 = intel_de_read(display, OGAMC3);
+ attrs->gamma4 = intel_de_read(display, OGAMC4);
+ attrs->gamma5 = intel_de_read(display, OGAMC5);
}
} else {
if (attrs->brightness < -128 || attrs->brightness > 127)
@@ -1321,7 +1323,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
goto out_unlock;
if (overlay->active) {
@@ -1333,12 +1335,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_unlock;
- intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
- intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
- intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
- intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
- intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
- intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
+ intel_de_write(display, OGAMC0, attrs->gamma0);
+ intel_de_write(display, OGAMC1, attrs->gamma1);
+ intel_de_write(display, OGAMC2, attrs->gamma2);
+ intel_de_write(display, OGAMC3, attrs->gamma3);
+ intel_de_write(display, OGAMC4, attrs->gamma4);
+ intel_de_write(display, OGAMC5, attrs->gamma5);
}
}
overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
@@ -1352,12 +1354,13 @@ out_unlock:
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
- struct drm_i915_private *i915 = overlay->i915;
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);
struct i915_vma *vma;
int err;
- if (!IS_METEORLAKE(i915)) /* Wa_22018444074 */
+ if (!display->platform.meteorlake) /* Wa_22018444074 */
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -1390,13 +1393,14 @@ err_put_bo:
return err;
}
-void intel_overlay_setup(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_overlay *overlay;
struct intel_engine_cs *engine;
int ret;
- if (!HAS_OVERLAY(dev_priv))
+ if (!HAS_OVERLAY(display))
return;
engine = to_gt(dev_priv)->engine[RCS0];
@@ -1407,7 +1411,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!overlay)
return;
- overlay->i915 = dev_priv;
+ overlay->display = display;
overlay->context = engine->kernel_context;
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
@@ -1418,7 +1422,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
i915_active_init(&overlay->last_flip,
NULL, intel_overlay_last_flip_retire, 0);
- ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+ ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(display));
if (ret)
goto out_free;
@@ -1426,19 +1430,24 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
- dev_priv->display.overlay = overlay;
- drm_info(&dev_priv->drm, "Initialized overlay support.\n");
+ display->overlay = overlay;
+ drm_info(display->drm, "Initialized overlay support.\n");
return;
out_free:
kfree(overlay);
}
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+bool intel_overlay_available(struct intel_display *display)
+{
+ return display->overlay;
+}
+
+void intel_overlay_cleanup(struct intel_display *display)
{
struct intel_overlay *overlay;
- overlay = fetch_and_zero(&dev_priv->display.overlay);
+ overlay = fetch_and_zero(&display->overlay);
if (!overlay)
return;
@@ -1447,7 +1456,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already.
*/
- drm_WARN_ON(&dev_priv->drm, overlay->active);
+ drm_WARN_ON(display->drm, overlay->active);
i915_gem_object_put(overlay->reg_bo);
i915_active_fini(&overlay->last_flip);
@@ -1467,8 +1476,7 @@ struct intel_overlay_snapshot {
struct intel_overlay_snapshot *
intel_overlay_snapshot_capture(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_overlay *overlay = dev_priv->display.overlay;
+ struct intel_overlay *overlay = display->overlay;
struct intel_overlay_snapshot *error;
if (!overlay || !overlay->active)
@@ -1478,8 +1486,8 @@ intel_overlay_snapshot_capture(struct intel_display *display)
if (error == NULL)
return NULL;
- error->dovsta = intel_de_read(dev_priv, DOVSTA);
- error->isr = intel_de_read(dev_priv, GEN2_ISR);
+ error->dovsta = intel_de_read(display, DOVSTA);
+ error->isr = intel_de_read(display, GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
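Note: the running theme of this file's conversion is swapping the struct drm_i915_private handle for struct intel_display, recovering the old pointer only where GEM/GT code still needs it via to_i915(display->drm). A reduced sketch of why that recovery works, using stand-in types — the real structures are far larger, and the drm back-pointer is wired up at device creation:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct drm_device { int dummy; };

	/* The driver-private object embeds the core drm_device... */
	struct drm_i915_private {
		struct drm_device drm;
		int gem_state;
	};

	/* ...while the display handle keeps only a pointer to it. */
	struct intel_display {
		struct drm_device *drm;
	};

	static struct drm_i915_private *to_i915(struct drm_device *drm)
	{
		return container_of(drm, struct drm_i915_private, drm);
	}

	int main(void)
	{
		struct drm_i915_private i915 = { .gem_state = 42 };
		struct intel_display display = { .drm = &i915.drm };

		/* Display code holds only 'display'; GEM access derives i915. */
		printf("%d\n", to_i915(display.drm)->gem_state);
		return 0;
	}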
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index eafac24d1de8..45a42fce754e 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -17,19 +17,24 @@ struct intel_overlay;
struct intel_overlay_snapshot;
#ifdef I915
-void intel_overlay_setup(struct drm_i915_private *dev_priv);
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct intel_display *display);
+bool intel_overlay_available(struct intel_display *display);
+void intel_overlay_cleanup(struct intel_display *display);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_overlay_reset(struct drm_i915_private *dev_priv);
+void intel_overlay_reset(struct intel_display *display);
#else
-static inline void intel_overlay_setup(struct drm_i915_private *dev_priv)
+static inline void intel_overlay_setup(struct intel_display *display)
{
}
-static inline void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+static inline bool intel_overlay_available(struct intel_display *display)
+{
+ return false;
+}
+static inline void intel_overlay_cleanup(struct intel_display *display)
{
}
static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
@@ -37,7 +42,7 @@ static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
return 0;
}
static inline int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
return 0;
}
@@ -46,7 +51,7 @@ static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
{
return 0;
}
-static inline void intel_overlay_reset(struct drm_i915_private *dev_priv)
+static inline void intel_overlay_reset(struct intel_display *display)
{
}
#endif
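Note: the header keeps the overlay API callable when I915 is not defined (e.g. the Xe build) by supplying static inline stubs that compile away and return safe defaults; the new intel_overlay_available() stub returns false so call sites stay ifdef-free. A generic, self-contained sketch of the pattern (CONFIG_FEATURE and the ctx type are invented for the demo):

	#include <stdbool.h>
	#include <stdio.h>

	struct ctx { bool ready; };

	#ifdef CONFIG_FEATURE
	/* Real implementation, built only when the feature is configured in. */
	static void feature_setup(struct ctx *c)     { c->ready = true; }
	static bool feature_available(struct ctx *c) { return c->ready; }
	#else
	/* Stubs: compile away, return safe defaults, keep callers ifdef-free. */
	static inline void feature_setup(struct ctx *c)     { (void)c; }
	static inline bool feature_available(struct ctx *c) { (void)c; return false; }
	#endif

	int main(void)
	{
		struct ctx c = { false };

		feature_setup(&c);
		printf("available: %d\n", feature_available(&c));
		return 0;
	}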
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 313bd3f35ace..4e6c5592c7ae 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -33,7 +33,6 @@
#include <drm/drm_edid.h>
-#include "i915_drv.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_display_core.h"
@@ -383,12 +382,12 @@ void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
return connector_status_connected;
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 4210de87a0a2..8fa5a6334d10 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -4,8 +4,10 @@
*/
#include "g4x_dp.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crt.h"
+#include "intel_crt_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 84c55971e91a..71471c1d7dc9 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -108,13 +109,13 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
struct iclkip_params {
@@ -195,7 +196,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
@@ -218,7 +219,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
/* Wait for initialization time */
udelay(24);
@@ -236,11 +237,11 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
iclkip_params_init(&p);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
return 0;
}
@@ -254,7 +255,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
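Note: lpt_get_iclkip() reconstructs desired_divisor as (divsel + 2) * iclk_pi_range + phaseinc; the programming path (outside this hunk) presumably performs the inverse split. A sketch of that round trip under the stated assumption about the forward direction, with a pi_range of 64 and arbitrary sample numbers:

	#include <stdio.h>

	struct iclkip { unsigned divsel, phaseinc; };

	/* Assumed forward split; valid for desired_divisor >= 2 * pi_range. */
	static struct iclkip split(unsigned desired_divisor, unsigned pi_range)
	{
		struct iclkip p = {
			.divsel   = desired_divisor / pi_range - 2,
			.phaseinc = desired_divisor % pi_range,
		};
		return p;
	}

	/* Inverse, as read back from the hardware in this hunk. */
	static unsigned join(struct iclkip p, unsigned pi_range)
	{
		return (p.divsel + 2) * pi_range + p.phaseinc;
	}

	int main(void)
	{
		unsigned pi_range = 64, want = 900;
		struct iclkip p = split(want, pi_range);

		printf("divsel=%u phaseinc=%u roundtrip=%u\n",
		       p.divsel, p.phaseinc, join(p, pi_range));
		return 0;
	}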
@@ -279,7 +280,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
@@ -302,7 +303,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
/* Sequence to disable CLKOUT_DP */
@@ -310,7 +311,7 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
u32 reg, tmp;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
@@ -328,7 +329,7 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)
@@ -374,7 +375,7 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
@@ -387,7 +388,7 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
tmp |= sscdivintphase[idx];
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
#undef BEND_IDX
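Note: these hunks replace open-coded mutex_lock(&dev_priv->sb_lock) with intel_sbi_lock()/intel_sbi_unlock() accessors, so sideband callers no longer know where the lock lives — a prerequisite for moving it out of drm_i915_private. A minimal user-space sketch of the idiom with pthreads (the device type and register field are stand-ins):

	#include <pthread.h>
	#include <stdio.h>

	struct device {
		pthread_mutex_t sb_lock;
		unsigned int sideband_reg;
	};

	/* Accessors hide the lock's identity/location from every call site. */
	static void sbi_lock(struct device *dev)   { pthread_mutex_lock(&dev->sb_lock); }
	static void sbi_unlock(struct device *dev) { pthread_mutex_unlock(&dev->sb_lock); }

	static unsigned int sbi_read(struct device *dev)
	{
		return dev->sideband_reg; /* stand-in for the real sideband access */
	}

	int main(void)
	{
		struct device dev = { PTHREAD_MUTEX_INITIALIZER, 0x1234 };

		sbi_lock(&dev);
		printf("SBI: 0x%x\n", sbi_read(&dev));
		sbi_unlock(&dev);
		return 0;
	}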
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 50861aa78a89..4ee03d9d14ad 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -3,8 +3,8 @@
* Copyright © 2024 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 304da826dee1..90efc6f64e52 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -28,6 +28,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 62401f6a04e4..6789b7f14095 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -20,10 +20,10 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
- struct drm_i915_private *i915 = to_i915(this->base.dev);
+ struct intel_display *display = to_intel_display(this);
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
@@ -48,9 +48,10 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
}
static bool
-initial_plane_phys_lmem(struct drm_i915_private *i915,
+initial_plane_phys_lmem(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
struct intel_memory_region *mem;
dma_addr_t dma_addr;
@@ -63,7 +64,7 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
pte = ioread64(gte);
if (!(pte & GEN12_GGTT_PTE_LM)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Initial plane programming missing PTE_LM bit\n");
return false;
}
@@ -75,7 +76,7 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
else
mem = i915->mm.stolen_region;
if (!mem) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane memory region not initialized\n");
return false;
}
@@ -85,13 +86,13 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
* ever be placed in the stolen portion.
*/
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
&dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
return false;
}
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Using dma_addr=%pa, based on initial plane programming\n",
&dma_addr);
@@ -102,9 +103,10 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
}
static bool
-initial_plane_phys_smem(struct drm_i915_private *i915,
+initial_plane_phys_smem(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_memory_region *mem;
u32 base;
@@ -112,7 +114,7 @@ initial_plane_phys_smem(struct drm_i915_private *i915,
mem = i915->mm.stolen_region;
if (!mem) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane memory region not initialized\n");
return false;
}
@@ -125,19 +127,22 @@ initial_plane_phys_smem(struct drm_i915_private *i915,
}
static bool
-initial_plane_phys(struct drm_i915_private *i915,
+initial_plane_phys(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(i915, plane_config);
+ return initial_plane_phys_lmem(display, plane_config);
else
- return initial_plane_phys_smem(i915, plane_config);
+ return initial_plane_phys_smem(display, plane_config);
}
static struct i915_vma *
-initial_plane_vma(struct drm_i915_private *i915,
+initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct drm_mm_node orig_mm = {};
@@ -149,7 +154,7 @@ initial_plane_vma(struct drm_i915_private *i915,
if (plane_config->size == 0)
return NULL;
- if (!initial_plane_phys(i915, plane_config))
+ if (!initial_plane_phys(display, plane_config))
return NULL;
phys_base = plane_config->phys_base;
@@ -168,7 +173,7 @@ initial_plane_vma(struct drm_i915_private *i915,
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
size * 2 > i915->dsm.usable_size) {
- drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
+ drm_dbg_kms(display->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
}
@@ -176,7 +181,7 @@ initial_plane_vma(struct drm_i915_private *i915,
I915_BO_ALLOC_USER |
I915_BO_PREALLOC);
if (IS_ERR(obj)) {
- drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
+ drm_dbg_kms(display->drm, "Failed to preallocate initial FB in %s\n",
mem->region.name);
return NULL;
}
@@ -254,7 +259,7 @@ retry:
if (drm_mm_node_allocated(&orig_mm))
drm_mm_remove_node(&orig_mm);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
i915_ggtt_offset(vma), plane_config->base);
@@ -271,8 +276,7 @@ static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_framebuffer *fb = &plane_config->fb->base;
struct i915_vma *vma;
@@ -284,13 +288,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
case I915_FORMAT_MOD_4_TILED:
break;
default:
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Unsupported modifier for initial FB: 0x%llx\n",
fb->modifier);
return false;
}
- vma = initial_plane_vma(dev_priv, plane_config);
+ vma = initial_plane_vma(display, plane_config);
if (!vma)
return false;
@@ -303,7 +307,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (intel_framebuffer_init(to_intel_framebuffer(fb),
intel_bo_to_drm_bo(vma->obj), &mode_cmd)) {
- drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ drm_dbg_kms(display->drm, "intel fb init failed\n");
goto err_vma;
}
@@ -410,12 +414,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
{
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
@@ -429,7 +433,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+ display->funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -437,7 +441,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
*/
intel_find_initial_plane_obj(crtc, plane_configs);
- if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
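Note: initial_plane_phys_lmem() refuses firmware-programmed addresses that fall outside the region the driver would allocate from. A self-contained sketch of that sanity check (region bounds and the sample addresses are invented):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct region { uint64_t start, end; const char *name; };

	/* Mirrors the initial-plane check: a firmware-programmed address
	 * must fall inside the region we intend to allocate from. */
	static bool addr_in_region(uint64_t dma_addr, const struct region *mem)
	{
		if (dma_addr < mem->start || dma_addr > mem->end) {
			fprintf(stderr,
				"invalid range, dma_addr=%#llx (%s [%#llx-%#llx])\n",
				(unsigned long long)dma_addr, mem->name,
				(unsigned long long)mem->start,
				(unsigned long long)mem->end);
			return false;
		}
		return true;
	}

	int main(void)
	{
		struct region stolen = { 0x80000000, 0x8fffffff, "stolen" };

		printf("%d\n", addr_in_region(0x80100000, &stolen)); /* 1 */
		printf("%d\n", addr_in_region(0x20000000, &stolen)); /* 0 */
		return 0;
	}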
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
index 64ab95239cd4..6c6aa717ed21 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.h
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
-struct drm_i915_private;
+struct intel_display;
-void intel_initial_plane_config(struct drm_i915_private *i915);
+void intel_initial_plane_config(struct intel_display *display);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index cdd314956a31..975520322136 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,16 +5,50 @@
#include <linux/bitops.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
+#include "intel_step.h"
#include "skl_watermark.h"
+struct pmdemand_params {
+ u16 qclk_gv_bw;
+ u8 voltage_index;
+ u8 qclk_gv_index;
+ u8 active_pipes;
+ u8 active_dbufs; /* pre-Xe3 only */
+ /* Total number of non type C active phys from active_phys_mask */
+ u8 active_phys;
+ u8 plls;
+ u16 cdclk_freq_mhz;
+ /* max from ddi_clocks[] */
+ u16 ddiclk_max;
+ u8 scalers; /* pre-Xe3 only */
+};
+
+struct intel_pmdemand_state {
+ struct intel_global_state base;
+
+ /* Maintain a persistent list of port clocks across all crtcs */
+ int ddi_clocks[I915_MAX_PIPES];
+
+ /* Maintain a persistent list of non type C phys mask */
+ u16 active_combo_phys_mask;
+
+ /* Parameters to be configured in the pmdemand registers */
+ struct pmdemand_params params;
+};
+
+struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_pmdemand_state, base);
+}
+
static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
@@ -41,10 +75,10 @@ static const struct intel_global_state_funcs intel_pmdemand_funcs = {
static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (IS_ERR(pmdemand_state))
return ERR_CAST(pmdemand_state);
@@ -55,10 +89,10 @@ intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_old_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (!pmdemand_state)
return NULL;
@@ -69,10 +103,10 @@ intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_new_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (!pmdemand_state)
return NULL;
@@ -80,7 +114,7 @@ intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
return to_intel_pmdemand_state(pmdemand_state);
}
-int intel_pmdemand_init(struct drm_i915_private *i915)
+int intel_pmdemand_init(struct intel_display *display)
{
struct intel_pmdemand_state *pmdemand_state;
@@ -88,32 +122,32 @@ int intel_pmdemand_init(struct drm_i915_private *i915)
if (!pmdemand_state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
+ intel_atomic_global_obj_init(display, &display->pmdemand.obj,
&pmdemand_state->base,
&intel_pmdemand_funcs);
- if (IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0))
+ if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
/* Wa_14016740474 */
- intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
+ intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
return 0;
}
-void intel_pmdemand_init_early(struct drm_i915_private *i915)
+void intel_pmdemand_init_early(struct intel_display *display)
{
- mutex_init(&i915->display.pmdemand.lock);
- init_waitqueue_head(&i915->display.pmdemand.waitqueue);
+ mutex_init(&display->pmdemand.lock);
+ init_waitqueue_head(&display->pmdemand.waitqueue);
}
void
-intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+intel_pmdemand_update_phys_mask(struct intel_display *display,
struct intel_encoder *encoder,
struct intel_pmdemand_state *pmdemand_state,
bool set_bit)
{
enum phy phy;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!encoder)
@@ -131,18 +165,18 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
}
void
-intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+intel_pmdemand_update_port_clock(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state,
enum pipe pipe, int port_clock)
{
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
pmdemand_state->ddi_clocks[pipe] = port_clock;
}
static void
-intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
+intel_pmdemand_update_max_ddiclk(struct intel_display *display,
struct intel_atomic_state *state,
struct intel_pmdemand_state *pmdemand_state)
{
@@ -152,7 +186,7 @@ intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
int i;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- intel_pmdemand_update_port_clock(i915, pmdemand_state,
+ intel_pmdemand_update_port_clock(display, pmdemand_state,
crtc->pipe,
new_crtc_state->port_clock);
@@ -163,7 +197,7 @@ intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
}
static void
-intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
+intel_pmdemand_update_connector_phys(struct intel_display *display,
struct intel_atomic_state *state,
struct drm_connector_state *conn_state,
bool set_bit,
@@ -184,12 +218,12 @@ intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
if (!crtc_state->hw.active)
return;
- intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
+ intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
set_bit);
}
static void
-intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
+intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
struct intel_atomic_state *state,
struct intel_pmdemand_state *pmdemand_state)
{
@@ -204,12 +238,12 @@ intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
continue;
/* First clear the active phys in the old connector state */
- intel_pmdemand_update_connector_phys(i915, state,
+ intel_pmdemand_update_connector_phys(display, state,
old_conn_state, false,
pmdemand_state);
/* Then set the active phys in new connector state */
- intel_pmdemand_update_connector_phys(i915, state,
+ intel_pmdemand_update_connector_phys(display, state,
new_conn_state, true,
pmdemand_state);
}
@@ -220,7 +254,7 @@ intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
}
static bool
-intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
+intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
struct intel_encoder *encoder)
{
return encoder && intel_encoder_is_tc(encoder);
@@ -229,7 +263,7 @@ intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_connector *connector;
@@ -246,8 +280,8 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
continue;
if (old_encoder == new_encoder ||
- (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
- intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
+ (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
+ intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
continue;
return true;
@@ -304,13 +338,13 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state;
const struct intel_cdclk_state *new_cdclk_state;
const struct intel_dbuf_state *new_dbuf_state;
struct intel_pmdemand_state *new_pmdemand_state;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return 0;
if (!intel_pmdemand_needs_update(state))
@@ -332,14 +366,14 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
if (IS_ERR(new_dbuf_state))
return PTR_ERR(new_dbuf_state);
- if (DISPLAY_VER(i915) < 30) {
+ if (DISPLAY_VER(display) < 30) {
new_pmdemand_state->params.active_dbufs =
min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
new_pmdemand_state->params.active_pipes =
min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
} else {
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(i915));
+ min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
}
new_cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -351,9 +385,9 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
new_pmdemand_state->params.cdclk_freq_mhz =
DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);
- intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);
+ intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);
- intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);
+ intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);
/*
* Active_PLLs starts with 1 because of CDCLK PLL.
@@ -374,36 +408,36 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}
-static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
+static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
- return !(intel_de_wait_for_clear(i915,
+ return !(intel_de_wait_for_clear(display,
XELPDP_INITIATE_PMDEMAND_REQUEST(1),
XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
- intel_de_wait_for_clear(i915,
+ intel_de_wait_for_clear(display,
GEN12_DCPR_STATUS_1,
XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}
void
-intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+intel_pmdemand_init_pmdemand_params(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state)
{
u32 reg1, reg2;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915))) {
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display))) {
memset(&pmdemand_state->params, 0,
sizeof(pmdemand_state->params));
goto unlock;
}
- reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+ reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
- reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+ reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
pmdemand_state->params.qclk_gv_bw =
REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
@@ -419,7 +453,7 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
pmdemand_state->params.ddiclk_max =
REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
- if (DISPLAY_VER(i915) >= 30) {
+ if (DISPLAY_VER(display) >= 30) {
pmdemand_state->params.active_pipes =
REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
} else {
@@ -433,49 +467,49 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
}
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
-static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
+static bool intel_pmdemand_req_complete(struct intel_display *display)
{
- return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
+ return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
XELPDP_PMDEMAND_REQ_ENABLE);
}
-static void intel_pmdemand_wait(struct drm_i915_private *i915)
+static void intel_pmdemand_wait(struct intel_display *display)
{
- if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
- intel_pmdemand_req_complete(i915),
+ if (!wait_event_timeout(display->pmdemand.waitqueue,
+ intel_pmdemand_req_complete(display),
msecs_to_jiffies_timeout(10)))
- drm_err(&i915->drm,
+ drm_err(display->drm,
"timed out waiting for Punit PM Demand Response\n");
}
/* Required to be programmed during Display Init Sequences. */
-void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+void intel_pmdemand_program_dbuf(struct intel_display *display,
u8 dbuf_slices)
{
u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);
/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
return;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915)))
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display)))
goto unlock;
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
XELPDP_PMDEMAND_DBUFS_MASK,
REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
XELPDP_PMDEMAND_REQ_ENABLE);
- intel_pmdemand_wait(i915);
+ intel_pmdemand_wait(display);
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
static void
@@ -535,38 +569,37 @@ intel_pmdemand_update_params(struct intel_display *display,
}
static void
-intel_pmdemand_program_params(struct drm_i915_private *i915,
+intel_pmdemand_program_params(struct intel_display *display,
const struct intel_pmdemand_state *new,
const struct intel_pmdemand_state *old,
bool serialized)
{
- struct intel_display *display = &i915->display;
bool changed = false;
u32 reg1, mod_reg1;
u32 reg2, mod_reg2;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915)))
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display)))
goto unlock;
- reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+ reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
mod_reg1 = reg1;
- reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+ reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
mod_reg2 = reg2;
intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
serialized);
if (reg1 != mod_reg1) {
- intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
mod_reg1);
changed = true;
}
if (reg2 != mod_reg2) {
- intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
mod_reg2);
changed = true;
}
@@ -575,17 +608,17 @@ intel_pmdemand_program_params(struct drm_i915_private *i915,
if (!changed)
goto unlock;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"initate pmdemand request values: (0x%x 0x%x)\n",
mod_reg1, mod_reg2);
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
XELPDP_PMDEMAND_REQ_ENABLE);
- intel_pmdemand_wait(i915);
+ intel_pmdemand_wait(display);
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
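Note: intel_pmdemand_program_params() follows a fixed protocol — take the pmdemand lock, bail if a previous transaction is still in flight, rewrite the request registers only when they change, set the request-enable bit, then wait for the Punit to clear it. A toy model of the handshake against a fake register; the real code waits on a waitqueue with a 10 ms timeout rather than calling the ack inline, and the register layout here is invented:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define REQ_ENABLE (1u << 31)

	static uint32_t pmdemand_req; /* stand-in for the mmio register */

	/* The hardware (Punit) acks by clearing REQ_ENABLE; faked here. */
	static void punit_ack(void) { pmdemand_req &= ~REQ_ENABLE; }

	static bool prev_transaction_done(void)
	{
		return !(pmdemand_req & REQ_ENABLE);
	}

	static int pmdemand_program(uint32_t params)
	{
		if (!prev_transaction_done())
			return -1;                  /* matches the drm_WARN_ON bail-out */
		pmdemand_req = params | REQ_ENABLE; /* kick the request */
		punit_ack();                        /* real code: wait_event_timeout() */
		return prev_transaction_done() ? 0 : -1;
	}

	int main(void)
	{
		printf("%d\n", pmdemand_program(0x42));
		return 0;
	}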
static bool
@@ -597,13 +630,13 @@ intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_pmdemand_state *new_pmdemand_state =
intel_atomic_get_new_pmdemand_state(state);
const struct intel_pmdemand_state *old_pmdemand_state =
intel_atomic_get_old_pmdemand_state(state);
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!new_pmdemand_state ||
@@ -613,20 +646,20 @@ void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_pmdemand_state->base.changed);
- intel_pmdemand_program_params(i915, new_pmdemand_state,
+ intel_pmdemand_program_params(display, new_pmdemand_state,
old_pmdemand_state,
intel_atomic_global_state_is_serialized(state));
}
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_pmdemand_state *new_pmdemand_state =
intel_atomic_get_new_pmdemand_state(state);
const struct intel_pmdemand_state *old_pmdemand_state =
intel_atomic_get_old_pmdemand_state(state);
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!new_pmdemand_state ||
@@ -636,6 +669,6 @@ void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_pmdemand_state->base.changed);
- intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
+ intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
intel_atomic_global_state_is_serialized(state));
}
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
index a1c49efdc493..821ef2c4134a 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -6,58 +6,31 @@
#ifndef __INTEL_PMDEMAND_H__
#define __INTEL_PMDEMAND_H__
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
+#include <linux/types.h>
-struct drm_i915_private;
+enum pipe;
struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
+struct intel_global_state;
struct intel_plane_state;
+struct intel_pmdemand_state;
-struct pmdemand_params {
- u16 qclk_gv_bw;
- u8 voltage_index;
- u8 qclk_gv_index;
- u8 active_pipes;
- u8 active_dbufs; /* pre-Xe3 only */
- /* Total number of non type C active phys from active_phys_mask */
- u8 active_phys;
- u8 plls;
- u16 cdclk_freq_mhz;
- /* max from ddi_clocks[] */
- u16 ddiclk_max;
- u8 scalers; /* pre-Xe3 only */
-};
+struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state);
-struct intel_pmdemand_state {
- struct intel_global_state base;
-
- /* Maintain a persistent list of port clocks across all crtcs */
- int ddi_clocks[I915_MAX_PIPES];
-
- /* Maintain a persistent list of non type C phys mask */
- u16 active_combo_phys_mask;
-
- /* Parameters to be configured in the pmdemand registers */
- struct pmdemand_params params;
-};
-
-#define to_intel_pmdemand_state(global_state) \
- container_of_const((global_state), struct intel_pmdemand_state, base)
-
-void intel_pmdemand_init_early(struct drm_i915_private *i915);
-int intel_pmdemand_init(struct drm_i915_private *i915);
-void intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+void intel_pmdemand_init_early(struct intel_display *display);
+int intel_pmdemand_init(struct intel_display *display);
+void intel_pmdemand_init_pmdemand_params(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state);
-void intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+void intel_pmdemand_update_port_clock(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state,
enum pipe pipe, int port_clock);
-void intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+void intel_pmdemand_update_phys_mask(struct intel_display *display,
struct intel_encoder *encoder,
struct intel_pmdemand_state *pmdemand_state,
bool clear_bit);
-void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+void intel_pmdemand_program_dbuf(struct intel_display *display,
u8 dbuf_slices);
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state);
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state);
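Note: the header now forward-declares struct intel_pmdemand_state and exports to_intel_pmdemand_state() as a real function, moving the layout into intel_pmdemand.c — the classic opaque-type pattern, trading the container_of_const macro for a function call so the definition need not be visible to callers. A condensed sketch, where base_state and the field names stand in for the intel_global_state machinery:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base_state { int changed; };

	/* The header would expose only:
	 *   struct pmdemand_state;  // opaque
	 *   struct pmdemand_state *to_pmdemand_state(struct base_state *);
	 * The definition stays private to the .c file: */
	struct pmdemand_state {
		struct base_state base;
		int ddi_clocks[4];
	};

	static struct pmdemand_state *to_pmdemand_state(struct base_state *b)
	{
		return container_of(b, struct pmdemand_state, base);
	}

	int main(void)
	{
		struct pmdemand_state s = { .ddi_clocks = { 270000 } };

		printf("%d\n", to_pmdemand_state(&s.base)->ddi_clocks[0]);
		return 0;
	}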
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 093fe37a3983..eb35f0249f2b 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -134,7 +134,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
*/
if (!pll_enabled) {
release_cl_override = display->platform.cherryview &&
- !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+ !chv_phy_powergate_ch(display, phy, ch, true);
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
drm_err(display->drm,
@@ -163,7 +163,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
- chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ chv_phy_powergate_ch(display, phy, ch, false);
}
}
@@ -668,23 +668,24 @@ static void wait_panel_power_cycle(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] %s wait for panel power cycle\n",
- dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(intel_dp));
+ s64 panel_power_off_duration, remaining;
/* take the difference of current time and panel power off time
- * and then make panel wait for t11_t12 if needed. */
+ * and then make panel wait for power_cycle if needed. */
panel_power_on_time = ktime_get_boottime();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+ remaining = max(0, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power cycle (%lld ms remaining)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(intel_dp), remaining);
+
/* When we disable the VDD override bit last we have to do the manual
* wait. */
- if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
- wait_remaining_ms_from_jiffies(jiffies,
- intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+ if (remaining)
+ wait_remaining_ms_from_jiffies(jiffies, remaining);
wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
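Note: wait_panel_power_cycle() now computes the leftover delay once, clamped at zero, logs it, and only then sleeps. The arithmetic in isolation, with times in plain milliseconds and invented sample values:

	#include <stdint.h>
	#include <stdio.h>

	static int64_t max64(int64_t a, int64_t b) { return a > b ? a : b; }

	/* How much of the panel power-cycle delay is still owed, given
	 * when the panel was switched off (all times in milliseconds). */
	static int64_t remaining_power_cycle_ms(int64_t now_ms, int64_t off_time_ms,
						int64_t power_cycle_delay_ms)
	{
		int64_t off_duration = now_ms - off_time_ms;

		return max64(0, power_cycle_delay_ms - off_duration);
	}

	int main(void)
	{
		/* panel off 300 ms ago, 500 ms cycle delay -> wait 200 ms more */
		printf("%lld\n", (long long)remaining_power_cycle_ms(1300, 1000, 500));
		/* panel off long ago -> nothing left to wait */
		printf("%lld\n", (long long)remaining_power_cycle_ms(9000, 1000, 500));
		return 0;
	}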
@@ -1387,10 +1388,10 @@ static void pps_init_timestamps(struct intel_dp *intel_dp)
}
static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct intel_pps_delays *seq)
{
struct intel_display *display = to_intel_display(intel_dp);
- u32 pp_on, pp_off, pp_ctl;
+ u32 pp_on, pp_off, pp_ctl, power_cycle_delay;
struct pps_registers regs;
intel_pps_get_registers(intel_dp, &regs);
@@ -1405,59 +1406,77 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
pp_off = intel_de_read(display, regs.pp_off);
/* Pull timing values out of registers */
- seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
- seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
- seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
- seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+ seq->power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
pp_div = intel_de_read(display, regs.pp_div);
- seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ power_cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div);
} else {
- seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ power_cycle_delay = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl);
}
+
+ /* hardware wants <delay>+1 in 100ms units */
+ seq->power_cycle = power_cycle_delay ? (power_cycle_delay - 1) * 1000 : 0;
}
static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
- const struct edp_power_seq *seq)
+ const struct intel_pps_delays *seq)
{
struct intel_display *display = to_intel_display(intel_dp);
drm_dbg_kms(display->drm,
- "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- state_name,
- seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+ "%s power_up %d backlight_on %d backlight_off %d power_down %d power_cycle %d\n",
+ state_name, seq->power_up, seq->backlight_on,
+ seq->backlight_off, seq->power_down, seq->power_cycle);
}
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct edp_power_seq hw;
- struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+ struct intel_pps_delays hw;
+ struct intel_pps_delays *sw = &intel_dp->pps.pps_delays;
intel_pps_readout_hw_state(intel_dp, &hw);
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ if (hw.power_up != sw->power_up ||
+ hw.backlight_on != sw->backlight_on ||
+ hw.backlight_off != sw->backlight_off ||
+ hw.power_down != sw->power_down ||
+ hw.power_cycle != sw->power_cycle) {
drm_err(display->drm, "PPS state mismatch\n");
intel_pps_dump_state(intel_dp, "sw", sw);
intel_pps_dump_state(intel_dp, "hw", &hw);
}
}
-static bool pps_delays_valid(struct edp_power_seq *delays)
+static bool pps_delays_valid(struct intel_pps_delays *delays)
+{
+ return delays->power_up || delays->backlight_on || delays->backlight_off ||
+ delays->power_down || delays->power_cycle;
+}
+
+static int msecs_to_pps_units(int msecs)
{
- return delays->t1_t3 || delays->t8 || delays->t9 ||
- delays->t10 || delays->t11_t12;
+ /* PPS uses 100us units */
+ return msecs * 10;
+}
+
+static int pps_units_to_msecs(int val)
+{
+ /* PPS uses 100us units */
+ return DIV_ROUND_UP(val, 10);
}
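Note: the new helpers make the 100 µs register units explicit, and the power-cycle field remains the odd one out — 100 ms units, stored zero-based as <delay>+1. A standalone sketch of both conversions, matching the readout above and the register programming later in this file:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Ordinary PPS delay fields are in 100us units. */
	static int msecs_to_pps_units(int msecs) { return msecs * 10; }
	static int pps_units_to_msecs(int units) { return DIV_ROUND_UP(units, 10); }

	/* The power-cycle field: 100ms units, stored as <delay>+1. */
	static int power_cycle_to_hw(int pps_units)
	{
		return DIV_ROUND_UP(pps_units, 1000) + 1;
	}

	static int power_cycle_from_hw(int hw)
	{
		return hw ? (hw - 1) * 1000 : 0; /* back to 100us units */
	}

	int main(void)
	{
		int t12 = msecs_to_pps_units(600); /* 600 ms -> 6000 units */
		int hw = power_cycle_to_hw(t12);   /* -> 7 */

		printf("hw=%d back=%d ms=%d\n", hw,
		       power_cycle_from_hw(hw), pps_units_to_msecs(t12));
		return 0;
	}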
static void pps_init_delays_bios(struct intel_dp *intel_dp,
- struct edp_power_seq *bios)
+ struct intel_pps_delays *bios)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1472,7 +1491,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
}
static void pps_init_delays_vbt(struct intel_dp *intel_dp,
- struct edp_power_seq *vbt)
+ struct intel_pps_delays *vbt)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
@@ -1488,39 +1507,28 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* seems sufficient to avoid this problem.
*/
if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
- vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
+ vbt->power_cycle = max_t(u16, vbt->power_cycle, msecs_to_pps_units(1300));
drm_dbg_kms(display->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
- vbt->t11_t12);
+ vbt->power_cycle);
}
- /* T11_T12 delay is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- vbt->t11_t12 += 100 * 10;
-
intel_pps_dump_state(intel_dp, "vbt", vbt);
}
static void pps_init_delays_spec(struct intel_dp *intel_dp,
- struct edp_power_seq *spec)
+ struct intel_pps_delays *spec)
{
struct intel_display *display = to_intel_display(intel_dp);
lockdep_assert_held(&display->pps.mutex);
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec->t1_t3 = 210 * 10;
- spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec->t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec->t11_t12 = (510 + 100) * 10;
+ /* Upper limits from eDP 1.3 spec */
+ spec->power_up = msecs_to_pps_units(10 + 200); /* T1+T3 */
+ spec->backlight_on = msecs_to_pps_units(50); /* no limit for T8, use T7 instead */
+ spec->backlight_off = msecs_to_pps_units(50); /* no limit for T9, make it symmetric with T8 */
+ spec->power_down = msecs_to_pps_units(500); /* T10 */
+ spec->power_cycle = msecs_to_pps_units(10 + 500); /* T11+T12 */
intel_pps_dump_state(intel_dp, "spec", spec);
}
@@ -1528,7 +1536,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
static void pps_init_delays(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct edp_power_seq cur, vbt, spec,
+ struct intel_pps_delays cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
lockdep_assert_held(&display->pps.mutex);
@@ -1546,20 +1554,18 @@ static void pps_init_delays(struct intel_dp *intel_dp)
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
+ assign_final(power_up);
+ assign_final(backlight_on);
+ assign_final(backlight_off);
+ assign_final(power_down);
+ assign_final(power_cycle);
#undef assign_final
-#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
- intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
- intel_dp->pps.backlight_on_delay = get_delay(t8);
- intel_dp->pps.backlight_off_delay = get_delay(t9);
- intel_dp->pps.panel_power_down_delay = get_delay(t10);
- intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
+ intel_dp->pps.panel_power_up_delay = pps_units_to_msecs(final->power_up);
+ intel_dp->pps.backlight_on_delay = pps_units_to_msecs(final->backlight_on);
+ intel_dp->pps.backlight_off_delay = pps_units_to_msecs(final->backlight_off);
+ intel_dp->pps.panel_power_down_delay = pps_units_to_msecs(final->power_down);
+ intel_dp->pps.panel_power_cycle_delay = pps_units_to_msecs(final->power_cycle);
drm_dbg_kms(display->drm,
"panel power up delay %d, power down delay %d, power cycle delay %d\n",
@@ -1573,19 +1579,20 @@ static void pps_init_delays(struct intel_dp *intel_dp)
/*
* We override the HW backlight delays to 1 because we do manual waits
- * on them. For T8, even BSpec recommends doing it. For T9, if we
- * don't do this, we'll end up waiting for the backlight off delay
- * twice: once when we do the manual sleep, and once when we disable
- * the panel and wait for the PP_STATUS bit to become zero.
+ * on them. For backlight_on, even BSpec recommends doing it. For
+ * backlight_off, if we don't do this, we'll end up waiting for the
+ * backlight off delay twice: once when we do the manual sleep, and
+ * once when we disable the panel and wait for the PP_STATUS bit to
+ * become zero.
*/
- final->t8 = 1;
- final->t9 = 1;
+ final->backlight_on = 1;
+ final->backlight_off = 1;
/*
- * HW has only a 100msec granularity for t11_t12 so round it up
+ * HW has only a 100msec granularity for power_cycle so round it up
* accordingly.
*/
- final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+ final->power_cycle = roundup(final->power_cycle, msecs_to_pps_units(100));
}
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
@@ -1596,7 +1603,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
- const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+ const struct intel_pps_delays *seq = &intel_dp->pps.pps_delays;
lockdep_assert_held(&display->pps.mutex);
@@ -1629,10 +1636,10 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
intel_de_write(display, regs.pp_ctrl, pp);
}
- pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
- pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->power_up) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->backlight_on);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->backlight_off) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->power_down);
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@@ -1665,11 +1672,14 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
*/
if (i915_mmio_reg_valid(regs.pp_div))
intel_de_write(display, regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK,
+ (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
else
intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
- DIV_ROUND_UP(seq->t11_t12, 1000)));
+ DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
drm_dbg_kms(display->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
@@ -1810,6 +1820,8 @@ static int intel_pps_show(struct seq_file *m, void *data)
intel_dp->pps.panel_power_up_delay);
seq_printf(m, "Panel power down delay: %d\n",
intel_dp->pps.panel_power_down_delay);
+ seq_printf(m, "Panel power cycle delay: %d\n",
+ intel_dp->pps.panel_power_cycle_delay);
seq_printf(m, "Backlight on delay: %d\n",
intel_dp->pps.backlight_on_delay);
seq_printf(m, "Backlight off delay: %d\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index a784c0b81556..0b021acb330f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -871,7 +871,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
val |= EDP_PSR_TP2_TP3_TIME_100us;
check_tp3_sel:
- if (intel_dp_source_supports_tps3(dev_priv) &&
+ if (intel_dp_source_supports_tps3(display) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP_TP1_TP3;
else
@@ -1130,18 +1130,16 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
psr2_program_idle_frames(intel_dp, 0);
- intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+ intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
}
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
@@ -1564,13 +1562,6 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
- /*
- * Current PSR panels don't work reliably with VRR enabled
- * So if VRR is enabled, do not enable PSR.
- */
- if (crtc_state->vrr.enable)
- return false;
-
if (!CAN_PSR(intel_dp))
return false;
@@ -1644,6 +1635,15 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return true;
}
+static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
+ !crtc_state->has_sel_update);
+}
+
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -1679,6 +1679,12 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * Currently PSR/PR doesn't work reliably with VRR enabled.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -1690,6 +1696,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
+
+ /* Wa_18037818876 */
+ if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
+ crtc_state->has_psr = false;
+ drm_dbg_kms(display->drm,
+ "PSR disabled to workaround PSR FSM hang issue\n");
+ }
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1773,23 +1786,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
intel_dp->psr.active = true;
}
-static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
-{
- switch (intel_dp->psr.pipe) {
- case PIPE_A:
- return LATENCY_REPORTING_REMOVED_PIPE_A;
- case PIPE_B:
- return LATENCY_REPORTING_REMOVED_PIPE_B;
- case PIPE_C:
- return LATENCY_REPORTING_REMOVED_PIPE_C;
- case PIPE_D:
- return LATENCY_REPORTING_REMOVED_PIPE_D;
- default:
- MISSING_CASE(intel_dp->psr.pipe);
- return 0;
- }
-}
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1798,23 +1794,25 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- bool set_wa_bit = false;
+ enum pipe pipe = intel_dp->psr.pipe;
+ bool activate = false;
/* Wa_14015648006 */
- if (IS_DISPLAY_VER(display, 11, 14))
- set_wa_bit |= crtc_state->wm_level_disabled;
+ if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
+ activate = true;
/* Wa_16013835468 */
- if (DISPLAY_VER(display) == 12)
- set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
- crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ if (DISPLAY_VER(display) == 12 &&
+ crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+ crtc_state->hw.adjusted_mode.crtc_vdisplay)
+ activate = true;
- if (set_wa_bit)
+ if (activate)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- 0, wa_16013835468_bit_get(intel_dp));
+ 0, LATENCY_REPORTING_REMOVED(pipe));
else
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
+ LATENCY_REPORTING_REMOVED(pipe), 0);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -1908,7 +1906,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (intel_dp->psr.sel_update_enabled) {
if (DISPLAY_VER(display) == 9)
- intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT);
@@ -1920,7 +1918,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (!intel_dp->psr.panel_replay_enabled &&
(IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv)))
- intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
@@ -2114,7 +2112,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
*/
if (DISPLAY_VER(display) >= 11)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
+ LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
if (intel_dp->psr.sel_update_enabled) {
/* Wa_16012604467:adlp,mtl[a0,b0] */
@@ -3335,11 +3333,10 @@ unlock:
void intel_psr_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
+ if (!(HAS_PSR(display) || HAS_DP20(display)))
return;
/*
@@ -3357,7 +3354,7 @@ void intel_psr_init(struct intel_dp *intel_dp)
return;
}
- if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
+ if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
DISPLAY_VER(display) >= 20)
intel_dp->psr.source_panel_replay_support = true;
@@ -3974,7 +3971,6 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
@@ -3984,7 +3980,7 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(display) || HAS_DP20(i915))
+ if (HAS_PSR(display) || HAS_DP20(display))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 28f497ae785b..8b30e9fd936e 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_quirks.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 7a28104f68ad..498b35ec4e0f 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2136,6 +2136,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
@@ -2145,10 +2146,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (!intel_sdvo_set_target_output(intel_sdvo,
@@ -2196,14 +2197,14 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
int num_modes = 0;
const struct drm_edid *drm_edid;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
/* set the bus switch and get the modes */
@@ -2297,6 +2298,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct intel_sdvo_connector *intel_sdvo_connector =
@@ -2310,7 +2312,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return 0;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 4b3a32736fd6..41fe26dc200b 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,7 @@
#include <linux/math.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b16c4d2d4077..13811244c82b 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -390,7 +390,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ bool lane_reversal = dig_port->lane_reversal;
u32 val;
if (DISPLAY_VER(i915) >= 14)
@@ -1013,21 +1013,52 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
return true;
}
+/*
+ * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
+ * handshake violation when pwrreq transitions 0->1 during TC7/10 entry
+ */
+static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
+{
+ /* make sure no mailbox command is already in flight */
+ if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ drm_dbg_kms(display->drm,
+ "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
+ return;
+ }
+
+ intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
+ intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
+ TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));
+
+ /* wait for the mailbox run/busy bit to clear before continuing */
+ if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ drm_dbg_kms(display->drm,
+ "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
+ return;
+ }
+}
+
static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ if (DISPLAY_VER(display) == 30)
+ xelpdp_tc_power_request_wa(display, enable);
+
+ val = intel_de_read(display, reg);
if (enable)
val |= XELPDP_TCSS_POWER_REQUEST;
else
val &= ~XELPDP_TCSS_POWER_REQUEST;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 27c530218ee6..6e311dcc1a61 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1714,7 +1714,6 @@ intel_tv_detect(struct drm_connector *connector,
bool force)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
@@ -1722,10 +1721,10 @@ intel_tv_detect(struct drm_connector *connector,
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name, force);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (force) {
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 42022756bbd5..e9b809568cd4 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -1014,6 +1014,14 @@ struct bdb_tv_options {
* Block 27 - eDP VBT Block
*/
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __packed;
+
#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2
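The driver-side replacement for this VBT struct is not visible in this hunk; based on the member names used in intel_pps.c above and the T-point comments in pps_init_delays_spec(), struct intel_pps_delays is presumably a plain mirror with self-describing fields:

struct intel_pps_delays {
	u16 power_up;      /* eDP: T1+T3 */
	u16 backlight_on;  /* eDP: T8 */
	u16 backlight_off; /* eDP: T9 */
	u16 power_down;    /* eDP: T10 */
	u16 power_cycle;   /* eDP: T11+T12 */
};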
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 40525f5c4c42..b355c479eda3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -14,6 +14,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dsi.h"
#include "intel_qp_tables.h"
#include "intel_vdsc.h"
@@ -379,7 +380,7 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
static int intel_dsc_get_vdsc_per_pipe(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->dsc.dsc_split ? 2 : 1;
+ return crtc_state->dsc.num_streams;
}
int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state)
@@ -402,8 +403,10 @@ static void intel_dsc_get_pps_reg(const struct intel_crtc_state *crtc_state, int
pipe_dsc = is_pipe_dsc(crtc, cpu_transcoder);
- if (dsc_reg_num >= 3)
+ if (dsc_reg_num >= 4)
MISSING_CASE(dsc_reg_num);
+ if (dsc_reg_num >= 3)
+ dsc_reg[2] = BMG_DSC2_PPS(pipe, pps);
if (dsc_reg_num >= 2)
dsc_reg[1] = pipe_dsc ? ICL_DSC1_PPS(pipe, pps) : DSCC_PPS(pps);
if (dsc_reg_num >= 1)
@@ -415,7 +418,7 @@ static void intel_dsc_pps_write(const struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- i915_reg_t dsc_reg[2];
+ i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
@@ -770,11 +773,17 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
intel_dsc_pps_configure(crtc_state);
- dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ dss_ctl2_val |= VDSC0_ENABLE;
if (vdsc_instances_per_pipe > 1) {
- dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl2_val |= VDSC1_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
+
+ if (vdsc_instances_per_pipe > 2) {
+ dss_ctl2_val |= VDSC2_ENABLE;
+ dss_ctl2_val |= SMALL_JOINER_CONFIG_3_ENGINES;
+ }
+
if (crtc_state->joiner_pipes) {
if (intel_crtc_ultrajoiner_enable_needed(crtc_state))
dss_ctl1_val |= ULTRA_JOINER_ENABLE;
@@ -809,7 +818,7 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- i915_reg_t dsc_reg[2];
+ i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
u32 val;
@@ -972,12 +981,16 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
- crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+ crtc_state->dsc.compression_enable = dss_ctl2 & VDSC0_ENABLE;
if (!crtc_state->dsc.compression_enable)
goto out;
- crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
- (dss_ctl1 & JOINER_ENABLE);
+ if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & (VDSC2_ENABLE | SMALL_JOINER_CONFIG_3_ENGINES))
+ crtc_state->dsc.num_streams = 3;
+ else if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & VDSC1_ENABLE)
+ crtc_state->dsc.num_streams = 2;
+ else
+ crtc_state->dsc.num_streams = 1;
intel_dsc_get_pps_config(crtc_state);
out:
@@ -988,10 +1001,10 @@ static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state)
{
drm_printf_indent(p, indent,
- "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, split: %s\n",
+ "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, num_streams: %d\n",
FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16),
crtc_state->dsc.slice_count,
- str_yes_no(crtc_state->dsc.dsc_split));
+ crtc_state->dsc.num_streams);
}
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
@@ -1003,3 +1016,48 @@ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
intel_vdsc_dump_state(p, indent, crtc_state);
drm_dsc_dump_config(p, indent, &crtc_state->dsc.config);
}
+
+int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
+ int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+ int min_cdclk;
+
+ if (!crtc_state->dsc.compression_enable)
+ return 0;
+
+ /*
+ * With a single VDSC engine, since each VDSC operates at
+ * 1 ppc throughput, the pixel clock cannot be higher than
+ * the VDSC clock (cdclk). With 2 VDSC engines, the pixel
+ * clock can't be higher than VDSC clock (cdclk) * 2, and so on.
+ */
+ min_cdclk = DIV_ROUND_UP(crtc_state->pixel_rate, num_vdsc_instances);
+
+ if (crtc_state->joiner_pipes) {
+ int pixel_clock = intel_dp_mode_to_fec_clock(crtc_state->hw.adjusted_mode.clock);
+
+ /*
+ * According to Bigjoiner bw check:
+ * compressed_bpp <= PPC * CDCLK * Bigjoiner Interface bits / Pixel clock
+ *
+ * We have already computed compressed_bpp, so now compute the min CDCLK that
+ * is required to support this compressed_bpp.
+ *
+ * => CDCLK >= compressed_bpp * Pixel clock / (PPC * Bigjoiner Interface bits)
+ *
+ * Since PPC = 2 with bigjoiner:
+ * => CDCLK >= compressed_bpp * Pixel clock / (2 * Bigjoiner Interface bits)
+ */
+ int bigjoiner_interface_bits = DISPLAY_VER(display) >= 14 ? 36 : 24;
+ int min_cdclk_bj =
+ (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ pixel_clock) / (2 * bigjoiner_interface_bits);
+
+ min_cdclk = max(min_cdclk, min_cdclk_bj);
+ }
+
+ return min_cdclk;
+}
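The new intel_vdsc_min_cdclk() is meant to be folded into the overall minimum cdclk computation as a lower bound; the actual call site is not part of this hunk, but a minimal sketch of the intended use would be:

	/* DSC engine throughput puts a floor under cdclk */
	min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));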
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 290b2e9b3482..9e2812f99dd7 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -31,5 +31,6 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
+int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index bf32a3b46fb1..2d478a84b07c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -21,8 +21,10 @@
#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
#define DSS_CTL2 _MMIO(0x67404)
-#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
-#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define VDSC0_ENABLE REG_BIT(31)
+#define VDSC2_ENABLE REG_BIT(30)
+#define SMALL_JOINER_CONFIG_3_ENGINES REG_BIT(23)
+#define VDSC1_ENABLE REG_BIT(15)
#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
@@ -57,8 +59,10 @@
#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _BMG_DSC2_PICTURE_PARAMETER_SET_0_PB 0x78970
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define _BMG_DSC2_PICTURE_PARAMETER_SET_0_PC 0x78A70
#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
@@ -71,8 +75,12 @@
#define _ICL_DSC1_PPS_0(pipe) _PICK_EVEN((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define _BMG_DSC2_PPS_0(pipe) _PICK_EVEN((pipe) - PIPE_B, \
+ _BMG_DSC2_PICTURE_PARAMETER_SET_0_PB, \
+ _BMG_DSC2_PICTURE_PARAMETER_SET_0_PC)
#define ICL_DSC0_PPS(pipe, pps) _MMIO(_ICL_DSC0_PPS_0(pipe) + ((pps) * 4))
#define ICL_DSC1_PPS(pipe, pps) _MMIO(_ICL_DSC1_PPS_0(pipe) + ((pps) * 4))
+#define BMG_DSC2_PPS(pipe, pps) _MMIO(_BMG_DSC2_PPS_0(pipe) + ((pps) * 4))
/* PPS 0 */
#define DSC_PPS0_NATIVE_422_ENABLE REG_BIT(23)
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 19a5d0076bb8..70088e355055 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,7 +4,6 @@
*
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -288,7 +287,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
if (IS_DISPLAY_VER(display, 12, 13))
- intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
if (!intel_vrr_possible(crtc_state)) {
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 7dbc99b02eaa..ae21fce534dc 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -105,10 +106,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
const struct drm_format_info *format,
u64 modifier, bool need_scaler)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
@@ -130,9 +131,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* Once NV12 is enabled, handle it here while allocating scaler
* for NV12.
*/
- if (DISPLAY_VER(dev_priv) >= 9 && crtc_state->hw.enable &&
+ if (DISPLAY_VER(display) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -150,9 +151,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (force_detach || !need_scaler) {
if (*scaler_id >= 0) {
scaler_state->scaler_users &= ~(1 << scaler_user);
- scaler_state->scalers[*scaler_id].in_use = 0;
+ scaler_state->scalers[*scaler_id].in_use = false;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
crtc->pipe, scaler_user, *scaler_id,
@@ -164,7 +165,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Planar YUV: src dimensions not met\n");
return -EINVAL;
}
@@ -174,17 +175,17 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
min_dst_w = SKL_MIN_DST_W;
min_dst_h = SKL_MIN_DST_H;
- if (DISPLAY_VER(dev_priv) < 11) {
+ if (DISPLAY_VER(display) < 11) {
max_src_w = SKL_MAX_SRC_W;
max_src_h = SKL_MAX_SRC_H;
max_dst_w = SKL_MAX_DST_W;
max_dst_h = SKL_MAX_DST_H;
- } else if (DISPLAY_VER(dev_priv) < 12) {
+ } else if (DISPLAY_VER(display) < 12) {
max_src_w = ICL_MAX_SRC_W;
max_src_h = ICL_MAX_SRC_H;
max_dst_w = ICL_MAX_DST_W;
max_dst_h = ICL_MAX_DST_H;
- } else if (DISPLAY_VER(dev_priv) < 14) {
+ } else if (DISPLAY_VER(display) < 14) {
max_src_w = TGL_MAX_SRC_W;
max_src_h = TGL_MAX_SRC_H;
max_dst_w = TGL_MAX_DST_W;
@@ -201,7 +202,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
dst_w < min_dst_w || dst_h < min_dst_h ||
src_w > max_src_w || src_h > max_src_h ||
dst_w > max_dst_w || dst_h > max_dst_h) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
crtc->pipe, scaler_user, src_w, src_h,
@@ -218,7 +219,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* now.
*/
if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
@@ -227,7 +228,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ drm_dbg_kms(display->drm, "scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
@@ -268,20 +269,19 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *intel_plane =
- to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_framebuffer *fb = plane_state->hw.fb;
bool force_detach = !fb || !plane_state->uapi.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ if (!icl_is_hdr_plane(dev_priv, plane->id) &&
fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
need_scaler = true;
return skl_update_scaler(crtc_state, force_detach,
- drm_plane_index(&intel_plane->base),
+ drm_plane_index(&plane->base),
&plane_state->scaler_id,
drm_rect_width(&plane_state->uapi.src) >> 16,
drm_rect_height(&plane_state->uapi.src) >> 16,
@@ -292,29 +292,37 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
need_scaler);
}
+static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
+ struct intel_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++) {
+ if (scaler_state->scalers[i].in_use)
+ continue;
+
+ scaler_state->scalers[i].in_use = true;
+
+ return i;
+ }
+
+ return -1;
+}
+
static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
- int num_scalers_need, struct intel_crtc *intel_crtc,
+ int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
int *scaler_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int j;
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 mode;
- if (*scaler_id < 0) {
- /* find a free scaler */
- for (j = 0; j < intel_crtc->num_scalers; j++) {
- if (scaler_state->scalers[j].in_use)
- continue;
-
- *scaler_id = j;
- scaler_state->scalers[*scaler_id].in_use = 1;
- break;
- }
- }
+ if (*scaler_id < 0)
+ *scaler_id = intel_allocate_scaler(scaler_state, crtc);
- if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ if (drm_WARN(display->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
return -EINVAL;
@@ -324,7 +332,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
plane_state->hw.fb->format->num_planes > 1) {
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- if (DISPLAY_VER(dev_priv) == 9) {
+ if (DISPLAY_VER(display) == 9) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
@@ -342,17 +350,17 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
if (linked)
mode |= PS_BINDING_Y_PLANE(linked->id);
}
- } else if (DISPLAY_VER(dev_priv) >= 10) {
+ } else if (DISPLAY_VER(display) >= 10) {
mode = PS_SCALER_MODE_NORMAL;
- } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ } else if (num_scalers_need == 1 && crtc->num_scalers > 1) {
/*
* when only 1 scaler is in use on a pipe with 2 scalers
* scaler 0 operates in high quality (HQ) mode.
* In this case use scaler 0 to take advantage of HQ mode
*/
- scaler_state->scalers[*scaler_id].in_use = 0;
+ scaler_state->scalers[*scaler_id].in_use = false;
*scaler_id = 0;
- scaler_state->scalers[0].in_use = 1;
+ scaler_state->scalers[0].in_use = true;
mode = SKL_PS_SCALER_MODE_HQ;
} else {
mode = SKL_PS_SCALER_MODE_DYN;
@@ -376,7 +384,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
* unnecessarily.
*/
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
/*
* On versions 14 and up, only the first
* scaler supports a vertical scaling factor
@@ -389,7 +397,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
else
max_vscale = 0x10000;
- } else if (DISPLAY_VER(dev_priv) >= 10 ||
+ } else if (DISPLAY_VER(display) >= 10 ||
!intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
max_hscale = 0x30000 - 1;
max_vscale = 0x30000 - 1;
@@ -408,7 +416,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Scaler %d doesn't support required plane scaling\n",
*scaler_id);
drm_rect_debug_print("src: ", src, true);
@@ -418,18 +426,66 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
}
}
- drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
+ drm_dbg_kms(display->drm, "Attached scaler id %u.%u to %s:%d\n",
+ crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
return 0;
}
+static int setup_crtc_scaler(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
+ return intel_atomic_setup_scaler(scaler_state,
+ hweight32(scaler_state->scaler_users),
+ crtc, "CRTC", crtc->base.base.id,
+ NULL, &scaler_state->scaler_id);
+}
+
+static int setup_plane_scaler(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_plane_state *plane_state;
+
+ /* a plane on a different crtc cannot be a scaler user of this crtc */
+ if (drm_WARN_ON(display->drm, plane->pipe != crtc->pipe))
+ return 0;
+
+ plane_state = intel_atomic_get_new_plane_state(state, plane);
+
+ /*
+ * GLK+ scalers don't have an HQ mode, so it isn't
+ * necessary to change between HQ and dyn mode
+ * on those platforms.
+ */
+ if (!plane_state && DISPLAY_VER(display) >= 10)
+ return 0;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ return intel_atomic_setup_scaler(scaler_state,
+ hweight32(scaler_state->scaler_users),
+ crtc, "PLANE", plane->base.base.id,
+ plane_state, &plane_state->scaler_id);
+}
+
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
- * @dev_priv: i915 device
- * @intel_crtc: intel crtc
- * @crtc_state: incoming crtc_state to validate and setup scalers
+ * @state: atomic state
+ * @crtc: crtc
*
* This function sets up scalers based on staged scaling requests for
* a @crtc and its planes. It is called from crtc level check path. If request
@@ -442,16 +498,14 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
* 0 - scalers were setup successfully
* error code - otherwise
*/
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+int intel_atomic_setup_scalers(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_plane *plane = NULL;
- struct intel_plane *intel_plane;
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct drm_atomic_state *drm_state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i;
@@ -470,80 +524,33 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
/* fail if required scalers > available scalers */
- if (num_scalers_need > intel_crtc->num_scalers) {
- drm_dbg_kms(&dev_priv->drm,
+ if (num_scalers_need > crtc->num_scalers) {
+ drm_dbg_kms(display->drm,
"Too many scaling requests %d > %d\n",
- num_scalers_need, intel_crtc->num_scalers);
+ num_scalers_need, crtc->num_scalers);
return -EINVAL;
}
/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
- struct intel_plane_state *plane_state = NULL;
- int *scaler_id;
- const char *name;
- int idx, ret;
+ int ret;
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
continue;
if (i == SKL_CRTC_INDEX) {
- name = "CRTC";
- idx = intel_crtc->base.base.id;
-
- /* panel fitter case: assign as a crtc scaler */
- scaler_id = &scaler_state->scaler_id;
+ ret = setup_crtc_scaler(state, crtc);
+ if (ret)
+ return ret;
} else {
- name = "PLANE";
-
- /* plane scaler case: assign as a plane scaler */
- /* find the plane that set the bit as scaler_user */
- plane = drm_state->planes[i].ptr;
+ struct intel_plane *plane =
+ to_intel_plane(drm_plane_from_index(display->drm, i));
- /*
- * to enable/disable hq mode, add planes that are using scaler
- * into this transaction
- */
- if (!plane) {
- struct drm_plane_state *state;
-
- /*
- * GLK+ scalers don't have a HQ mode so it
- * isn't necessary to change between HQ and dyn mode
- * on those platforms.
- */
- if (DISPLAY_VER(dev_priv) >= 10)
- continue;
-
- plane = drm_plane_from_index(&dev_priv->drm, i);
- state = drm_atomic_get_plane_state(drm_state, plane);
- if (IS_ERR(state)) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to add [PLANE:%d] to drm_state\n",
- plane->base.id);
- return PTR_ERR(state);
- }
- }
-
- intel_plane = to_intel_plane(plane);
- idx = plane->base.id;
-
- /* plane on different crtc cannot be a scaler user of this crtc */
- if (drm_WARN_ON(&dev_priv->drm,
- intel_plane->pipe != intel_crtc->pipe))
- continue;
-
- plane_state = intel_atomic_get_new_plane_state(intel_state,
- intel_plane);
- scaler_id = &plane_state->scaler_id;
+ ret = setup_plane_scaler(state, crtc, plane);
+ if (ret)
+ return ret;
}
-
- ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
- intel_crtc, name, idx,
- plane_state, scaler_id);
- if (ret < 0)
- return ret;
}
return 0;
@@ -596,12 +603,12 @@ static u16 glk_nearest_filter_coef(int t)
*
*/
-static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
+static void glk_program_nearest_filter_coefs(struct intel_display *display,
enum pipe pipe, int id, int set)
{
int i;
- intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set),
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set),
PS_COEF_INDEX_AUTO_INC);
for (i = 0; i < 17 * 7; i += 2) {
@@ -614,11 +621,11 @@ static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
t = glk_coef_tap(i + 1);
tmp |= glk_nearest_filter_coef(t) << 16;
- intel_de_write_fw(dev_priv, GLK_PS_COEF_DATA_SET(pipe, id, set),
+ intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(pipe, id, set),
tmp);
}
- intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
@@ -634,14 +641,14 @@ static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
return PS_FILTER_MEDIUM;
}
-static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
+static void skl_scaler_setup_filter(struct intel_display *display, enum pipe pipe,
int id, int set, enum drm_scaling_filter filter)
{
switch (filter) {
case DRM_SCALING_FILTER_DEFAULT:
break;
case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
- glk_program_nearest_filter_coefs(dev_priv, pipe, id, set);
+ glk_program_nearest_filter_coefs(display, pipe, id, set);
break;
default:
MISSING_CASE(filter);
@@ -650,8 +657,8 @@ static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
@@ -669,7 +676,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->pch_pfit.enabled)
return;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
crtc_state->scaler_state.scaler_id < 0))
return;
@@ -688,18 +695,18 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(dev_priv, pipe, id, 0,
+ skl_scaler_setup_filter(display, pipe, id, 0,
crtc_state->hw.scaling_filter);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ intel_de_write_fw(display, SKL_PS_VPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ intel_de_write_fw(display, SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
}
@@ -708,6 +715,7 @@ skl_program_plane_scaler(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
@@ -751,28 +759,27 @@ skl_program_plane_scaler(struct intel_plane *plane,
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
+ skl_scaler_setup_filter(display, pipe, scaler_id, 0,
plane_state->hw.scaling_filter);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_VPHASE(pipe, scaler_id),
PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_HPHASE(pipe, scaler_id),
PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, scaler_id),
PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, scaler_id),
PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}
static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_CTRL(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_WIN_POS(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}
/*
@@ -803,8 +810,8 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
int id = -1;
int i;
@@ -813,15 +820,15 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
for (i = 0; i < crtc->num_scalers; i++) {
u32 ctl, pos, size;
- ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+ ctl = intel_de_read(display, SKL_PS_CTRL(crtc->pipe, i));
if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE))
continue;
id = i;
crtc_state->pch_pfit.enabled = true;
- pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
- size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+ pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
+ size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));
drm_rect_init(&crtc_state->pch_pfit.dst,
REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 63f93ca03c89..4d2e2dbb1666 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -5,11 +5,7 @@
#ifndef INTEL_SCALER_H
#define INTEL_SCALER_H
-#include <linux/types.h>
-
-enum drm_scaling_filter;
-enum pipe;
-struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -20,9 +16,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+int intel_atomic_setup_scalers(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
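With the new signature the check-path caller only needs the atomic state and the crtc; a minimal sketch of the updated call site (which lives outside this hunk) would be:

	ret = intel_atomic_setup_scalers(state, crtc);
	if (ret)
		return ret;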
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 038ca2ec5d7a..ff9764cac1e7 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -239,7 +239,9 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
+ struct intel_display *display = &i915->display;
+
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return BIT(PLANE_4) | BIT(PLANE_5);
else
return BIT(PLANE_6) | BIT(PLANE_7);
@@ -715,6 +717,22 @@ static u32 skl_plane_ddb_reg_val(const struct skl_ddb_entry *entry)
PLANE_BUF_START(entry->start);
}
+static u32 xe3_plane_min_ddb_reg_val(const u16 *min_ddb,
+ const u16 *interim_ddb)
+{
+ u32 val = 0;
+
+ if (*min_ddb)
+ val |= PLANE_MIN_DBUF_BLOCKS(*min_ddb);
+
+ if (*interim_ddb)
+ val |= PLANE_INTERIM_DBUF_BLOCKS(*interim_ddb);
+
+ val |= val ? PLANE_AUTO_MIN_DBUF_EN : 0;
+
+ return val;
+}
+
static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level)
{
u32 val = 0;
@@ -723,6 +741,9 @@ static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level)
val |= PLANE_WM_EN;
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
+ if (level->auto_min_alloc_wm_enable)
+ val |= PLANE_WM_AUTO_MIN_ALLOC_EN;
+
val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
@@ -734,7 +755,6 @@ static void skl_write_plane_wm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -742,16 +762,19 @@ static void skl_write_plane_wm(struct intel_dsb *dsb,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ const u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
intel_de_write_dsb(display, dsb, PLANE_WM(pipe, plane_id, level),
skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
intel_de_write_dsb(display, dsb, PLANE_WM_TRANS(pipe, plane_id),
skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
intel_de_write_dsb(display, dsb, PLANE_WM_SAGV(pipe, plane_id),
@@ -763,9 +786,13 @@ static void skl_write_plane_wm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_BUF_CFG(pipe, plane_id),
skl_plane_ddb_reg_val(ddb));
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
intel_de_write_dsb(display, dsb, PLANE_NV12_BUF_CFG(pipe, plane_id),
skl_plane_ddb_reg_val(ddb_y));
+
+ if (DISPLAY_VER(display) >= 30)
+ intel_de_write_dsb(display, dsb, PLANE_MIN_BUF_CFG(pipe, plane_id),
+ xe3_plane_min_ddb_reg_val(min_ddb, interim_ddb));
}
static void
@@ -2548,13 +2575,14 @@ static bool tgl_plane_has_mc_ccs(struct drm_i915_private *i915,
static u8 skl_get_plane_caps(struct drm_i915_private *i915,
enum pipe pipe, enum plane_id plane_id)
{
+ struct intel_display *display = &i915->display;
u8 caps = INTEL_PLANE_CAP_TILING_X;
- if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915))
+ if (DISPLAY_VER(display) < 13 || display->platform.alderlake_p)
caps |= INTEL_PLANE_CAP_TILING_Y;
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
caps |= INTEL_PLANE_CAP_TILING_Yf;
- if (HAS_4TILE(i915))
+ if (HAS_4TILE(display))
caps |= INTEL_PLANE_CAP_TILING_4;
if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
@@ -2562,14 +2590,14 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
caps |= INTEL_PLANE_CAP_CCS_RC_CC;
}
if (tgl_plane_has_mc_ccs(i915, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
- if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915))
+ if (DISPLAY_VER(display) >= 14 && display->platform.dgfx)
caps |= INTEL_PLANE_CAP_NEED64K_PHYS;
return caps;
@@ -2743,6 +2771,7 @@ void
skl_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2824,7 +2853,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF: /* aka PLANE_CTL_TILED_4 on XE_LPD+ */
- if (HAS_4TILE(dev_priv)) {
+ if (HAS_4TILE(display)) {
u32 rc_mask = PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
PLANE_CTL_CLEAR_COLOR_DISABLE;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index ff31a00d511e..ca9fdfbbe57c 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -322,6 +322,7 @@
_PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
#define PLANE_WM_EN REG_BIT(31)
#define PLANE_WM_IGNORE_LINES REG_BIT(30)
+#define PLANE_WM_AUTO_MIN_ALLOC_EN REG_BIT(29)
#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0)
@@ -373,12 +374,26 @@
#define PLANE_BUF_CFG(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
_PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B, \
_PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+
/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
#define PLANE_BUF_END_MASK REG_GENMASK(27, 16)
#define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end))
#define PLANE_BUF_START_MASK REG_GENMASK(11, 0)
#define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start))
+#define _PLANE_MIN_BUF_CFG_1_A 0x70274
+#define _PLANE_MIN_BUF_CFG_2_A 0x70374
+#define _PLANE_MIN_BUF_CFG_1_B 0x71274
+#define _PLANE_MIN_BUF_CFG_2_B 0x71374
+#define PLANE_MIN_BUF_CFG(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
+ _PLANE_MIN_BUF_CFG_1_A, _PLANE_MIN_BUF_CFG_1_B, \
+ _PLANE_MIN_BUF_CFG_2_A, _PLANE_MIN_BUF_CFG_2_B)
+#define PLANE_AUTO_MIN_DBUF_EN REG_BIT(31)
+#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(27, 16)
+#define PLANE_MIN_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_MIN_DBUF_BLOCKS_MASK, (val))
+#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(11, 0)
+#define PLANE_INTERIM_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_INTERIM_DBUF_BLOCKS_MASK, (val))
+
/* tgl+ */
#define _SEL_FETCH_PLANE_CTL_1_A 0x70890
#define _SEL_FETCH_PLANE_CTL_2_A 0x708b0
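As a worked example of the layout xe3_plane_min_ddb_reg_val() produces from these fields, hypothetical values of 64 minimum blocks and 32 interim blocks would encode as:

	u32 val = PLANE_MIN_DBUF_BLOCKS(64) |     /* bits 27:16 */
		  PLANE_INTERIM_DBUF_BLOCKS(32) | /* bits 11:0 */
		  PLANE_AUTO_MIN_DBUF_EN;         /* bit 31, set because val is non-zero */
	/* val == 0x80400020 */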
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 3b0e87edbacf..f4458d1185b3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -77,20 +77,23 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return HAS_SAGV(i915) &&
- i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
intel_sagv_block_time(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 14) {
+ struct intel_display *display = &i915->display;
+
+ if (DISPLAY_VER(display) >= 14) {
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_SAGV);
+ val = intel_de_read(display, MTL_LATENCY_SAGV);
return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
u32 val = 0;
int ret;
@@ -98,14 +101,14 @@ intel_sagv_block_time(struct drm_i915_private *i915)
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
if (ret) {
- drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
}
return val;
- } else if (DISPLAY_VER(i915) == 11) {
+ } else if (DISPLAY_VER(display) == 11) {
return 10;
- } else if (HAS_SAGV(i915)) {
+ } else if (HAS_SAGV(display)) {
return 30;
} else {
return 0;
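The version ladder above resolves the SAGV block time in microseconds; a minimal standalone summary, where the MTL register read and the TGL pcode query are modeled as a caller-supplied value (an assumption of this sketch):

    #include <stdio.h>

    /* Summary of intel_sagv_block_time() above (us); hardware reads
     * on display ver 12+ are modeled as hw_reported_us. */
    static unsigned int sagv_block_time_us(int ver, int has_sagv,
                                           unsigned int hw_reported_us)
    {
        if (ver >= 12)
            return hw_reported_us;
        if (ver == 11)
            return 10;
        if (has_sagv)
            return 30;
        return 0;
    }

    int main(void)
    {
        printf("icl: %u us\n", sagv_block_time_us(11, 1, 0)); /* 10 */
        printf("skl: %u us\n", sagv_block_time_us(9, 1, 0));  /* 30 */
        return 0;
    }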
@@ -114,31 +117,33 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!HAS_SAGV(i915))
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ if (!HAS_SAGV(display))
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
* Probe to see if we have working SAGV control.
* For icl+ this was already determined by intel_bw_init_hw().
*/
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
skl_sagv_disable(i915);
- drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+ drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(i915);
- drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+ drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
- if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
"Excessive SAGV block time %u, ignoring\n",
- i915->display.sagv.block_time_us))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us))
+ display->sagv.block_time_us = 0;
if (!intel_has_sagv(i915))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us = 0;
}
/*
@@ -444,6 +449,7 @@ bool intel_can_enable_sagv(struct drm_i915_private *i915,
static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
int ret;
struct intel_crtc *crtc;
@@ -479,7 +485,7 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
* other crtcs can't be allowed to use the more optimal
* normal (ie. non-SAGV) watermarks.
*/
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
DISPLAY_VER(i915) >= 12 &&
intel_crtc_can_enable_sagv(new_crtc_state);
@@ -795,30 +801,40 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
+ struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = intel_de_read(i915, CUR_BUF_CFG(pipe));
+ val = intel_de_read(display, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb, val);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 30) {
+ val = intel_de_read(display, PLANE_MIN_BUF_CFG(pipe, plane_id));
+
+ *min_ddb = REG_FIELD_GET(PLANE_MIN_DBUF_BLOCKS_MASK, val);
+ *interim_ddb = REG_FIELD_GET(PLANE_INTERIM_DBUF_BLOCKS_MASK, val);
+ }
+
+ if (DISPLAY_VER(display) >= 11)
return;
- val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_NV12_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb_y, val);
}
static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -835,7 +851,9 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
skl_ddb_get_hw_plane_state(i915, pipe,
plane_id,
&ddb[plane_id],
- &ddb_y[plane_id]);
+ &ddb_y[plane_id],
+ &min_ddb[plane_id],
+ &interim_ddb[plane_id]);
intel_display_power_put(i915, power_domain, wakeref);
}
@@ -1370,13 +1388,30 @@ static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- return DISPLAY_VER(i915) >= 13 &&
+ /* Xe3+ are auto minimum DDB capable. So don't force minimal wm0 */
+ return IS_DISPLAY_VER(display, 13, 20) &&
crtc_state->uapi.async_flip &&
plane->async_flip;
}
+unsigned int
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane, int width, int height,
+ int cpp)
+{
+ /*
+ * Extra ddb is allocated in proportion to each plane's share of the total
+ * data rate. In some cases a plane must not get any extra ddb, so do not
+ * count its data rate at all in that case.
+ */
+ if (use_minimal_wm0_only(crtc_state, plane))
+ return 0;
+
+ return width * height * cpp;
+}
+
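skl_plane_relative_data_rate() feeds the proportional distribution of spare DDB blocks: a plane with zero relative rate receives no extra blocks. A minimal sketch of the proportional split, with hypothetical names (extra_ddb_share, plane/total rates) and made-up plane sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Proportional share: extra * plane_rate / total_rate, guarding total == 0. */
    static unsigned int extra_ddb_share(unsigned int extra,
                                        uint64_t plane_rate, uint64_t total_rate)
    {
        if (total_rate == 0)
            return 0;
        return (unsigned int)(extra * plane_rate / total_rate);
    }

    int main(void)
    {
        /* A 1920x1080 ARGB (4 cpp) plane next to a 1920x1080 1-cpp plane. */
        uint64_t argb = 1920ull * 1080 * 4; /* 8294400 */
        uint64_t luma = 1920ull * 1080 * 1; /* 2073600 */
        uint64_t total = argb + luma;

        printf("ARGB share of 100 extra blocks: %u\n",
               extra_ddb_share(100, argb, total)); /* 80 */
        printf("luma share of 100 extra blocks: %u\n",
               extra_ddb_share(100, luma, total)); /* 20 */
        return 0;
    }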
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
@@ -1513,6 +1548,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state =
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ struct intel_display *display = to_intel_display(state);
int num_active = hweight8(dbuf_state->active_pipes);
struct skl_plane_ddb_iter iter;
enum plane_id plane_id;
@@ -1523,6 +1559,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+ memset(crtc_state->wm.skl.plane_min_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_min_ddb));
+ memset(crtc_state->wm.skl.plane_interim_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_interim_ddb));
if (!crtc_state->hw.active)
return 0;
@@ -1595,6 +1635,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -1611,6 +1654,11 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
crtc_state->rel_data_rate[plane_id]);
}
+
+ if (DISPLAY_VER(display) >= 30) {
+ *min_ddb = wm->wm[0].min_ddb_alloc;
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+ }
}
drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
@@ -1654,6 +1702,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -1667,6 +1717,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
skl_check_wm_level(&wm->sagv.wm0, ddb);
+ if (DISPLAY_VER(display) >= 30)
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+
skl_check_wm_level(&wm->sagv.trans_wm, ddb);
}
@@ -1745,6 +1798,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int color_plane, unsigned int pan_x)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
@@ -1803,7 +1857,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines,
wp->dbuf_block_size);
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
interm_pbpl += (pan_x != 0);
else if (DISPLAY_VER(i915) >= 10)
interm_pbpl++;
@@ -1868,6 +1922,13 @@ static int skl_wm_max_lines(struct drm_i915_private *i915)
return 31;
}
+static bool xe3_auto_min_alloc_capable(struct intel_plane *plane, int level)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ return DISPLAY_VER(display) >= 30 && level == 0 && plane->id != PLANE_CURSOR;
+}
+
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane,
int level,
@@ -2000,6 +2061,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
result->enable = true;
+ result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
result->can_sagv = latency >= i915->display.sagv.block_time_us;
@@ -2379,16 +2441,18 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
return l1->enable == l2->enable &&
l1->ignore_lines == l2->ignore_lines &&
l1->lines == l2->lines &&
- l1->blocks == l2->blocks;
+ l1->blocks == l2->blocks &&
+ l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
+ struct intel_display *display = &i915->display;
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2496,6 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
@@ -2524,7 +2589,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (HAS_MBUS_JOINING(i915)) {
+ if (HAS_MBUS_JOINING(display)) {
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
@@ -2742,10 +2807,10 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *old_pipe_wm,
const struct skl_pipe_wm *new_pipe_wm)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2756,7 +2821,7 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
return false;
}
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
@@ -2847,32 +2912,58 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* Program DEEP PKG_C_LATENCY Pkg C with all 1's.
* Program PKG_C_LATENCY Added Wake Time = 0
*/
-static void
-skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc)
+void
+intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
- u32 max_latency = 0;
- u32 clear = 0, val = 0;
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ u32 latency = LNL_PKG_C_LATENCY_MASK;
u32 added_wake_time = 0;
+ u32 max_linetime = 0;
+ u32 clear, val;
+ bool fixed_refresh_rate = false;
+ int i;
- if (DISPLAY_VER(i915) < 20)
+ if (DISPLAY_VER(display) < 20)
return;
- if (enable_dpkgc) {
- max_latency = skl_watermark_max_latency(i915, 1);
- if (max_latency == 0)
- max_latency = LNL_PKG_C_LATENCY_MASK;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->vrr.enable ||
+ (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
+ new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline))
+ fixed_refresh_rate = true;
+
+ max_linetime = max(new_crtc_state->linetime, max_linetime);
+ }
+
+ if (fixed_refresh_rate) {
added_wake_time = DSB_EXE_TIME +
- i915->display.sagv.block_time_us;
- } else {
- max_latency = LNL_PKG_C_LATENCY_MASK;
- added_wake_time = 0;
+ display->sagv.block_time_us;
+
+ latency = skl_watermark_max_latency(i915, 1);
+
+ /* Wa_22020432604 */
+ if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
+ latency += added_wake_time;
+ added_wake_time = 0;
+ }
+
+ /* Wa_22020299601 */
+ if ((latency && max_linetime) &&
+ (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) {
+ latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);
+ } else if (!latency) {
+ latency = LNL_PKG_C_LATENCY_MASK;
+ }
}
- clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
- val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
- val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+ clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
+ val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) |
+ REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
- intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
+ intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val);
}
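Wa_22020299601 above rounds the package-C latency up to a whole number of line times. A standalone arithmetic sketch with illustrative values (DIV_ROUND_UP matches the kernel macro):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int latency = 53;      /* us, hypothetical wm latency */
        unsigned int max_linetime = 8;  /* us, hypothetical longest line time */

        /* Wa_22020299601: round latency up to a multiple of the line time. */
        latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);

        printf("rounded latency = %u\n", latency); /* 8 * 7 = 56 */
        return 0;
    }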
static int
@@ -2881,7 +2972,6 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
- bool enable_dpkgc = false;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
@@ -2906,32 +2996,28 @@ skl_compute_wm(struct intel_atomic_state *state)
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
-
- if ((new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
- new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline) ||
- !new_crtc_state->vrr.enable)
- enable_dpkgc = true;
}
- skl_program_dpkgc_latency(to_i915(state->base.dev), enable_dpkgc);
-
skl_print_wm_changes(state);
return 0;
}
-static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(struct intel_display *display,
+ u32 val, struct skl_wm_level *level)
{
level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+ level->auto_min_alloc_wm_enable = DISPLAY_VER(display) >= 30 ?
+ val & PLANE_WM_AUTO_MIN_ALLOC_EN : 0;
}
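skl_wm_level_from_reg_val() unpacks a PLANE_WM value per the register layout added earlier: enable in bit 31, ignore-lines in bit 30, auto-min-alloc in bit 29 (display ver 30+ only), lines in bits 26:14, blocks in bits 11:0. A standalone decode sketch with a made-up register value:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct wm_level {
        bool enable, ignore_lines, auto_min_alloc;
        unsigned int lines, blocks;
    };

    /* Field positions follow the PLANE_WM_* definitions above. */
    static void wm_level_from_val(int display_ver, uint32_t val, struct wm_level *l)
    {
        l->enable = val & (1u << 31);
        l->ignore_lines = val & (1u << 30);
        l->auto_min_alloc = display_ver >= 30 && (val & (1u << 29));
        l->lines = (val >> 14) & 0x1fff; /* bits 26:14 */
        l->blocks = val & 0xfff;         /* bits 11:0 */
    }

    int main(void)
    {
        struct wm_level l;

        wm_level_from_val(30, 0xa000c040, &l);
        printf("en=%d auto=%d lines=%u blocks=%u\n",
               l.enable, l.auto_min_alloc, l.lines, l.blocks);
        /* en=1 auto=1 lines=3 blocks=64 */
        return 0;
    }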
static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
int level;
@@ -2940,37 +3026,37 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
+ val = intel_de_read(display, PLANE_WM(pipe, plane_id, level));
else
- val = intel_de_read(i915, CUR_WM(pipe, level));
+ val = intel_de_read(display, CUR_WM(pipe, level));
- skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ skl_wm_level_from_reg_val(display, val, &wm->wm[level]);
}
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->trans_wm);
+ skl_wm_level_from_reg_val(display, val, &wm->trans_wm);
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.wm0);
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
- } else if (DISPLAY_VER(i915) >= 12) {
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(display) >= 12) {
wm->sagv.wm0 = wm->wm[0];
wm->sagv.trans_wm = wm->trans_wm;
}
@@ -2984,12 +3070,12 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_crtc *crtc;
- if (HAS_MBUS_JOINING(i915))
- dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+ if (HAS_MBUS_JOINING(display))
+ dbuf_state->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -3010,12 +3096,17 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb =
+ &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
if (!crtc_state->hw.active)
continue;
skl_ddb_get_hw_plane_state(i915, crtc->pipe,
- plane_id, ddb, ddb_y);
+ plane_id, ddb, ddb_y,
+ min_ddb, interim_ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
@@ -3037,7 +3128,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->slices[pipe] =
skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
@@ -3045,203 +3136,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
str_yes_no(dbuf_state->joined_mbus));
}
- dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
-}
-
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
-{
- const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
- struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- entries[crtc->pipe] = crtc_state->wm.skl.ddb;
- }
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- u8 slices;
-
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- if (dbuf_state->slices[crtc->pipe] & ~slices)
- return true;
-
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
- I915_MAX_PIPES, crtc->pipe))
- return true;
- }
-
- return false;
-}
-
-static void skl_wm_sanitize(struct drm_i915_private *i915)
-{
- struct intel_crtc *crtc;
-
- /*
- * On TGL/RKL (at least) the BIOS likes to assign the planes
- * to the wrong DBUF slices. This will cause an infinite loop
- * in skl_commit_modeset_enables() as it can't find a way to
- * transition between the old bogus DBUF layout to the new
- * proper DBUF layout without DBUF allocation overlaps between
- * the planes (which cannot be allowed or else the hardware
- * may hang). If we detect a bogus DBUF layout just turn off
- * all the planes so that skl_commit_modeset_enables() can
- * simply ignore them.
- */
- if (!skl_dbuf_is_misconfigured(i915))
- return;
-
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
-
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
-
- memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
- }
-}
-
-static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
-}
-
-void intel_wm_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- struct intel_plane *plane;
- u8 hw_enabled_slices;
- int level;
-
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
-
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
+ dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
@@ -3386,31 +3281,19 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
- if (HAS_HW_SAGV_WM(i915))
- i915->display.wm.num_levels = 6;
- else
- i915->display.wm.num_levels = 8;
+ struct intel_display *display = &i915->display;
- if (DISPLAY_VER(i915) >= 14)
- mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ if (HAS_HW_SAGV_WM(display))
+ display->wm.num_levels = 6;
else
- skl_read_wm_latency(i915, i915->display.wm.skl_latency);
-
- intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
-}
-
-static const struct intel_wm_funcs skl_wm_funcs = {
- .compute_global_watermarks = skl_compute_wm,
- .get_hw_state = skl_wm_get_hw_state_and_sanitize,
-};
-
-void skl_wm_init(struct drm_i915_private *i915)
-{
- intel_sagv_init(i915);
+ display->wm.num_levels = 8;
- skl_setup_wm_latency(i915);
+ if (DISPLAY_VER(display) >= 14)
+ mtl_read_wm_latency(i915, display->wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, display->wm.skl_latency);
- i915->display.funcs.wm = &skl_wm_funcs;
+ intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3450,13 +3333,14 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
int intel_dbuf_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
if (!dbuf_state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ intel_atomic_global_obj_init(display, &display->dbuf.obj,
&dbuf_state->base, &intel_dbuf_funcs);
return 0;
@@ -3466,38 +3350,27 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
switch (pipe) {
case PIPE_A:
- return !(active_pipes & BIT(PIPE_D));
case PIPE_D:
- return !(active_pipes & BIT(PIPE_A));
+ active_pipes &= BIT(PIPE_A) | BIT(PIPE_D);
+ break;
case PIPE_B:
- return !(active_pipes & BIT(PIPE_C));
case PIPE_C:
- return !(active_pipes & BIT(PIPE_B));
+ active_pipes &= BIT(PIPE_B) | BIT(PIPE_C);
+ break;
default: /* to suppress compiler warning */
MISSING_CASE(pipe);
- break;
+ return false;
}
- return false;
+ return is_power_of_2(active_pipes);
}
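The rework above folds the four per-pipe cases into one test: mask active_pipes down to the two pipes that share a DBUF bank (A/D or B/C) and check whether exactly one bit survives. A standalone mirror of that logic:

    #include <stdbool.h>
    #include <stdio.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

    static bool is_power_of_2(unsigned int v)
    {
        return v && !(v & (v - 1));
    }

    /* Mirror of the reworked check: pipes A/D share one bank, B/C the other. */
    static bool only_pipe_in_dbuf_bank(enum pipe pipe, unsigned int active_pipes)
    {
        switch (pipe) {
        case PIPE_A:
        case PIPE_D:
            active_pipes &= (1u << PIPE_A) | (1u << PIPE_D);
            break;
        case PIPE_B:
        case PIPE_C:
            active_pipes &= (1u << PIPE_B) | (1u << PIPE_C);
            break;
        }

        /* Exactly one pipe left in the bank? */
        return is_power_of_2(active_pipes);
    }

    int main(void)
    {
        unsigned int active = (1u << PIPE_A) | (1u << PIPE_B) | (1u << PIPE_C);

        printf("A alone in its bank: %d\n", only_pipe_in_dbuf_bank(PIPE_A, active)); /* 1 */
        printf("B alone in its bank: %d\n", only_pipe_in_dbuf_bank(PIPE_B, active)); /* 0 */
        return 0;
    }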
-static void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
+ const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc *crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val = 0;
- if (DISPLAY_VER(i915) < 11)
- return;
-
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (!new_dbuf_state ||
- (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
- new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
- return;
-
if (DISPLAY_VER(i915) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
@@ -3508,12 +3381,12 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
}
if (DISPLAY_VER(i915) >= 14)
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
- MBUS_DBOX_A_CREDIT(8);
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
else if (IS_ALDERLAKE_P(i915))
/* Wa_22010947358:adl-p */
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
- MBUS_DBOX_A_CREDIT(4);
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
@@ -3530,19 +3403,42 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
val |= MBUS_DBOX_B_CREDIT(8);
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
- u32 pipe_val = val;
+ if (DISPLAY_VERx100(i915) == 1400) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
+ val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
- if (DISPLAY_VERx100(i915) == 1400) {
- if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
- new_dbuf_state->active_pipes))
- pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
- else
- pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
- }
+ return val;
+}
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
- }
+static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ pipe_mbus_dbox_ctl(crtc, dbuf_state));
+}
+
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3562,23 +3458,24 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
int ratio, bool joined_mbus)
{
+ struct intel_display *display = &i915->display;
enum dbuf_slice slice;
- if (!HAS_MBUS_JOINING(i915))
+ if (!HAS_MBUS_JOINING(display))
return;
- if (DISPLAY_VER(i915) >= 20)
- intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
if (joined_mbus)
ratio *= 2;
- drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+ drm_dbg_kms(display->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
ratio, str_yes_no(joined_mbus));
- for_each_dbuf_slice(i915, slice)
- intel_de_rmw(i915, DBUF_CTL_S(slice),
+ for_each_dbuf_slice(display, slice)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
}
@@ -3625,22 +3522,13 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
-static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
- enum pipe pipe)
+static void mbus_ctl_join_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state,
+ enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
u32 mbus_ctl;
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
- str_yes_no(old_dbuf_state->joined_mbus),
- str_yes_no(new_dbuf_state->joined_mbus),
- pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
-
- if (new_dbuf_state->joined_mbus)
+ if (dbuf_state->joined_mbus)
mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
else
mbus_ctl = MBUS_HASHING_MODE_2x2;
@@ -3655,6 +3543,23 @@ static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+ enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus),
+ pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+ mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+}
+
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
{
const struct intel_dbuf_state *new_dbuf_state =
@@ -3757,6 +3662,245 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(i915, new_slices);
}
+static void skl_mbus_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_display *display = &i915->display;
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(display->dbuf.obj.state);
+
+ if (!HAS_MBUS_JOINING(display))
+ return;
+
+ if (!dbuf_state->joined_mbus ||
+ adlp_check_mbus_joined(dbuf_state->active_pipes))
+ return;
+
+ drm_dbg_kms(display->drm, "Disabling redundant MBUS joining (active pipes 0x%x)\n",
+ dbuf_state->active_pipes);
+
+ dbuf_state->joined_mbus = false;
+ intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ dbuf_state->mdclk_cdclk_ratio,
+ dbuf_state->joined_mbus);
+ pipe_mbus_dbox_ctl_update(i915, dbuf_state);
+ mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition between the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+
+ skl_mbus_sanitize(i915);
+ skl_dbuf_sanitize(i915);
+}
+
+void intel_wm_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ u16 min_ddb[I915_MAX_PLANES];
+ u16 interim_ddb[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+ int level;
+
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y, hw->min_ddb, hw->interim_ddb);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
+}
+
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = m->private;
@@ -3830,13 +3974,14 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct intel_display *display = &i915->display;
+ struct drm_minor *minor = display->drm->primary;
- if (HAS_IPC(i915))
+ if (HAS_IPC(display))
debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
&skl_watermark_ipc_status_fops);
- if (HAS_SAGV(i915))
+ if (HAS_SAGV(display))
debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
&intel_sagv_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index e73baec94873..8659f89427f2 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -18,6 +18,7 @@ struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
+struct intel_plane_state;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -53,6 +54,9 @@ const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
int level);
const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id);
+unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane, int width,
+ int height, int cpp);
struct intel_dbuf_state {
struct intel_global_state base;
@@ -87,6 +91,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
+void intel_program_dpkgc_latency(struct intel_atomic_state *state);
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 9383eedee2d4..d49e9b3c7627 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -67,9 +67,8 @@ static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
(bpp * burst_mode_ratio));
}
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+static enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
{
- /* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
@@ -1760,6 +1759,31 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
intel_dsi_log_params(intel_dsi);
}
+int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+ * than 320000 kHz.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ return 320000;
+
+ /*
+ * On Geminilake the picture becomes unstable once the CDCLK gets
+ * as low as 79200 kHz, even though the values are correct for the
+ * DSI PLL and DE PLL.
+ */
+ if (IS_GEMINILAKE(dev_priv))
+ return 158400;
+
+ return 0;
+}
+
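Presumably the cdclk computation clamps its mode-derived minimum against this DSI floor; a standalone sketch of that assumed caller pattern (the helper below just restates the platform floors returned above):

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    /* Platform floors returned by vlv_dsi_min_cdclk() above, in kHz. */
    static int dsi_min_cdclk(int is_vlv, int is_glk)
    {
        if (is_vlv)
            return 320000;
        if (is_glk)
            return 158400;
        return 0;
    }

    int main(void)
    {
        int min_cdclk = 144000; /* hypothetical mode-derived minimum */

        /* Assumed caller pattern: clamp the computed minimum to the DSI floor. */
        min_cdclk = max(min_cdclk, dsi_min_cdclk(1, 0));
        printf("min cdclk = %d kHz\n", min_cdclk); /* 320000 */
        return 0;
    }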
typedef void (*vlv_dsi_dmi_quirk_func)(struct intel_dsi *intel_dsi);
/*
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index cf9d7b82f288..277bacfbc551 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -6,21 +6,20 @@
#ifndef __VLV_DSI_H__
#define __VLV_DSI_H__
-#include <linux/types.h>
-
enum port;
struct drm_i915_private;
+struct intel_crtc_state;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
void vlv_dsi_init(struct drm_i915_private *dev_priv);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
}
-static inline enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 3198b64ad7db..388f90784d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -53,29 +53,6 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
}
/**
- * __i915_gem_object_is_lmem - Whether the object is resident in
- * lmem while in the fence signaling critical path.
- * @obj: The object to check.
- *
- * This function is intended to be called from within the fence signaling
- * path where the fence, or a pin, keeps the object from being migrated. For
- * example during gpu reset or similar.
- *
- * Return: Whether the object is resident in lmem.
- */
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
-{
- struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
-
-#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
- i915_gem_object_evictable(obj));
-#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
-}
-
-/**
* __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
* minimum page size for the backing pages.
* @i915: The i915 instance.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 5a7a14e85c3f..ecd8f1a633a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -19,8 +19,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-
struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
const void *data, size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3dc61cbd2e11..bb713e096db2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -283,9 +283,7 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- /* TODO: make DPT shrinkable when it has no bound vmas */
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
- !obj->is_dpt;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 89d4dc8b60c6..eb0158e43417 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
if (!data[n].ce[0])
continue;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 40269e4c1e31..325da0414d94 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -126,9 +126,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
return active;
}
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 72090f52fb85..4a80ffa1b962 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -405,15 +405,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
return active;
}
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- return __unwind_incomplete_requests(engine);
-}
-
static void
execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index d60a6ca0cae5..f6c59f20832f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -107,11 +107,12 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
+ * @evict_all: Evict all VMAs
*
* Suspend the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*/
-void i915_ggtt_suspend_vm(struct i915_address_space *vm)
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
{
struct i915_vma *vma, *vn;
int save_skip_rewrite;
@@ -157,7 +158,7 @@ retry:
goto retry;
}
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ if (evict_all || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
i915_vma_wait_for_bind(vma);
__i915_vma_evict(vma, false);
@@ -172,13 +173,15 @@ retry:
vm->skip_pte_rewrite = save_skip_rewrite;
mutex_unlock(&vm->mutex);
+
+ drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list));
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
struct intel_gt *gt;
- i915_ggtt_suspend_vm(&ggtt->vm);
+ i915_ggtt_suspend_vm(&ggtt->vm, false);
ggtt->invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
@@ -1545,6 +1548,7 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
* @vm: The VM to restore the mappings for
+ * @all_evicted: Were all VMAs expected to be evicted on suspend?
*
* Restore the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
@@ -1552,13 +1556,18 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
* Returns %true if restoring the mapping for any object that was in a write
* domain before suspend.
*/
-bool i915_ggtt_resume_vm(struct i915_address_space *vm)
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted)
{
struct i915_vma *vma;
bool write_domain_objs = false;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+ if (all_evicted) {
+ drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list));
+ return false;
+ }
+
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
@@ -1598,7 +1607,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_gt_check_and_clear_faults(gt);
- flush = i915_ggtt_resume_vm(&ggtt->vm);
+ flush = i915_ggtt_resume_vm(&ggtt->vm, false);
if (drm_mm_node_allocated(&ggtt->error_capture))
ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 6b85222ee3ea..0a36ea751b63 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -608,8 +608,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
-void i915_ggtt_suspend_vm(struct i915_address_space *vm);
-bool i915_ggtt_resume_vm(struct i915_address_space *vm);
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
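The new evict_all/all_evicted parameters form a contract: a suspend with evict_all=true must leave the VM's bound list empty, and the matching resume with all_evicted=true only asserts that and skips the PTE rewrite. A toy standalone model of that contract (the struct and counts are made up, not driver code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the evict_all/all_evicted contract. */
    struct vm { int bound_vmas; };

    static void suspend_vm(struct vm *vm, bool evict_all)
    {
        if (evict_all)
            vm->bound_vmas = 0; /* every VMA evicted, bound list empty */
        /* else: only non-GLOBAL_BIND VMAs are dropped */
    }

    static bool resume_vm(struct vm *vm, bool all_evicted)
    {
        if (all_evicted) {
            assert(vm->bound_vmas == 0); /* mirrors the drm_WARN_ON */
            return false;                /* nothing to rewrite */
        }
        /* otherwise the PTEs of still-bound VMAs get rewritten here */
        return vm->bound_vmas > 0;
    }

    int main(void)
    {
        struct vm dpt = { .bound_vmas = 3 };

        suspend_vm(&dpt, true);
        printf("flush needed: %d\n", resume_vm(&dpt, true)); /* 0 */
        return 0;
    }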
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index f42f21632306..aae5a081cb53 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1113,6 +1113,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
+ gt_err(gt, "Unrecoverable wedged condition\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
@@ -1198,6 +1199,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason)
{
+ struct intel_display *display = &gt->i915->display;
intel_engine_mask_t awake;
int ret;
@@ -1243,7 +1245,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
intel_irq_resume(gt->i915);
- intel_overlay_reset(gt->i915);
+ intel_overlay_reset(display);
/* sanitize uC after engine reset */
if (!intel_uc_uses_guc_submission(&gt->uc))
@@ -1263,8 +1265,10 @@ void intel_gt_reset(struct intel_gt *gt,
}
ret = resume(gt);
- if (ret)
+ if (ret) {
+ gt_err(gt, "Failed to resume (%d)\n", ret);
goto taint;
+ }
finish:
reset_finish(gt, awake);
@@ -1607,6 +1611,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
+ gt_err(gt, "Non-recoverable wedged on init\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 59da4b7bd262..b74d9205c0f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -308,30 +308,6 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
return cs;
}
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
- int num_dwords;
- void *cs;
-
- num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- if (num_dwords == 0)
- return 0;
-
- num_dwords = CACHELINE_DWORDS - num_dwords;
- GEM_BUG_ON(num_dwords & 1);
-
- cs = intel_ring_begin(rq, num_dwords);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
- intel_ring_advance(rq, cs + num_dwords);
-
- GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
- return 0;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 1b32dadfb8c3..64b322e25f36 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -16,7 +16,6 @@ struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
-int intel_ring_cacheline_align(struct i915_request *rq);
unsigned int intel_ring_update_space(struct intel_ring *ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 32f3b52a183a..458e29d89978 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -26,6 +26,7 @@
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_gt_print.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -230,8 +231,13 @@ static int xcs_resume(struct intel_engine_cs *engine)
set_pp_dir(engine);
- /* First wake the ring up to an empty/idle ring */
- for ((kt) = ktime_get() + (2 * NSEC_PER_MSEC);
+ /*
+ * First wake the ring up to an empty/idle ring.
+ * Use a 50ms delay so the engine can complete its writes on
+ * all platforms; the value was determined empirically, as
+ * shorter delays proved unreliable in testing.
+ */
+ for ((kt) = ktime_get() + (50 * NSEC_PER_MSEC);
ktime_before(ktime_get(), (kt)); cpu_relax()) {
/*
* In case of resets fails because engine resumes from
@@ -282,16 +288,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
return 0;
err:
- drm_err(&engine->i915->drm,
- "%s initialization failed; "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
+ gt_err(engine->gt, "%s initialization failed\n", engine->name);
+ ENGINE_TRACE(engine,
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ GEM_TRACE_DUMP();
return -EIO;
}
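
The bumped wake-up delay above is implemented as a bounded busy-wait against a ktime deadline. A hedged kernel-style sketch of that idiom, with the polled condition and timeout left as placeholders:

#include <linux/ktime.h>
#include <linux/processor.h>
#include <linux/types.h>

/*
 * Poll @done until it reads true or @timeout_ms expires; returns true on
 * success. Sketch of the ktime deadline loop used in xcs_resume() above.
 */
static bool poll_until(bool (*done)(void *), void *data, unsigned int timeout_ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), timeout_ms);

	do {
		if (done(data))
			return true;
		cpu_relax();
	} while (ktime_before(ktime_get(), deadline));

	return done(data); /* one last check after the deadline */
}
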
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 222ca7c44951..81c31396eceb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -3574,7 +3574,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
arg[id].batch = NULL;
arg[id].count = 0;
- worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+ worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
if (IS_ERR(worker[id])) {
err = PTR_ERR(worker[id]);
break;
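
kthread_create_worker() calls are switched to kthread_run_worker() here and in the selftests below; assuming the renamed API keeps the create-and-start semantics, a minimal usage sketch looks like this (all "demo" names are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static void demo_fn(struct kthread_work *work)
{
	pr_info("demo work ran\n");
}

static int demo(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	/* Replaces kthread_create_worker(): the worker starts running. */
	worker = kthread_run_worker(0, "demo/%d", 0);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, demo_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);
	kthread_destroy_worker(worker);
	return 0;
}
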
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 9ce8ff1c04fe..9d3aeb237295 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1025,7 +1025,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].engine = other;
threads[tmp].flags = flags;
- worker = kthread_create_worker(0, "igt/%s",
+ worker = kthread_run_worker(0, "igt/%s",
other->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index ca460cee4f8b..1bf7b88d9a9d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -262,7 +262,7 @@ static int clear(struct intel_migrate *migrate,
{
struct drm_i915_private *i915 = migrate->context->engine->i915;
struct drm_i915_gem_object *obj;
- struct i915_request *rq;
+ struct i915_request *rq = NULL;
struct i915_gem_ww_ctx ww;
u32 *vaddr, val = 0;
bool ccs_cap = false;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 1aa1446c8fb0..27b6d51ef145 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -8,6 +8,7 @@
#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
+#include "intel_rps.h"
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
@@ -38,6 +39,9 @@ int live_rc6_manual(void *arg)
ktime_t dt;
u64 res[2];
int err = 0;
+ u32 rc0_freq = 0;
+ u32 rc6_freq = 0;
+ struct intel_rps *rps = &gt->rps;
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -66,6 +70,7 @@ int live_rc6_manual(void *arg)
rc0_power = librapl_energy_uJ() - rc0_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
+ rc0_freq = intel_rps_read_actual_frequency_fw(rps);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n",
(res[1] - res[0]) >> 10);
@@ -77,7 +82,11 @@ int live_rc6_manual(void *arg)
rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
ktime_to_ns(dt));
if (!rc0_power) {
- pr_err("No power measured while in RC0\n");
+ if (rc0_freq)
+ pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+ rc0_freq);
+ else
+ pr_err("No power and freq measured while in RC0\n");
err = -EINVAL;
goto out_unlock;
}
@@ -90,7 +99,8 @@ int live_rc6_manual(void *arg)
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
rc6_power = librapl_energy_uJ();
- msleep(100);
+ msleep(1000);
+ rc6_freq = intel_rps_read_actual_frequency_fw(rps);
rc6_power = librapl_energy_uJ() - rc6_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
@@ -108,7 +118,8 @@ int live_rc6_manual(void *arg)
pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
rc0_power, rc6_power);
if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6!\n");
+ pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
+ rc6_freq, rc0_freq);
err = -EINVAL;
goto out_unlock;
}
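
The test derives average power by scaling a RAPL energy delta (uJ) by the elapsed time (ns), matching the div64_u64() context above. A runnable userspace sketch of that conversion:

#include <inttypes.h>
#include <stdio.h>

/* Average power (uW) from an energy delta (uJ) over an interval (ns). */
static uint64_t avg_power_uW(uint64_t energy_uJ, uint64_t dt_ns)
{
	/* The kernel uses div64_u64() for the same computation. */
	return dt_ns ? (1000000000ULL * energy_uJ) / dt_ns : 0;
}

int main(void)
{
	/* 1234 uJ consumed over 1 s -> 1234 uW. */
	printf("%" PRIu64 " uW\n", avg_power_uW(1234, 1000000000ULL));
	return 0;
}
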
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index dcef8d498919..c207a4fb03bf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -1125,6 +1125,7 @@ static u64 measure_power(struct intel_rps *rps, int *freq)
static u64 measure_power_at(struct intel_rps *rps, int *freq)
{
*freq = rps_set_check(rps, *freq);
+ msleep(100);
return measure_power(rps, freq);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 4ecc4ae74a54..e218b229681f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -489,7 +489,7 @@ static int live_slpc_tile_interaction(void *arg)
return -ENOMEM;
for_each_gt(gt, i915, i) {
- threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+ threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
if (IS_ERR(threads[i].worker)) {
ret = PTR_ERR(threads[i].worker);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 222c95f62156..e8a04e476c57 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -18,7 +18,7 @@
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#elif IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
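
IS_ENABLED() is preferred over defined() because it evaluates to a proper 0/1 and is true for both built-in (=y) and modular (=m) configurations, whereas defined() only checks that a macro exists. A compile-time sketch, with CONFIG_FOO standing in for a hypothetical Kconfig option:

#include <linux/kconfig.h>
#include <linux/sizes.h>

/* Kconfig emits CONFIG_FOO=1 for =y and CONFIG_FOO_MODULE=1 for =m. */
#if IS_ENABLED(CONFIG_FOO)	/* true for both =y and =m */
#define FOO_BUFSZ SZ_2M
#else
#define FOO_BUFSZ SZ_1M
#endif
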
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index c0bd730383f2..12f1ba7ca9c1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1725,6 +1725,10 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
spin_lock_irq(guc_to_gt(guc)->irq_lock);
spin_unlock_irq(guc_to_gt(guc)->irq_lock);
+ /* Flush tasklet: tasklet_disable() waits out any running instance */
+ tasklet_disable(&guc->ct.receive_tasklet);
+ tasklet_enable(&guc->ct.receive_tasklet);
+
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
flush_work(&guc->ct.requests.worker);
@@ -2042,6 +2046,8 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
+ int outstanding;
+
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2055,8 +2061,10 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
* see in CI if this happens frequently / a precursor to taking down the
* machine.
*/
- if (atomic_read(&guc->outstanding_submission_g2h))
- guc_err(guc, "Unexpected outstanding GuC to Host in reset finish\n");
+ outstanding = atomic_read(&guc->outstanding_submission_g2h);
+ if (outstanding)
+ guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish: %d\n",
+ outstanding);
atomic_set(&guc->outstanding_submission_g2h, 0);
intel_guc_global_policies_update(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d7ac31c3254c..b3cbf85c00cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -427,19 +427,6 @@ void intel_huc_fini(struct intel_huc *huc)
intel_uc_fw_fini(&huc->fw);
}
-void intel_huc_suspend(struct intel_huc *huc)
-{
- if (!intel_uc_fw_is_loadable(&huc->fw))
- return;
-
- /*
- * in the unlikely case that we're suspending before the GSC has
- * completed its loading sequence, just stop waiting. We'll restart
- * on resume.
- */
- delayed_huc_load_complete(huc);
-}
-
static const char *auth_mode_string(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index ba5cb08e9e7b..d5e441b9e08d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -57,7 +57,6 @@ int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
-void intel_huc_suspend(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 81d67a46cd9e..6439c8e91a8d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1286,6 +1286,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1314,9 +1315,9 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
if (info->plane == PLANE_A) {
- info->ctrl_reg = DSPCNTR(dev_priv, info->pipe);
- info->stride_reg = DSPSTRIDE(dev_priv, info->pipe);
- info->surf_reg = DSPSURF(dev_priv, info->pipe);
+ info->ctrl_reg = DSPCNTR(display, info->pipe);
+ info->stride_reg = DSPSTRIDE(display, info->pipe);
+ info->surf_reg = DSPSURF(display, info->pipe);
} else if (info->plane == PLANE_B) {
info->ctrl_reg = SPRCTL(info->pipe);
info->stride_reg = SPRSTRIDE(info->pipe);
@@ -1332,6 +1333,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1380,9 +1382,9 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
- info->ctrl_reg = DSPCNTR(dev_priv, info->pipe);
- info->stride_reg = DSPSTRIDE(dev_priv, info->pipe);
- info->surf_reg = DSPSURF(dev_priv, info->pipe);
+ info->ctrl_reg = DSPCNTR(display, info->pipe);
+ info->stride_reg = DSPSTRIDE(display, info->pipe);
+ info->surf_reg = DSPSURF(display, info->pipe);
return 0;
}
@@ -1419,6 +1421,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1436,7 +1439,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
}
if (info->plane == PLANE_PRIMARY)
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, info->pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, info->pipe))++;
if (info->async_flip)
intel_vgpu_trigger_virtual_event(vgpu, info->event);
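
All of the GVT hunks in this series follow one mechanical conversion: display register macros now take a struct intel_display * derived from the device's embedded display member, instead of struct drm_i915_private *. A minimal sketch of the pattern, reusing the names from the diff (not self-contained, since the types are driver-internal):

/* Sketch: derive the display pointer once, then key macros on it. */
static i915_reg_t primary_ctrl_reg(struct drm_i915_private *i915, enum pipe pipe)
{
	struct intel_display *display = &i915->display;

	return DSPCNTR(display, pipe);	/* was DSPCNTR(i915, pipe) */
}
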
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 17f74cb244bb..95570cabdf27 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -40,6 +40,7 @@
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
@@ -68,8 +69,9 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
- if (!(vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -80,12 +82,13 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
if (drm_WARN_ON(&dev_priv->drm,
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) & TRANSCONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) & TRANSCONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -180,6 +183,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -192,21 +196,21 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) |
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
- for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) &=
+ for_each_pipe(display, pipe) {
+ vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) &=
~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
- vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE;
+ vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
}
for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) {
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, trans)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, trans)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE);
}
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
@@ -254,8 +258,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE;
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
/*
* Golden M/N are calculated based on:
@@ -263,11 +267,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64);
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e;
- vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000;
- vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e;
- vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;
/* Enable per-DDI/PORT vreg */
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
@@ -290,7 +294,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
@@ -320,7 +324,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -351,7 +355,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -400,11 +404,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64);
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e;
- vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000;
- vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e;
- vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
@@ -415,10 +419,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -441,10 +445,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -467,10 +471,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -508,14 +512,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
/* Disable Primary/Sprite/Cursor plane */
- for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE;
+ for_each_pipe(display, pipe) {
+ vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
}
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -631,6 +635,7 @@ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -652,17 +657,19 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
}
if (pipe_is_enabled(vgpu, pipe)) {
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(display, pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ struct intel_display *display = &i915->display;
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->gt->i915, pipe)
+ for_each_pipe(display, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index c454e25b2b0f..15cce973e1ae 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -154,8 +154,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
- u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(dev_priv, pipe)) & stride_mask;
+ u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(display, pipe)) & stride_mask;
u32 stride = stride_reg;
if (GRAPHICS_VER(dev_priv) >= 9) {
@@ -210,6 +211,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
u32 val, fmt;
int pipe;
@@ -217,7 +219,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
- val = vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, DSPCNTR(display, pipe));
plane->enabled = !!(val & DISP_ENABLE);
if (!plane->enabled)
return -ENODEV;
@@ -251,7 +253,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
- plane->base = vgpu_vreg_t(vgpu, DSPSURF(dev_priv, pipe)) & I915_GTT_PAGE_MASK;
+ plane->base = vgpu_vreg_t(vgpu, DSPSURF(display, pipe)) & I915_GTT_PAGE_MASK;
if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
@@ -267,14 +269,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
- plane->width = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) & _PIPE_H_SRCSZ_MASK) >>
+ plane->width = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) & _PIPE_H_SRCSZ_MASK) >>
_PIPE_H_SRCSZ_SHIFT;
plane->width += 1;
- plane->height = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) &
+ plane->height = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) &
_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
plane->height += 1; /* raw height is one minus the real value */
- val = vgpu_vreg_t(vgpu, DSPTILEOFF(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, DSPTILEOFF(display, pipe));
plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
_PRI_PLANE_X_OFF_SHIFT;
plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
@@ -340,6 +342,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
int pipe;
@@ -348,7 +351,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
- val = vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, CURCNTR(display, pipe));
mode = val & MCURSOR_MODE_MASK;
plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
@@ -374,7 +377,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
alpha_plane, alpha_force);
- plane->base = vgpu_vreg_t(vgpu, CURBASE(dev_priv, pipe)) & I915_GTT_PAGE_MASK;
+ plane->base = vgpu_vreg_t(vgpu, CURBASE(display, pipe)) & I915_GTT_PAGE_MASK;
if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
@@ -385,7 +388,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -EINVAL;
}
- val = vgpu_vreg_t(vgpu, CURPOS(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, CURPOS(display, pipe));
plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 9494d812c00a..241cff0fc683 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -45,6 +45,7 @@
#include "intel_mchbar_regs.h"
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
@@ -655,11 +656,12 @@ static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
enum port port;
u32 dp_br, link_m, link_n, htotal, vtotal;
/* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
- port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &
+ port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &
TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
if (port != PORT_B && port != PORT_D) {
gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
@@ -675,12 +677,12 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
/* Get DP link symbol clock M/N */
- link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A));
- link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A));
+ link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A));
+ link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
- vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(display, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(display, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -1011,22 +1013,23 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
return 0;
}
-#define DSPSURF_TO_PIPE(dev_priv, offset) \
- calc_index(offset, DSPSURF(dev_priv, PIPE_A), DSPSURF(dev_priv, PIPE_B), DSPSURF(dev_priv, PIPE_C))
+#define DSPSURF_TO_PIPE(display, offset) \
+ calc_index(offset, DSPSURF(display, PIPE_A), DSPSURF(display, PIPE_B), DSPSURF(display, PIPE_C))
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- u32 pipe = DSPSURF_TO_PIPE(dev_priv, offset);
+ struct intel_display *display = &dev_priv->display;
+ u32 pipe = DSPSURF_TO_PIPE(display, offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
- if (vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) & PLANE_CTL_ASYNC_FLIP)
+ if (vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) & PLANE_CTL_ASYNC_FLIP)
intel_vgpu_trigger_virtual_event(vgpu, event);
else
set_bit(event, vgpu->irq.flip_done_event[pipe]);
@@ -1059,14 +1062,15 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
if (plane == PLANE_PRIMARY) {
- vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset);
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
} else {
vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
}
@@ -2192,6 +2196,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
@@ -2280,21 +2285,21 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_A), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_A), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_B), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_B), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_C), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_C), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_EDP), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_EDP), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 35319228bc51..0dbc4e289300 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -527,24 +527,6 @@ int i915_active_acquire(struct i915_active *ref)
return err;
}
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
-{
- struct i915_active_fence *active;
- int err;
-
- err = i915_active_acquire(ref);
- if (err)
- return err;
-
- active = active_instance(ref, idx);
- if (!active) {
- i915_active_release(ref);
- return -ENOMEM;
- }
-
- return 0; /* return with active ref */
-}
-
void i915_active_release(struct i915_active *ref)
{
debug_active_assert(ref);
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 77c676ecc263..821f7c21ea9b 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -186,7 +186,6 @@ int i915_request_await_active(struct i915_request *rq,
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
int i915_active_acquire(struct i915_active *ref);
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 365329ff8a07..c2ae37d6b94d 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -45,6 +45,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include "display/i9xx_display_sr.h"
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
@@ -60,6 +61,7 @@
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
#include "display/intel_sprite_uapi.h"
+#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -93,17 +95,20 @@
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
-#include "i915_suspend.h"
+#include "i915_reg.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
+#include "intel_cpu_info.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_region_ttm.h"
+#include "intel_sbi.h"
+#include "vlv_sideband.h"
#include "vlv_suspend.h"
static const struct drm_driver i915_drm_driver;
@@ -217,6 +222,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
*/
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
int ret = 0;
if (i915_inject_probe_failure(dev_priv))
@@ -231,8 +237,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
+ intel_sbi_init(dev_priv);
+ vlv_iosf_sb_init(dev_priv);
mutex_init(&dev_priv->sb_lock);
- cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -259,7 +266,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_detect_pch(dev_priv);
intel_irq_init(dev_priv);
- intel_display_driver_early_probe(dev_priv);
+ intel_display_driver_early_probe(display);
intel_clock_gating_hooks_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -282,16 +289,19 @@ err_workqueues:
*/
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
intel_irq_fini(dev_priv);
- intel_power_domains_cleanup(dev_priv);
+ intel_power_domains_cleanup(display);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release_all(dev_priv);
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
- cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
+ vlv_iosf_sb_fini(dev_priv);
+ intel_sbi_fini(dev_priv);
i915_params_free(&dev_priv->params);
}
@@ -307,6 +317,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -332,7 +343,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_gmch_bar_setup(dev_priv);
intel_device_info_runtime_init(dev_priv);
- intel_display_device_info_runtime_init(dev_priv);
+ intel_display_device_info_runtime_init(display);
for_each_gt(gt, dev_priv, i) {
ret = intel_gt_init_mmio(gt);
@@ -415,6 +426,18 @@ mask_err:
return ret;
}
+/* Wa_14022698537:dg2 */
+static void i915_enable_g8(struct drm_i915_private *i915)
+{
+ if (IS_DG2(i915)) {
+ if (IS_DG2_D(i915) && !intel_match_g8_cpu())
+ return;
+
+ snb_pcode_write_p(&i915->uncore, PCODE_POWER_SETUP,
+ POWER_SETUP_SUBCOMMAND_G8_ENABLE, 0, 0);
+ }
+}
+
static int i915_pcode_init(struct drm_i915_private *i915)
{
struct intel_gt *gt;
@@ -428,6 +451,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
}
}
+ i915_enable_g8(i915);
return 0;
}
@@ -599,6 +623,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
*/
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -627,9 +652,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_hwmon_register(dev_priv);
- intel_display_driver_register(dev_priv);
+ intel_display_driver_register(display);
- intel_power_domains_enable(dev_priv);
+ intel_power_domains_enable(display);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
intel_register_dsm_handler();
@@ -644,6 +669,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -652,9 +678,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_unregister_dsm_handler();
intel_runtime_pm_disable(&dev_priv->runtime_pm);
- intel_power_domains_disable(dev_priv);
+ intel_power_domains_disable(display);
- intel_display_driver_unregister(dev_priv);
+ intel_display_driver_unregister(display);
intel_pxp_fini(dev_priv);
@@ -731,7 +757,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- intel_display_device_probe(i915);
+ intel_display_device_probe(pdev);
return i915;
}
@@ -750,6 +776,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_i915_private *i915;
+ struct intel_display *display;
int ret;
ret = pci_enable_device(pdev);
@@ -764,6 +791,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
}
+ display = &i915->display;
+
ret = i915_driver_early_probe(i915);
if (ret < 0)
goto out_pci_disable;
@@ -784,7 +813,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = intel_display_driver_probe_noirq(i915);
+ ret = intel_display_driver_probe_noirq(display);
if (ret < 0)
goto out_cleanup_hw;
@@ -792,7 +821,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset;
- ret = intel_display_driver_probe_nogem(i915);
+ ret = intel_display_driver_probe_nogem(display);
if (ret)
goto out_cleanup_irq;
@@ -804,7 +833,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
- ret = intel_display_driver_probe(i915);
+ ret = intel_display_driver_probe(display);
if (ret)
goto out_cleanup_gem;
@@ -824,14 +853,14 @@ out_cleanup_gem:
i915_gem_driver_release(i915);
out_cleanup_modeset2:
/* FIXME clean up the error path */
- intel_display_driver_remove(i915);
+ intel_display_driver_remove(display);
intel_irq_uninstall(i915);
- intel_display_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(display);
goto out_cleanup_modeset;
out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
- intel_display_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(display);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -851,6 +880,7 @@ out_pci_disable:
void i915_driver_remove(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -864,16 +894,16 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- intel_display_driver_remove(i915);
+ intel_display_driver_remove(display);
intel_irq_uninstall(i915);
- intel_display_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(display);
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
- intel_display_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(display);
i915_driver_hw_remove(i915);
@@ -883,6 +913,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t wakeref;
@@ -906,7 +937,7 @@ static void i915_driver_release(struct drm_device *dev)
i915_driver_late_release(dev_priv);
- intel_display_device_remove(dev_priv);
+ intel_display_device_remove(display);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -936,25 +967,27 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
disable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_disable(&i915->runtime_pm);
- intel_power_domains_disable(i915);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
drm_atomic_helper_shutdown(&i915->drm);
}
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
intel_irq_suspend(i915);
intel_hpd_cancel_work(i915);
if (HAS_DISPLAY(i915))
- intel_display_driver_suspend_access(i915);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&i915->display);
intel_encoder_shutdown_all(&i915->display);
@@ -974,7 +1007,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
* - unify the driver remove and system/runtime suspend sequences with
* the above unified shutdown/poweroff sequence.
*/
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_driver_last_release(&i915->runtime_pm);
@@ -1022,24 +1055,22 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
- intel_power_domains_disable(dev_priv);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
- intel_display_driver_disable_user_access(dev_priv);
+ intel_display_driver_disable_user_access(display);
}
pci_save_state(pdev);
- intel_display_driver_suspend(dev_priv);
-
- intel_dp_mst_suspend(dev_priv);
+ intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
intel_hpd_cancel_work(dev_priv);
if (HAS_DISPLAY(dev_priv))
- intel_display_driver_suspend_access(dev_priv);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&dev_priv->display);
@@ -1047,7 +1078,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
- i915_save_display(dev_priv);
+ i9xx_display_sr_save(display);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_suspend(display, opregion_target_state);
@@ -1066,6 +1097,7 @@ static int i915_drm_suspend(struct drm_device *dev)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
@@ -1081,14 +1113,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
for_each_gt(gt, dev_priv, i)
intel_uncore_suspend(gt->uncore);
- intel_power_domains_suspend(dev_priv, s2idle);
-
- intel_display_power_suspend_late(dev_priv);
+ intel_display_power_suspend_late(display, s2idle);
ret = vlv_suspend_complete(dev_priv);
if (ret) {
drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
- intel_power_domains_resume(dev_priv);
+ intel_display_power_resume_early(display);
goto out;
}
@@ -1166,7 +1196,12 @@ static int i915_drm_resume(struct drm_device *dev)
intel_dmc_resume(display);
- i915_restore_display(dev_priv);
+ i9xx_display_sr_restore(display);
+
+ intel_vga_redisable(display);
+
+ intel_gmbus_reset(display);
+
intel_pps_unlock_regs_wa(display);
intel_init_pch_refclk(dev_priv);
@@ -1188,21 +1223,19 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_display_driver_init_hw(dev_priv);
+ intel_display_driver_init_hw(display);
intel_clock_gating_init(dev_priv);
if (HAS_DISPLAY(dev_priv))
- intel_display_driver_resume_access(dev_priv);
+ intel_display_driver_resume_access(display);
intel_hpd_init(dev_priv);
- /* MST sideband requires HPD interrupts enabled */
- intel_dp_mst_resume(dev_priv);
- intel_display_driver_resume(dev_priv);
+ intel_display_driver_resume(display);
if (HAS_DISPLAY(dev_priv)) {
- intel_display_driver_enable_user_access(dev_priv);
+ intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
intel_hpd_poll_disable(dev_priv);
@@ -1211,7 +1244,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- intel_power_domains_enable(dev_priv);
+ intel_power_domains_enable(display);
intel_gvt_resume(dev_priv);
@@ -1223,6 +1256,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1282,9 +1316,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
for_each_gt(gt, dev_priv, i)
intel_gt_resume_early(gt);
- intel_display_power_resume_early(dev_priv);
-
- intel_power_domains_resume(dev_priv);
+ intel_display_power_resume_early(display);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1486,7 +1518,7 @@ static int intel_runtime_suspend(struct device *kdev)
for_each_gt(gt, dev_priv, i)
intel_uncore_suspend(gt->uncore);
- intel_display_power_suspend(dev_priv);
+ intel_display_power_suspend(display);
ret = vlv_suspend_complete(dev_priv);
if (ret) {
@@ -1580,7 +1612,7 @@ static int intel_runtime_resume(struct device *kdev)
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
- intel_display_power_resume(dev_priv);
+ intel_display_power_resume(display);
ret = vlv_resume_prepare(dev_priv, true);
@@ -1785,7 +1817,6 @@ static const struct drm_driver i915_drm_driver = {
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 94a70d8ec5d5..4b67ad9a61cd 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -15,7 +15,6 @@ struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20230929"
#define DRIVER_TIMESTAMP 1695980603
extern const struct dev_pm_ops i915_pm_ops;
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index f58682505491..168d7375304b 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -102,6 +102,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
for_each_memory_region(mr, i915, id)
drm_print_memory_stats(p,
&stats[id],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
DRM_GEM_OBJECT_PURGEABLE,
mr->uabi_name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7b1a061d92fb..b96b8de12756 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -101,14 +101,6 @@ struct i915_dsm {
resource_size_t usable_size;
};
-struct i915_suspend_saved_registers {
- u32 saveDSPARB;
- u32 saveSWF0[16];
- u32 saveSWF1[16];
- u32 saveSWF3[3];
- u16 saveGCDGMBUS;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -236,9 +228,17 @@ struct drm_i915_private {
spinlock_t irq_lock;
bool irqs_enabled;
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex sbi_lock;
+
+ /* VLV/CHV IOSF sideband */
+ struct {
+ struct mutex lock; /* protect sideband access */
+ struct pm_qos_request qos;
+ } vlv_iosf_sb;
+
/* Sideband mailbox protection */
struct mutex sb_lock;
- struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask;
@@ -291,7 +291,6 @@ struct drm_i915_private {
struct i915_gpu_error gpu_error;
u32 suspend_count;
- struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
struct dram_info {
@@ -550,6 +549,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
+#define IS_DG2_D(i915) \
+ IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_D)
#define IS_RAPTORLAKE_S(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ALDERLAKE_P_N(i915) \
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a9662cc6ed1e..25295eb626dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -71,7 +71,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
* @ww: An optional struct i915_gem_ww_ctx.
- * @node: the &struct drm_mm_node (typically i915_vma.mode)
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
* @offset: where to insert inside the GTT,
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index a62405787e77..be8149e46281 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,9 +2,9 @@
* SPDX-License-Identifier: MIT
*/
+#include "display/intel_overlay.h"
#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
-
#include "pxp/intel_pxp.h"
#include "i915_cmd_parser.h"
@@ -16,6 +16,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = &i915->display;
struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
@@ -38,7 +39,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
- value = !!i915->display.overlay;
+ value = intel_overlay_available(display);
break;
case I915_PARAM_HAS_BSD:
value = !!intel_engine_lookup_user(i915,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 71c0daef1996..819ab933bb10 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -841,7 +841,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Kernel: %s %s\n",
init_utsname()->release,
init_utsname()->machine);
- err_printf(m, "Driver: %s\n", DRIVER_DATE);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f75cbf5b8a1c..7920ad9585ae 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -658,8 +658,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -723,8 +722,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -740,8 +738,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
@@ -794,8 +791,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index f5c97a620962..76e2801619f0 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -143,8 +143,8 @@ int remap_io_sg(struct vm_area_struct *vma,
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) {
- offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT;
+ while (offset >= r.sgt.max >> PAGE_SHIFT) {
+ offset -= r.sgt.max >> PAGE_SHIFT;
r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
if (!r.sgt.sgp)
return -EINVAL;
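
The loop above advances through scatterlist entries by whole pages until the requested offset lands inside one, now sized by the iterator's max field rather than re-reading sg_dma_len(). A runnable userspace sketch of the skip logic, modelling entries as an array of byte lengths:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * Find the entry containing page @offset, updating @offset to be relative
 * to that entry. Returns the entry index, or -1 if the offset is past the end.
 */
static int skip_to_offset(const size_t *len, size_t nents, size_t *offset)
{
	for (size_t i = 0; i < nents; i++) {
		size_t pages = len[i] >> PAGE_SHIFT;

		if (*offset < pages)
			return (int)i;
		*offset -= pages;
	}
	return -1;
}

int main(void)
{
	size_t len[] = { 8 << PAGE_SHIFT, 4 << PAGE_SHIFT };
	size_t offset = 10;
	int i = skip_to_offset(len, 2, &offset);

	printf("entry %d, page %zu within it\n", i, offset); /* entry 1, page 2 */
	return 0;
}
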
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 93fbf53578da..e55db036be1b 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -302,7 +302,7 @@ void i915_pmu_gt_parked(struct intel_gt *gt)
{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
spin_lock_irq(&pmu->lock);
@@ -324,7 +324,7 @@ void i915_pmu_gt_unparked(struct intel_gt *gt)
{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
spin_lock_irq(&pmu->lock);
@@ -626,7 +626,7 @@ static int i915_pmu_event_init(struct perf_event *event)
struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
- if (pmu->closed)
+ if (!pmu->registered)
return -ENODEV;
if (event->attr.type != event->pmu->type)
@@ -724,7 +724,7 @@ static void i915_pmu_event_read(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
u64 prev, new;
- if (pmu->closed) {
+ if (!pmu->registered) {
event->hw.state = PERF_HES_STOPPED;
return;
}
@@ -850,7 +850,7 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
return;
i915_pmu_enable(event);
@@ -861,7 +861,7 @@ static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
goto out;
if (flags & PERF_EF_UPDATE)
@@ -877,7 +877,7 @@ static int i915_pmu_event_add(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
return -ENODEV;
if (flags & PERF_EF_START)
@@ -1177,8 +1177,6 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
- GEM_BUG_ON(!pmu->base.event_init);
-
/* Select the first online CPU as a designated reader. */
if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);
@@ -1191,13 +1189,11 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target = i915_pmu_target_cpu;
- GEM_BUG_ON(!pmu->base.event_init);
-
/*
* Unregistering an instance generates a CPU offline event which we must
* ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
*/
- if (pmu->closed)
+ if (!pmu->registered)
return 0;
if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
@@ -1218,7 +1214,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
+static enum cpuhp_state cpuhp_state = CPUHP_INVALID;
int i915_pmu_init(void)
{
@@ -1232,28 +1228,28 @@ int i915_pmu_init(void)
pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
ret);
else
- cpuhp_slot = ret;
+ cpuhp_state = ret;
return 0;
}
void i915_pmu_exit(void)
{
- if (cpuhp_slot != CPUHP_INVALID)
- cpuhp_remove_multi_state(cpuhp_slot);
+ if (cpuhp_state != CPUHP_INVALID)
+ cpuhp_remove_multi_state(cpuhp_state);
}
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
- if (cpuhp_slot == CPUHP_INVALID)
+ if (cpuhp_state == CPUHP_INVALID)
return -EINVAL;
- return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
+ return cpuhp_state_add_instance(cpuhp_state, &pmu->cpuhp.node);
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
+ cpuhp_state_remove_instance(cpuhp_state, &pmu->cpuhp.node);
}
void i915_pmu_register(struct drm_i915_private *i915)
@@ -1265,7 +1261,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
&i915_pmu_cpumask_attr_group,
NULL
};
-
int ret = -ENOMEM;
spin_lock_init(&pmu->lock);
@@ -1316,6 +1311,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (ret)
goto err_unreg;
+ pmu->registered = true;
+
return;
err_unreg:
@@ -1323,7 +1320,6 @@ err_unreg:
err_groups:
kfree(pmu->base.attr_groups);
err_attr:
- pmu->base.event_init = NULL;
free_event_attributes(pmu);
err_name:
if (IS_DGFX(i915))
@@ -1336,23 +1332,17 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
- /*
- * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
- * ensures all currently executing ones will have exited before we
- * proceed with unregistration.
- */
- pmu->closed = true;
- synchronize_rcu();
+ /* Disconnect the PMU callbacks */
+ pmu->registered = false;
hrtimer_cancel(&pmu->timer);
i915_pmu_unregister_cpuhp_state(pmu);
perf_pmu_unregister(&pmu->base);
- pmu->base.event_init = NULL;
kfree(pmu->base.attr_groups);
if (IS_DGFX(i915))
kfree(pmu->name);
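Two independent cleanups land in i915_pmu.c: cpuhp_slot is renamed to cpuhp_state to match its type, and the closed flag is inverted into registered. The multi-instance hotplug registration they rely on, sketched with the <linux/cpuhotplug.h> API (demo names, callback bodies elided):

static enum cpuhp_state demo_state = CPUHP_INVALID;

static int demo_online(unsigned int cpu, struct hlist_node *node)
{
	return 0;	/* e.g. elect a designated reader CPU */
}

static int demo_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;	/* e.g. migrate events off this CPU */
}

static int demo_init(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					  "demo/pmu:online",
					  demo_online, demo_offline);
	if (ret < 0)
		return ret;
	demo_state = ret;	/* dynamically allocated state value */
	return 0;
}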
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 41af038c3738..8e66d63d0c9f 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -68,9 +68,9 @@ struct i915_pmu {
*/
struct pmu base;
/**
- * @closed: i915 is unregistering.
+ * @registered: PMU is registered and not in the unregistering process.
*/
- bool closed;
+ bool registered;
/**
* @name: Name as registered with perf core.
*/
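Inverting the flag lets every perf callback share one early-out, and the explicit synchronize_rcu() is dropped, apparently leaving it to perf core to quiesce in-flight callbacks during perf_pmu_unregister(). The guard, sketched on a hypothetical wrapper type:

struct demo_pmu {
	struct pmu base;
	bool registered;
};

static int demo_event_init(struct perf_event *event)
{
	struct demo_pmu *pmu =
		container_of(event->pmu, struct demo_pmu, base);

	if (!pmu->registered)	/* driver gone or on its way out */
		return -ENODEV;
	return 0;
}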
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 22be4a731d27..765e6c0528fb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -144,8 +144,6 @@
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
-#define _VGA_MSR_WRITE _MMIO(0x3c2)
-
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
@@ -1069,11 +1067,6 @@
#define CLKGATE_DIS_PSL_EXT(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
-/* DDI Buffer Control */
-#define _DDI_CLK_VALFREQ_A 0x64030
-#define _DDI_CLK_VALFREQ_B 0x64130
-#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
-
/*
* Display engine regs
*/
@@ -1147,53 +1140,6 @@
#define _TRANS_MULT_B 0x6102c
#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
-/* VGA port control */
-#define ADPA _MMIO(0x61100)
-#define PCH_ADPA _MMIO(0xe1100)
-#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
-#define ADPA_DAC_ENABLE (1 << 31)
-#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SEL_SHIFT 30
-#define ADPA_PIPE_SEL_MASK (1 << 30)
-#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
-#define ADPA_PIPE_SEL_SHIFT_CPT 29
-#define ADPA_PIPE_SEL_MASK_CPT (3 << 29)
-#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
-#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
-#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1 << 10)
-#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1 << 11)
-#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
-#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
-#define ADPA_HSYNC_ACTIVE_LOW 0
-#define ADPA_DPMS_MASK (~(3 << 10))
-#define ADPA_DPMS_ON (0 << 10)
-#define ADPA_DPMS_SUSPEND (1 << 10)
-#define ADPA_DPMS_STANDBY (2 << 10)
-#define ADPA_DPMS_OFF (3 << 10)
-
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
@@ -1786,180 +1732,6 @@
#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
-#define DSPARB(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
-#define DSPARB_CSTART_MASK (0x7f << 7)
-#define DSPARB_CSTART_SHIFT 7
-#define DSPARB_BSTART_MASK (0x7f)
-#define DSPARB_BSTART_SHIFT 0
-#define DSPARB_BEND_SHIFT 9 /* on 855 */
-#define DSPARB_AEND_SHIFT 0
-#define DSPARB_SPRITEA_SHIFT_VLV 0
-#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
-#define DSPARB_SPRITEB_SHIFT_VLV 8
-#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
-#define DSPARB_SPRITEC_SHIFT_VLV 16
-#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
-#define DSPARB_SPRITED_SHIFT_VLV 24
-#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
-#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
-#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
-#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
-#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
-#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
-#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
-#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
-#define DSPARB_SPRITED_HI_SHIFT_VLV 12
-#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
-#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
-#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
-#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
-#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
-#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
-#define DSPARB_SPRITEE_SHIFT_VLV 0
-#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
-#define DSPARB_SPRITEF_SHIFT_VLV 8
-#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
-
-/* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
-#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff << 23)
-#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f << 16)
-#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f << 8)
-#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
-#define DSPFW_PLANEA_SHIFT 0
-#define DSPFW_PLANEA_MASK (0x7f << 0)
-#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW2(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
-#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
-#define DSPFW_FBC_SR_SHIFT 28
-#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
-#define DSPFW_FBC_HPLL_SR_SHIFT 24
-#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
-#define DSPFW_SPRITEB_SHIFT (16)
-#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
-#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
-#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_CURSORA_MASK (0x3f << 8)
-#define DSPFW_PLANEC_OLD_SHIFT 0
-#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
-#define DSPFW_SPRITEA_SHIFT 0
-#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
-#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW3(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1 << 31)
-#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
-#define DSPFW_CURSOR_SR_SHIFT 24
-#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
-#define DSPFW_HPLL_CURSOR_SHIFT 16
-#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
-#define DSPFW_HPLL_SR_SHIFT 0
-#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
-
-/* vlv/chv */
-#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
-#define DSPFW_SPRITEB_WM1_SHIFT 16
-#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
-#define DSPFW_CURSORA_WM1_SHIFT 8
-#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
-#define DSPFW_SPRITEA_WM1_SHIFT 0
-#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
-#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
-#define DSPFW_PLANEB_WM1_SHIFT 24
-#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
-#define DSPFW_PLANEA_WM1_SHIFT 16
-#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
-#define DSPFW_CURSORB_WM1_SHIFT 8
-#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
-#define DSPFW_CURSOR_SR_WM1_SHIFT 0
-#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
-#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
-#define DSPFW_SR_WM1_SHIFT 0
-#define DSPFW_SR_WM1_MASK (0x1ff << 0)
-#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
-#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
-#define DSPFW_SPRITED_WM1_SHIFT 24
-#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
-#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
-#define DSPFW_SPRITEC_WM1_SHIFT 8
-#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
-#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
-#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
-#define DSPFW_SPRITEF_WM1_SHIFT 24
-#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
-#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
-#define DSPFW_SPRITEE_WM1_SHIFT 8
-#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
-#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
-#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
-#define DSPFW_PLANEC_WM1_SHIFT 24
-#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
-#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
-#define DSPFW_CURSORC_WM1_SHIFT 8
-#define DSPFW_CURSORC_WM1_MASK (0x3f << 16)
-#define DSPFW_CURSORC_SHIFT 0
-#define DSPFW_CURSORC_MASK (0x3f << 0)
-
-/* vlv/chv high order bits */
-#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
-#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
-#define DSPFW_SPRITEF_HI_SHIFT 23
-#define DSPFW_SPRITEF_HI_MASK (1 << 23)
-#define DSPFW_SPRITEE_HI_SHIFT 22
-#define DSPFW_SPRITEE_HI_MASK (1 << 22)
-#define DSPFW_PLANEC_HI_SHIFT 21
-#define DSPFW_PLANEC_HI_MASK (1 << 21)
-#define DSPFW_SPRITED_HI_SHIFT 20
-#define DSPFW_SPRITED_HI_MASK (1 << 20)
-#define DSPFW_SPRITEC_HI_SHIFT 16
-#define DSPFW_SPRITEC_HI_MASK (1 << 16)
-#define DSPFW_PLANEB_HI_SHIFT 12
-#define DSPFW_PLANEB_HI_MASK (1 << 12)
-#define DSPFW_SPRITEB_HI_SHIFT 8
-#define DSPFW_SPRITEB_HI_MASK (1 << 8)
-#define DSPFW_SPRITEA_HI_SHIFT 4
-#define DSPFW_SPRITEA_HI_MASK (1 << 4)
-#define DSPFW_PLANEA_HI_SHIFT 0
-#define DSPFW_PLANEA_HI_MASK (1 << 0)
-#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
-#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
-#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
-#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
-#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
-#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
-#define DSPFW_PLANEC_WM1_HI_SHIFT 21
-#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
-#define DSPFW_SPRITED_WM1_HI_SHIFT 20
-#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
-#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
-#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
-#define DSPFW_PLANEB_WM1_HI_SHIFT 12
-#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
-#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
-#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
-#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
-#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
-#define DSPFW_PLANEA_WM1_HI_SHIFT 0
-#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
-
-/* drain latency register values*/
-#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
-#define DDL_PLANE_SHIFT 0
-#define DDL_PRECISION_HIGH (1 << 7)
-#define DDL_PRECISION_LOW (0 << 7)
-#define DRAIN_LATENCY_MASK 0x7f
-
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
#define CBR_PND_DEADLINE_DISABLE (1 << 31)
#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
@@ -1967,72 +1739,6 @@
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
-/* FIFO watermark sizes etc */
-#define G4X_FIFO_LINE_SIZE 64
-#define I915_FIFO_LINE_SIZE 64
-#define I830_FIFO_LINE_SIZE 32
-
-#define VALLEYVIEW_FIFO_SIZE 255
-#define G4X_FIFO_SIZE 127
-#define I965_FIFO_SIZE 512
-#define I945_FIFO_SIZE 127
-#define I915_FIFO_SIZE 95
-#define I855GM_FIFO_SIZE 127 /* In cachelines */
-#define I830_FIFO_SIZE 95
-
-#define VALLEYVIEW_MAX_WM 0xff
-#define G4X_MAX_WM 0x3f
-#define I915_MAX_WM 0x3f
-
-#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
-#define PINEVIEW_FIFO_LINE_SIZE 64
-#define PINEVIEW_MAX_WM 0x1ff
-#define PINEVIEW_DFT_WM 0x3f
-#define PINEVIEW_DFT_HPLLOFF_WM 0
-#define PINEVIEW_GUARD_WM 10
-#define PINEVIEW_CURSOR_FIFO 64
-#define PINEVIEW_CURSOR_MAX_WM 0x3f
-#define PINEVIEW_CURSOR_DFT_WM 0
-#define PINEVIEW_CURSOR_GUARD_WM 5
-
-#define VALLEYVIEW_CURSOR_MAX_WM 64
-#define I965_CURSOR_FIFO 64
-#define I965_CURSOR_MAX_WM 32
-#define I965_CURSOR_DFT_WM 8
-
-/* define the Watermark register on Ironlake */
-#define _WM0_PIPEA_ILK 0x45100
-#define _WM0_PIPEB_ILK 0x45104
-#define _WM0_PIPEC_IVB 0x45200
-#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
- _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
-#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
-#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
-#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
-#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
-#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
-#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
-#define WM1_LP_ILK _MMIO(0x45108)
-#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM3_LP_ILK _MMIO(0x45110)
-#define WM_LP_ENABLE REG_BIT(31)
-#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
-#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
-#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
-#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
-#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
-#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
-#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
-#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
-#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
-#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
-#define WM1S_LP_ILK _MMIO(0x45120)
-#define WM2S_LP_IVB _MMIO(0x45124)
-#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
-#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
-#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
-
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2802,7 +2508,7 @@
#define _CHICKEN_TRANS_C 0x420c8
#define _CHICKEN_TRANS_EDP 0x420cc
#define _CHICKEN_TRANS_D 0x420d8
-#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
[TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
[TRANSCODER_A] = _CHICKEN_TRANS_A, \
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
@@ -2810,9 +2516,10 @@
[TRANSCODER_D] = _CHICKEN_TRANS_D))
#define _MTL_CHICKEN_TRANS_A 0x604e0
#define _MTL_CHICKEN_TRANS_B 0x614e0
-#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
+#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
@@ -2863,11 +2570,16 @@
#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
+#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
-#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
-#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
-#define LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define _LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
+#define _LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
+#define _LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define LATENCY_REPORTING_REMOVED(pipe) _PICK((pipe), \
+ _LATENCY_REPORTING_REMOVED_PIPE_A, \
+ _LATENCY_REPORTING_REMOVED_PIPE_B, \
+ _LATENCY_REPORTING_REMOVED_PIPE_C, \
+ _LATENCY_REPORTING_REMOVED_PIPE_D)
#define ICL_DELAY_PMRSP REG_BIT(22)
#define DISABLE_FLR_SRC REG_BIT(15)
#define MASK_WAKEMEM REG_BIT(13)
@@ -3619,6 +3331,7 @@
#define POWER_SETUP_I1_WATTS REG_BIT(31)
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
@@ -3819,6 +3532,7 @@ enum skl_power_gate {
#define TRANS_DDI_PVSYNC (1 << 17)
#define TRANS_DDI_PHSYNC (1 << 16)
#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
+#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
@@ -3863,25 +3577,26 @@ enum skl_power_gate {
#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
-#define DP_TP_CTL_ENABLE (1 << 31)
-#define DP_TP_CTL_FEC_ENABLE (1 << 30)
-#define DP_TP_CTL_MODE_SST (0 << 27)
-#define DP_TP_CTL_MODE_MST (1 << 27)
-#define DP_TP_CTL_FORCE_ACT (1 << 25)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A (0 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B (1 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C (2 << 19)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
-#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 (0 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 (1 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 (4 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT4 (5 << 8)
-#define DP_TP_CTL_LINK_TRAIN_IDLE (2 << 8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3 << 8)
-#define DP_TP_CTL_SCRAMBLE_DISABLE (1 << 7)
+#define DP_TP_CTL_ENABLE REG_BIT(31)
+#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
+#define DP_TP_CTL_MODE_MASK REG_BIT(27)
+#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
+#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
+#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
+#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
+#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
+#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
+#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
@@ -3889,14 +3604,15 @@ enum skl_power_gate {
#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
-#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
-#define DP_TP_STATUS_IDLE_DONE (1 << 25)
-#define DP_TP_STATUS_ACT_SENT (1 << 24)
-#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1 << 12)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
+#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
+#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
+#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
+#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
+#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
/* DDI Buffer Control */
#define _DDI_BUF_CTL_A 0x64000
@@ -4444,14 +4160,6 @@ enum skl_power_gate {
#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
-#define WM_MISC _MMIO(0x45260)
-#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
-
-#define WM_DBG _MMIO(0x45280)
-#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
-#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
-#define WM_DBG_DISALLOW_SPRITE (1 << 2)
-
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
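Alongside the register deletions (the ADPA, DSPARB/DSPFW and watermark blocks move to display-specific headers elsewhere in this series), the DP_TP_CTL/DP_TP_STATUS hunks convert open-coded (x << n) fields to REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(), which add compile-time mask/value sanity checks. A small usage sketch with made-up fields (REG_FIELD_GET is the matching extractor from i915_reg_defs.h):

#define DEMO_CTL_ENABLE		REG_BIT(31)
#define DEMO_CTL_MODE_MASK	REG_GENMASK(10, 8)
#define DEMO_CTL_MODE_IDLE	REG_FIELD_PREP(DEMO_CTL_MODE_MASK, 2)

static u32 demo_pack(void)
{
	return DEMO_CTL_ENABLE | DEMO_CTL_MODE_IDLE;
}

static u32 demo_unpack_mode(u32 val)
{
	return REG_FIELD_GET(DEMO_CTL_MODE_MASK, val);
}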
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
deleted file mode 100644
index f18f1acf2158..000000000000
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "display/intel_de.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_vga.h"
-
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "i915_suspend.h"
-#include "intel_pci_config.h"
-
-static void intel_save_swf(struct drm_i915_private *dev_priv)
-{
- int i;
-
- /* Scratch space */
- if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
- for (i = 0; i < 7; i++) {
- dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv,
- SWF0(dev_priv, i));
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- }
- for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv,
- SWF3(dev_priv, i));
- } else if (GRAPHICS_VER(dev_priv) == 2) {
- for (i = 0; i < 7; i++)
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- } else if (HAS_GMCH(dev_priv)) {
- for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv,
- SWF0(dev_priv, i));
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- }
- for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv,
- SWF3(dev_priv, i));
- }
-}
-
-static void intel_restore_swf(struct drm_i915_private *dev_priv)
-{
- int i;
-
- /* Scratch space */
- if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
- for (i = 0; i < 7; i++) {
- intel_de_write(dev_priv, SWF0(dev_priv, i),
- dev_priv->regfile.saveSWF0[i]);
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- intel_de_write(dev_priv, SWF3(dev_priv, i),
- dev_priv->regfile.saveSWF3[i]);
- } else if (GRAPHICS_VER(dev_priv) == 2) {
- for (i = 0; i < 7; i++)
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- } else if (HAS_GMCH(dev_priv)) {
- for (i = 0; i < 16; i++) {
- intel_de_write(dev_priv, SWF0(dev_priv, i),
- dev_priv->regfile.saveSWF0[i]);
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- intel_de_write(dev_priv, SWF3(dev_priv, i),
- dev_priv->regfile.saveSWF3[i]);
- }
-}
-
-void i915_save_display(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- /* Display arbitration control */
- if (GRAPHICS_VER(dev_priv) <= 4)
- dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv,
- DSPARB(dev_priv));
-
- if (GRAPHICS_VER(dev_priv) == 4)
- pci_read_config_word(pdev, GCDGMBUS,
- &dev_priv->regfile.saveGCDGMBUS);
-
- intel_save_swf(dev_priv);
-}
-
-void i915_restore_display(struct drm_i915_private *dev_priv)
-{
- struct intel_display *display = &dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- intel_restore_swf(dev_priv);
-
- if (GRAPHICS_VER(dev_priv) == 4)
- pci_write_config_word(pdev, GCDGMBUS,
- dev_priv->regfile.saveGCDGMBUS);
-
- /* Display arbitration */
- if (GRAPHICS_VER(dev_priv) <= 4)
- intel_de_write(dev_priv, DSPARB(dev_priv),
- dev_priv->regfile.saveDSPARB);
-
- intel_vga_redisable(display);
-
- intel_gmbus_reset(display);
-}
diff --git a/drivers/gpu/drm/i915/i915_suspend.h b/drivers/gpu/drm/i915/i915_suspend.h
deleted file mode 100644
index e5a611ee3d15..000000000000
--- a/drivers/gpu/drm/i915/i915_suspend.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __I915_SUSPEND_H__
-#define __I915_SUSPEND_H__
-
-struct drm_i915_private;
-
-void i915_save_display(struct drm_i915_private *i915);
-void i915_restore_display(struct drm_i915_private *i915);
-
-#endif /* __I915_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 09d89bdf82f4..7ed41ce9b708 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -642,34 +642,6 @@ DEFINE_EVENT(i915_request, i915_request_wait_end,
TP_ARGS(rq)
);
-TRACE_EVENT_CONDITION(i915_reg_rw,
- TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
-
- TP_ARGS(write, reg, val, len, trace),
-
- TP_CONDITION(trace),
-
- TP_STRUCT__entry(
- __field(u64, val)
- __field(u32, reg)
- __field(u16, write)
- __field(u16, len)
- ),
-
- TP_fast_assign(
- __entry->val = (u64)val;
- __entry->reg = i915_mmio_reg_offset(reg);
- __entry->write = write;
- __entry->len = len;
- ),
-
- TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
- __entry->write ? "write" : "read",
- __entry->reg, __entry->len,
- (u32)(__entry->val & 0xffffffff),
- (u32)(__entry->val >> 32))
-);
-
/**
* DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
*
diff --git a/drivers/gpu/drm/i915/intel_cpu_info.c b/drivers/gpu/drm/i915/intel_cpu_info.c
new file mode 100644
index 000000000000..e52d0ac713a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cpu_info.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ *
+ * Avoid INTEL_<PLATFORM> name collisions between asm/intel-family.h and
+ * intel_device_info.h by having a separate file.
+ */
+
+#include "intel_cpu_info.h"
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+static const struct x86_cpu_id g8_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL),
+ {}
+};
+
+/**
+ * intel_match_g8_cpu - match current CPU against g8_cpu_ids
+ *
+ * This matches the current CPU against g8_cpu_ids, the list of CPU
+ * models to which the G8 workaround applies.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+bool intel_match_g8_cpu(void)
+{
+ return x86_match_cpu(g8_cpu_ids);
+}
+#else /* CONFIG_X86 */
+
+bool intel_match_g8_cpu(void) { return false; }
+
+#endif /* CONFIG_X86 */
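x86_match_cpu() walks the ID table until the empty terminating entry, so the trailing {} sentinel is required. A hypothetical call site, just to show the intended shape (the workaround hook is illustrative, not an i915 symbol):

static void demo_apply_tuning(void)
{
	/* False on !CONFIG_X86 via the stub above. */
	if (intel_match_g8_cpu())
		pr_info("applying G8 workaround\n");
}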
diff --git a/drivers/gpu/drm/i915/intel_cpu_info.h b/drivers/gpu/drm/i915/intel_cpu_info.h
new file mode 100644
index 000000000000..d898fb463d31
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cpu_info.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _INTEL_CPU_INFO_H_
+#define _INTEL_CPU_INFO_H_
+
+#include <linux/types.h>
+
+bool intel_match_g8_cpu(void);
+
+#endif /* _INTEL_CPU_INFO_H_ */
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 856b30fa37dc..bbe3a24fe3d9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -200,6 +200,10 @@ static const u16 subplatform_g12_ids[] = {
INTEL_DG2_G12_IDS(ID),
};
+static const u16 subplatform_dg2_d_ids[] = {
+ INTEL_DG2_D_IDS(ID),
+};
+
static const u16 subplatform_arl_h_ids[] = {
INTEL_ARL_H_IDS(ID),
};
@@ -280,6 +284,11 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
mask = BIT(INTEL_SUBPLATFORM_ARL_S);
}
+ /* DG2_D ids span across multiple DG2 subplatforms */
+ if (find_devid(devid, subplatform_dg2_d_ids,
+ ARRAY_SIZE(subplatform_dg2_d_ids)))
+ mask |= BIT(INTEL_SUBPLATFORM_D);
+
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
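Because the DG2_D device IDs span the existing G10/G11/G12 lists, the D bit is OR-ed on top of whichever subplatform bit was already selected; that overlap is why INTEL_SUBPLATFORM_BITS grows from 3 to 4 in the next hunk. A sketch of a caller testing both bits with the existing IS_SUBPLATFORM() helper (hypothetical check):

static bool demo_is_dg2d_g12(struct drm_i915_private *i915)
{
	return IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12) &&
	       IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_D);
}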
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index ef84eea9ba0b..9387385cb418 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -95,9 +95,11 @@ enum intel_platform {
/*
* Subplatform bits share the same namespace per parent platform. In other words
* it is fine for the same bit to be used on multiple parent platforms.
+ * Devices can belong to multiple subplatforms if needed, so it's possible to set
+ * multiple bits for the same device.
*/
-#define INTEL_SUBPLATFORM_BITS (3)
+#define INTEL_SUBPLATFORM_BITS (4)
#define INTEL_SUBPLATFORM_MASK (BIT(INTEL_SUBPLATFORM_BITS) - 1)
/* HSW/BDW/SKL/KBL/CFL */
@@ -114,6 +116,7 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_G10 0
#define INTEL_SUBPLATFORM_G11 1
#define INTEL_SUBPLATFORM_G12 2
+#define INTEL_SUBPLATFORM_D 3
/* ADL */
#define INTEL_SUBPLATFORM_RPL 0
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index aa51f366626c..ee1cd2126f97 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -5,9 +5,11 @@
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/i9xx_wm_regs.h"
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
#include "display/intel_color_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
index 5c6e517c73f4..41e85ac773dc 100644
--- a/drivers/gpu/drm/i915/intel_sbi.c
+++ b/drivers/gpu/drm/i915/intel_sbi.c
@@ -17,7 +17,7 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
struct intel_uncore *uncore = &i915->uncore;
u32 cmd;
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&i915->sbi_lock);
if (intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
@@ -57,6 +57,16 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
return 0;
}
+void intel_sbi_lock(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->sbi_lock);
+}
+
+void intel_sbi_unlock(struct drm_i915_private *i915)
+{
+ mutex_unlock(&i915->sbi_lock);
+}
+
u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
enum intel_sbi_destination destination)
{
@@ -72,3 +82,13 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
{
intel_sbi_rw(i915, reg, destination, &value, false);
}
+
+void intel_sbi_init(struct drm_i915_private *i915)
+{
+ mutex_init(&i915->sbi_lock);
+}
+
+void intel_sbi_fini(struct drm_i915_private *i915)
+{
+ mutex_destroy(&i915->sbi_lock);
+}
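With lock/unlock wrappers exported, SBI users stop reaching into the mutex directly, and SBI traffic gets its own sbi_lock instead of sharing sb_lock with the sideband code. Typical usage, sketched from the read/write API above (register names shown for flavor, borrowed from the iCLK programming paths):

static void demo_disable_iclk_ssc(struct drm_i915_private *i915)
{
	u32 tmp;

	intel_sbi_lock(i915);
	tmp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
	intel_sbi_write(i915, SBI_SSCCTL6, tmp | SBI_SSCCTL_DISABLE, SBI_ICLK);
	intel_sbi_unlock(i915);
}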
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
index f5a862210454..85161a4f13b8 100644
--- a/drivers/gpu/drm/i915/intel_sbi.h
+++ b/drivers/gpu/drm/i915/intel_sbi.h
@@ -15,6 +15,10 @@ enum intel_sbi_destination {
SBI_MPHY,
};
+void intel_sbi_init(struct drm_i915_private *i915);
+void intel_sbi_fini(struct drm_i915_private *i915);
+void intel_sbi_lock(struct drm_i915_private *i915);
+void intel_sbi_unlock(struct drm_i915_private *i915);
u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6aa179a3e92a..eed4937c3ff3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -31,12 +31,17 @@
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
-#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
+struct intel_uncore *to_intel_uncore(struct drm_device *drm)
+{
+ return &to_i915(drm)->uncore;
+}
+
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static void
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index f419c311a0de..e39582950627 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -501,6 +501,8 @@ static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
return uncore->regs;
}
+struct intel_uncore *to_intel_uncore(struct drm_device *drm);
+
/*
* The raw_reg_{read,write} macros are intended as a micro-optimization for
* interrupt handlers so that the pointer indirection on uncore->regs can
diff --git a/drivers/gpu/drm/i915/intel_uncore_trace.c b/drivers/gpu/drm/i915/intel_uncore_trace.c
new file mode 100644
index 000000000000..86f0c3942b1d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore_trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "intel_uncore_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore_trace.h b/drivers/gpu/drm/i915/intel_uncore_trace.h
new file mode 100644
index 000000000000..f13ff71edf2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore_trace.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+
+#if !defined(__INTEL_UNCORE_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __INTEL_UNCORE_TRACE_H__
+
+#include "i915_reg_defs.h"
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(i915_reg_rw,
+ TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
+
+ TP_ARGS(write, reg, val, len, trace),
+
+ TP_CONDITION(trace),
+
+ TP_STRUCT__entry(
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->val = (u64)val;
+ __entry->reg = i915_mmio_reg_offset(reg);
+ __entry->write = write;
+ __entry->len = len;
+ ),
+
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
+);
+#endif /* __INTEL_UNCORE_TRACE_H__ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_FILE intel_uncore_trace
+#include <trace/define_trace.h>
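This is the usual trace-header split: the event definition moves verbatim out of i915_trace.h, and exactly one .c file defines CREATE_TRACE_POINTS before including the header so the tracepoint bodies are emitted once. Call sites are unchanged; the generated hook is invoked from the MMIO accessors roughly like this (sketch):

/* `trace` gates per-register logging, matching TP_CONDITION above. */
trace_i915_reg_rw(false /* read */, reg, (u64)val, sizeof(val), trace);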
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index acae30a04a94..88870844b5bd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -492,7 +492,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
for (n = 0; n < ncpus; n++) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d", n);
+ worker = kthread_run_worker(0, "igt/%d", n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
ncpus = n;
@@ -1645,7 +1645,7 @@ static int live_parallel_engines(void *arg)
for_each_uabi_engine(engine, i915) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
@@ -1806,7 +1806,7 @@ static int live_breadcrumbs_smoketest(void *arg)
unsigned int i = idx * ncpus + n;
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+ worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
goto out_flush;
@@ -3219,7 +3219,7 @@ static int perf_parallel_engines(void *arg)
memset(&engines[idx].p, 0, sizeof(engines[idx].p));
- worker = kthread_create_worker(0, "igt:%s",
+ worker = kthread_run_worker(0, "igt:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
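The selftests track a kthread API rename: kthread_run_worker() both creates and starts the worker, mirroring the kthread_create()/kthread_run() naming split. A self-contained sketch of the lifecycle (demo names; the work item is supplied by the caller):

#include <linux/kthread.h>

static int demo_run_once(struct kthread_work *work)
{
	struct kthread_worker *worker = kthread_run_worker(0, "demo/%d", 0);

	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_queue_work(worker, work);
	kthread_flush_worker(worker);
	kthread_destroy_worker(worker);
	return 0;
}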
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 29110abb4fe0..c383d31d46b0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -19,12 +19,22 @@ int igt_flush_test(struct drm_i915_private *i915)
int ret = 0;
for_each_gt(gt, i915, i) {
+ struct intel_engine_cs *engine;
+ unsigned long timeout_ms = 0;
+ unsigned int id;
+
if (intel_gt_is_wedged(gt))
ret = -EIO;
+ for_each_engine(engine, gt, id) {
+ if (engine->props.preempt_timeout_ms > timeout_ms)
+ timeout_ms = engine->props.preempt_timeout_ms;
+ }
+
cond_resched();
- if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
+ /* 2x longest preempt timeout, experimentally determined */
+ if (intel_gt_wait_for_idle(gt, HZ * timeout_ms / 500) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
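The idle wait is now scaled to the slowest engine rather than a fixed 3 s. Since timeout_ms is in milliseconds and intel_gt_wait_for_idle() takes jiffies, HZ * timeout_ms / 500 = 2 * (HZ * timeout_ms / 1000), i.e. twice the longest preemption timeout. Worked example with the default 640 ms timeout and HZ = 250: 250 * 640 / 500 = 320 jiffies = 1.28 s = 2 * 640 ms.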
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index ae57eb03dfca..a77e5b26542c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -180,7 +180,7 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- intel_display_device_probe(i915);
+ intel_display_device_probe(pdev);
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index 68291412f4cb..114ae8eb9cd5 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -43,7 +43,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
* to the Valleyview P-unit and not all sideband communications.
*/
if (IS_VALLEYVIEW(i915)) {
- cpu_latency_qos_update_request(&i915->sb_qos, 0);
+ cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos, 0);
on_each_cpu(ping, NULL, 1);
}
}
@@ -51,7 +51,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
static void __vlv_punit_put(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915))
- cpu_latency_qos_update_request(&i915->sb_qos,
+ cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos,
PM_QOS_DEFAULT_VALUE);
iosf_mbi_punit_release();
@@ -62,12 +62,12 @@ void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
if (ports & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_get(i915);
- mutex_lock(&i915->sb_lock);
+ mutex_lock(&i915->vlv_iosf_sb.lock);
}
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
{
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&i915->vlv_iosf_sb.lock);
if (ports & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_put(i915);
@@ -81,7 +81,7 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
int err;
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&i915->vlv_iosf_sb.lock);
if (port == IOSF_PORT_PUNIT)
iosf_mbi_assert_punit_acquired();
@@ -249,3 +249,21 @@ void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
reg, &val);
}
+
+void vlv_iosf_sb_init(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ mutex_init(&i915->vlv_iosf_sb.lock);
+
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_add_request(&i915->vlv_iosf_sb.qos, PM_QOS_DEFAULT_VALUE);
+}
+
+void vlv_iosf_sb_fini(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_remove_request(&i915->vlv_iosf_sb.qos);
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ mutex_destroy(&i915->vlv_iosf_sb.lock);
+}
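The QoS request and the lock now have explicit init/fini hooks keyed off the platform, instead of being set up piecemeal during driver init. The cpu_latency_qos lifecycle they manage, sketched with the <linux/pm_qos.h> API (demo names):

static struct pm_qos_request demo_qos;

static void demo_qos_init(void)
{
	cpu_latency_qos_add_request(&demo_qos, PM_QOS_DEFAULT_VALUE);
}

static void demo_punit_access(bool start)
{
	/* 0 forbids deep C-states for the duration of the transaction. */
	cpu_latency_qos_update_request(&demo_qos,
				       start ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void demo_qos_fini(void)
{
	cpu_latency_qos_remove_request(&demo_qos);
}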
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index c20cf41b2d39..31813e07c56f 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -25,6 +25,9 @@ enum {
VLV_IOSF_SB_PUNIT,
};
+void vlv_iosf_sb_init(struct drm_i915_private *i915);
+void vlv_iosf_sb_fini(struct drm_i915_private *i915);
+
void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index 94595dde2b96..fc9f311ea1db 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -13,6 +13,7 @@
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_clock_gating.h"
+#include "intel_uncore_trace.h"
#include "vlv_suspend.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 85ee9abd1811..0639502137b4 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -1387,7 +1387,6 @@ static struct drm_driver pvr_drm_driver = {
.name = PVR_DRIVER_NAME,
.desc = PVR_DRIVER_DESC,
- .date = PVR_DRIVER_DATE,
.major = PVR_DRIVER_MAJOR,
.minor = PVR_DRIVER_MINOR,
.patchlevel = PVR_DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/imagination/pvr_drv.h b/drivers/gpu/drm/imagination/pvr_drv.h
index 378fe477b759..7fa147312dd1 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.h
+++ b/drivers/gpu/drm/imagination/pvr_drv.h
@@ -9,7 +9,6 @@
#define PVR_DRIVER_NAME "powervr"
#define PVR_DRIVER_DESC "Imagination PowerVR (Series 6 and later) & IMG Graphics"
-#define PVR_DRIVER_DATE "20230904"
/*
* Driver interface version:
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 63a335c62296..3633e8f3aff6 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -3,11 +3,11 @@
* Copyright 2019 NXP.
*/
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -34,7 +34,6 @@ static const struct drm_driver dcss_kms_driver = {
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
- .date = "20190917",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index 5f2c93c3c288..ec5fd9a01f1e 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -13,9 +13,9 @@
#include <video/imx-ipu-v3.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -163,7 +163,6 @@ static const struct drm_driver imx_drm_driver = {
.fops = &imx_drm_driver_fops,
.name = "imx-drm",
.desc = "i.MX DRM graphics",
- .date = "20120507",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index fa7d44623c52..8d6a0bb31c48 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: 2020 Marian Cichy <M.Cichy@pengutronix.de>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -352,7 +352,6 @@ static struct drm_driver imx_lcdc_drm_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "imx-lcdc",
.desc = "i.MX LCDC driver",
- .date = "20200716",
};
static const struct of_device_id imx_lcdc_of_dev_id[] = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 8469e1e5e582..c23ee2d214de 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -20,11 +20,11 @@
#include <linux/pm.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
@@ -953,7 +953,6 @@ static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "ingenic-drm",
.desc = "DRM module for Ingenic SoCs",
- .date = "20200716",
.major = 1,
.minor = 1,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index a3d31de761cb..32cda134ae3e 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -13,8 +13,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -445,7 +445,6 @@ static const struct drm_driver kmb_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "kmb-drm",
.desc = "KEEMBAY DISPLAY DRIVER",
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index bf085e95b28f..1f0c10d317fe 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -16,7 +16,6 @@
#define KMB_MIN_WIDTH 1920 /* Minimum width in pixels */
#define KMB_MIN_HEIGHT 1080 /* Minimum height in pixels */
-#define DRIVER_DATE "20210223"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index fb3062c872b3..2067c5b65c57 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -271,7 +271,6 @@ static const struct drm_driver lima_drm_driver = {
.fops = &lima_drm_driver_fops,
.name = "lima",
.desc = "lima DRM",
- .date = "20191231",
.major = 1,
.minor = 1,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index fb9de5e0bc0e..204b0fee55d0 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -15,8 +15,8 @@
#include <linux/regmap.h>
#include <linux/types.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -52,7 +52,6 @@ static struct drm_driver logicvc_drm_driver = {
.fops = &logicvc_drm_fops,
.name = "logicvc-drm",
.desc = "Xylon LogiCVC DRM driver",
- .date = "20200403",
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index b350bdcf1645..12193d2a301a 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -7,9 +7,9 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -26,7 +26,6 @@
#define DRIVER_AUTHOR "Sui Jingfeng <suijingfeng@loongson.cn>"
#define DRIVER_NAME "loongson"
#define DRIVER_DESC "drm driver for loongson graphics"
-#define DRIVER_DATE "20220701"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -39,7 +38,6 @@ static const struct drm_driver lsdc_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -232,9 +230,9 @@ lsdc_create_device(struct pci_dev *pdev,
lsdc_gem_init(ddev);
/* Bar 0 of the DC device contains the MMIO register's base address */
- ldev->reg_base = pcim_iomap(pdev, 0, 0);
- if (!ldev->reg_base)
- return ERR_PTR(-ENODEV);
+ ldev->reg_base = pcim_iomap_region(pdev, 0, "lsdc");
+ if (IS_ERR(ldev->reg_base))
+ return ldev->reg_base;
spin_lock_init(&ldev->reglock);
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index c4d51f5f038d..5f2c462bad7e 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -65,9 +65,9 @@
#include <linux/slab.h>
#include <linux/delay.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_dma.h>
@@ -208,7 +208,6 @@ static const struct drm_driver mcde_drm_driver = {
.fops = &drm_fops,
.name = "mcde",
.desc = DRIVER_DESC,
- .date = "20180529",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 3b0993abfdc3..cd385ba4c66a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -1138,6 +1138,18 @@ static void mtk_dp_digital_sw_reset(struct mtk_dp *mtk_dp)
0, DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
}
+static void mtk_dp_sdp_path_reset(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ SDP_RESET_SW_DP_ENC0_P0,
+ SDP_RESET_SW_DP_ENC0_P0);
+
+	/* Wait for the SDP path reset to complete */
+ usleep_range(1000, 5000);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ 0, SDP_RESET_SW_DP_ENC0_P0);
+}
+
static void mtk_dp_set_lanes(struct mtk_dp *mtk_dp, int lanes)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35F0,
@@ -1168,17 +1180,25 @@ static void mtk_dp_get_calibration_data(struct mtk_dp *mtk_dp)
buf = (u32 *)nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
- if (IS_ERR(buf) || ((len / sizeof(u32)) != 4)) {
+ if (IS_ERR(buf)) {
dev_warn(dev, "Failed to read nvmem_cell_read\n");
-
- if (!IS_ERR(buf))
- kfree(buf);
-
goto use_default_val;
}
+	/* The cell length is in bytes. Convert it to a count of u32 entries. */
+ len /= sizeof(u32);
+
for (i = 0; i < MTK_DP_CAL_MAX; i++) {
fmt = &mtk_dp->data->efuse_fmt[i];
+
+ if (fmt->idx >= len) {
+ dev_warn(mtk_dp->dev,
+				 "Out-of-bounds efuse data access, fmt idx = %d, buf len = %zu\n",
+ fmt->idx, len);
+ kfree(buf);
+ goto use_default_val;
+ }
+
cal_data[i] = (buf[fmt->idx] >> fmt->shift) & fmt->mask;
if (cal_data[i] < fmt->min_val || cal_data[i] > fmt->max_val) {
@@ -2399,6 +2419,9 @@ static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
+	/* Software reset of the SDP path */
+ mtk_dp_sdp_path_reset(mtk_dp);
+
/* Ensure the sink is muted */
msleep(20);
}
@@ -2623,7 +2646,6 @@ static const struct hdmi_codec_ops mtk_dp_audio_codec_ops = {
.audio_shutdown = mtk_dp_audio_shutdown,
.get_eld = mtk_dp_audio_get_eld,
.hook_plugged_cb = mtk_dp_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int mtk_dp_register_audio_driver(struct device *dev)
@@ -2634,6 +2656,7 @@ static int mtk_dp_register_audio_driver(struct device *dev)
.max_i2s_channels = 8,
.i2s = 1,
.data = mtk_dp,
+ .no_capture_mute = 1,
};
mtk_dp->audio_pdev = platform_device_register_data(dev,
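The efuse fix above replaces a hard-coded "cell must be exactly four words" check with a per-index bounds check against the actual cell length. The shape of the pattern as a standalone sketch (illustrative names, standard nvmem consumer API):

#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int demo_read_cal_word(struct nvmem_cell *cell, unsigned int idx,
			      u32 *out)
{
	size_t len;
	u32 *buf = nvmem_cell_read(cell, &len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	len /= sizeof(u32);	/* bytes -> number of u32 entries */
	if (idx >= len) {
		kfree(buf);
		return -ERANGE;
	}

	*out = buf[idx];
	kfree(buf);
	return 0;
}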
diff --git a/drivers/gpu/drm/mediatek/mtk_dp_reg.h b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
index 709b79480693..8ad7a9cc259e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp_reg.h
+++ b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
@@ -86,6 +86,7 @@
#define MTK_DP_ENC0_P0_3004 0x3004
#define VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK BIT(8)
#define DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0 BIT(9)
+#define SDP_RESET_SW_DP_ENC0_P0 BIT(13)
#define MTK_DP_ENC0_P0_3010 0x3010
#define HTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
#define MTK_DP_ENC0_P0_3014 0x3014
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b037616d3342..cd25e5afe55a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -12,9 +12,9 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -33,7 +33,6 @@
#define DRIVER_NAME "mediatek"
#define DRIVER_DESC "Mediatek SoC DRM"
-#define DRIVER_DATE "20150513"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -618,7 +617,6 @@ static const struct drm_driver mtk_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 70dc1d4460ad..ca82bc829cb9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1660,7 +1660,6 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.mute_stream = mtk_hdmi_audio_mute,
.get_eld = mtk_hdmi_audio_get_eld,
.hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int mtk_hdmi_register_audio_driver(struct device *dev)
@@ -1671,6 +1670,7 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
.max_i2s_channels = 2,
.i2s = 1,
.data = hdmi,
+ .no_capture_mute = 1,
};
struct platform_device *pdev;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 0f5a1a54544e..81d2ee37e773 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -16,8 +16,8 @@
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -105,7 +105,6 @@ static const struct drm_driver meson_driver = {
.fops = &fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = "20161109",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 97fd7eb765b4..069fdd2dc8f6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
@@ -97,7 +97,6 @@ static const struct drm_driver mgag200_driver = {
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 988967eafbf2..0608fc63e588 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -25,7 +25,6 @@
#define DRIVER_NAME "mgag200"
#define DRIVER_DESC "MGA G200 SE"
-#define DRIVER_DATE "20110418"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index f274d9430cc3..5df20cbeafb8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -78,6 +78,7 @@ msm-display-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_cwb.o \
disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_dsc_1_2.o \
disp/dpu1/dpu_hw_interrupts.o \
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 50c490b492f0..f1b18a6663f7 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -251,8 +251,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
/* Disable L2 bypass to avoid UCHE out of bounds errors */
- gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
- gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
@@ -693,6 +693,8 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
if (ret)
goto fail;
+ adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
+
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ee89db72e36e..71dca78cd7a5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -750,10 +750,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
/* Disable L2 bypass in the UCHE */
- gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
- gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
- gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
- gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
/* Set the GMEM VA range (0 to gpu->gmem) */
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
@@ -1760,11 +1760,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
unsigned int nr_rings;
int ret;
- if (!pdev) {
- DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
- return ERR_PTR(-ENXIO);
- }
-
a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
if (!a5xx_gpu)
return ERR_PTR(-ENOMEM);
@@ -1805,5 +1800,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
adreno_gpu->ubwc_config.macrotile_mode = 0;
adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+ adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 0c560e84ad5a..edffb7737a97 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1388,6 +1388,17 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
},
.address_space_size = SZ_16G,
.preempt_record_size = 4192 * SZ_1K,
@@ -1432,6 +1443,17 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(2),
+ .perfmode_bw = 10687500,
+ },
+ { /* sentinel */ },
+ },
},
.address_space_size = SZ_16G,
.preempt_record_size = 3572 * SZ_1K,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 14db7376c712..65d38b25c070 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -9,6 +9,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
@@ -109,9 +110,11 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 perf_index;
+ u32 bw_index = 0;
unsigned long gpu_freq;
int ret = 0;
@@ -124,6 +127,37 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
if (gpu_freq == gmu->gpu_freqs[perf_index])
break;
+ /* If enabled, find the corresponding DDR bandwidth index */
+ if (info->bcms && gmu->nr_gpu_bws > 1) {
+ unsigned int bw = dev_pm_opp_get_bw(opp, true, 0);
+
+ for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) {
+ if (bw == gmu->gpu_bw_table[bw_index])
+ break;
+ }
+
+ /* Vote AB as a fraction of the max bandwidth, starting from A750 */
+ if (bw && adreno_is_a750_family(adreno_gpu)) {
+ u64 tmp;
+
+ /* For now, vote for 25% of the bandwidth */
+ tmp = bw * 25;
+ do_div(tmp, 100);
+
+ /*
+ * The AB vote consists of a 16 bit wide quantized level
+ * against the maximum supported bandwidth.
+ * Quantization can be calculated as below:
+ * vote = (bandwidth * 2^16) / max bandwidth
+ */
+ tmp *= MAX_AB_VOTE;
+ do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]);
+
+ bw_index |= AB_VOTE(clamp(tmp, 1, MAX_AB_VOTE));
+ bw_index |= AB_VOTE_ENABLE;
+ }
+ }
+
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
@@ -139,8 +173,10 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
return;
if (!gmu->legacy) {
- a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+ a6xx_hfi_set_freq(gmu, perf_index, bw_index);
+ /* With bandwidth voting, we now vote for all resources, so skip the OPP set */
+ if (!bw_index)
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
return;
}
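
A standalone restatement of the AB quantization above, as a sketch: it assumes AB_VOTE_MASK is GENMASK(31, 16) and AB_VOTE_ENABLE is BIT(8), matching the macros added to a6xx_hfi.h later in this patch; the helper name is invented.

    #include <stdint.h>

    #define MAX_AB_VOTE     0xfffeu         /* FIELD_MAX(GENMASK(31, 16)) - 1 */
    #define AB_VOTE_ENABLE  (1u << 8)

    /* Fold a 25% AB vote, quantized against max_bw, into the bw index. */
    static uint32_t ab_quantize(uint64_t bw, uint64_t max_bw, uint32_t bw_index)
    {
            uint64_t tmp = bw * 25 / 100;

            tmp = tmp * MAX_AB_VOTE / max_bw;       /* (bw * 2^16) / max bw */
            if (tmp < 1)
                    tmp = 1;
            if (tmp > MAX_AB_VOTE)
                    tmp = MAX_AB_VOTE;

            return bw_index | ((uint32_t)tmp << 16) | AB_VOTE_ENABLE;
    }
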
@@ -729,6 +765,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
const struct block_header *blk;
u32 reg_offset;
+ u32 ver;
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
@@ -775,6 +812,12 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
}
}
+ ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
+ DRM_INFO("Loaded GMU firmware v%u.%u.%u\n",
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
+
return 0;
}
@@ -1265,7 +1308,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
- msm_gem_object_set_name(bo->obj, name);
+ msm_gem_object_set_name(bo->obj, "%s", name);
return 0;
}
@@ -1287,6 +1330,104 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return 0;
}
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
+ const struct a6xx_info *info,
+ struct a6xx_gmu *gmu)
+{
+ const struct bcm_db *bcm_data[GMU_MAX_BCMS] = { 0 };
+ unsigned int bcm_index, bw_index, bcm_count = 0;
+
+ /* Retrieve BCM data from cmd-db */
+ for (bcm_index = 0; bcm_index < GMU_MAX_BCMS; bcm_index++) {
+ const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+ size_t count;
+
+ /* Stop at NULL terminated bcm entry */
+ if (!bcm->name)
+ break;
+
+ bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count);
+ if (IS_ERR(bcm_data[bcm_index]))
+ return PTR_ERR(bcm_data[bcm_index]);
+
+ if (!count) {
+ dev_err(gmu->dev, "invalid BCM '%s' aux data size\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm_count++;
+ }
+
+ /* Generate BCM votes values for each bandwidth & BCM */
+ for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) {
+ u32 *data = gmu->gpu_ib_votes[bw_index];
+ u32 bw = gmu->gpu_bw_table[bw_index];
+
+ /* Calculations loosely copied from bcm_aggregate() & tcs_cmd_gen() */
+ for (bcm_index = 0; bcm_index < bcm_count; bcm_index++) {
+ const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+ bool commit = false;
+ u64 peak;
+ u32 vote;
+
+ if (bcm_index == bcm_count - 1 ||
+ (bcm_data[bcm_index + 1] &&
+ bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd))
+ commit = true;
+
+ if (!bw) {
+ data[bcm_index] = BCM_TCS_CMD(commit, false, 0, 0);
+ continue;
+ }
+
+ if (bcm->fixed) {
+ u32 perfmode = 0;
+
+ /* The GMU on A6xx votes perfmode on all valid bandwidths */
+ if (!adreno_is_a7xx(adreno_gpu) ||
+ (bcm->perfmode_bw && bw >= bcm->perfmode_bw))
+ perfmode = bcm->perfmode;
+
+ data[bcm_index] = BCM_TCS_CMD(commit, true, 0, perfmode);
+ continue;
+ }
+
+ /* Multiply the bandwidth by the width of the connection */
+ peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width);
+ do_div(peak, bcm->buswidth);
+
+ /* Input bandwidth value is in KBps, scale the value to BCM unit */
+ peak *= 1000;
+ do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit));
+
+ vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
+
+ /* GMUs on A7xx vote on both x & y */
+ if (adreno_is_a7xx(adreno_gpu))
+ data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
+ else
+ data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
+ }
+ }
+
+ return 0;
+}
+
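
For a concrete feel of the non-fixed BCM path above (all numbers invented): bw = 2000000 KBps with width = 4 and buswidth = 16 gives peak = 500000; scaling by 1000 and dividing by a unit of 1000000 yields a vote of 500. A sketch of the same conversion, assuming BCM_TCS_CMD_VOTE_MASK is the 14-bit vote field from soc/qcom/tcs.h:

    #include <stdint.h>

    #define BCM_TCS_CMD_VOTE_MASK   0x3fff  /* assumed 14-bit vote field */

    /* Convert a KBps bandwidth into a single BCM vote, as in the loop above. */
    static uint32_t bcm_peak_vote(uint64_t bw_kbps, uint16_t width,
                                  uint32_t buswidth, uint32_t unit)
    {
            uint64_t peak = bw_kbps * width / buswidth;

            peak = peak * 1000 / unit;      /* scale KBps to BCM units */

            if (peak < 1)
                    peak = 1;
            if (peak > BCM_TCS_CMD_VOTE_MASK)
                    peak = BCM_TCS_CMD_VOTE_MASK;

            return (uint32_t)peak;
    }
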
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
unsigned long freq)
@@ -1390,12 +1531,15 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
* The GMU votes with the RPMh for itself and on behalf of the GPU but we need
* to construct the list of votes on the CPU and send it over. Query the RPMh
* voltage levels and build the votes
+ * The GMU can also vote for DDR interconnects, using the OPP bandwidth entries
+ * and BCM parameters to build the votes.
*/
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
@@ -1407,6 +1551,10 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+ /* Build the interconnect votes */
+ if (info->bcms && gmu->nr_gpu_bws > 1)
+ ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu);
+
return ret;
}
@@ -1442,10 +1590,43 @@ static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
return index;
}
+static int a6xx_gmu_build_bw_table(struct device *dev, unsigned long *bandwidths,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned int bandwidth = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" bandwidth level so we need to
+ * add 1 to the table size to account for it
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU bandwidth table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" bandwidth */
+ bandwidths[index++] = 0;
+
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_bw_ceil(dev, &bandwidth, 0);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ bandwidths[index++] = bandwidth++;
+ }
+
+ return index;
+}
+
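
Concretely (values invented): three bandwidth OPPs of 1000, 2000 and 3000 KBps make the function return 4 with the table below, so index 0 is the implicit "off" level the GMU expects.

    /* Resulting table for the invented example above, in KBps. */
    unsigned long gpu_bw_table[] = { 0, 1000, 2000, 3000 };
    int nr_gpu_bws = 4;     /* return value of a6xx_gmu_build_bw_table() */
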
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret = 0;
@@ -1472,6 +1653,14 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+ /*
+ * The GMU also handles GPU interconnect votes, so build a list
+ * of DDR bandwidths from the GPU OPP table
+ */
+ if (info->bcms)
+ gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev,
+ gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table));
+
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
@@ -1603,7 +1792,9 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ ret = of_dma_configure(gmu->dev, node, true);
+ if (ret)
+ return ret;
pm_runtime_enable(gmu->dev);
@@ -1668,7 +1859,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ ret = of_dma_configure(gmu->dev, node, true);
+ if (ret)
+ return ret;
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index b4a79f88ccf4..0c888b326cfb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -19,6 +19,18 @@ struct a6xx_gmu_bo {
u64 iova;
};
+#define GMU_MAX_GX_FREQS 16
+#define GMU_MAX_CX_FREQS 4
+#define GMU_MAX_BCMS 3
+
+struct a6xx_bcm {
+ char *name;
+ unsigned int buswidth;
+ bool fixed;
+ unsigned int perfmode;
+ unsigned int perfmode_bw;
+};
+
/*
* These define the different GMU wake up options - these define how both the
* CPU and the GMU bring up the hardware
@@ -79,12 +91,16 @@ struct a6xx_gmu {
int current_perf_index;
int nr_gpu_freqs;
- unsigned long gpu_freqs[16];
- u32 gx_arc_votes[16];
+ unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
+ u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+
+ int nr_gpu_bws;
+ unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
+ u32 gpu_ib_votes[GMU_MAX_GX_FREQS][GMU_MAX_BCMS];
int nr_gmu_freqs;
- unsigned long gmu_freqs[4];
- u32 cx_arc_votes[4];
+ unsigned long gmu_freqs[GMU_MAX_CX_FREQS];
+ u32 cx_arc_votes[GMU_MAX_CX_FREQS];
unsigned long freq;
@@ -193,7 +209,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 perf_index, u32 bw_index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 019610341df1..0ae29a7c8a4d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1123,12 +1123,12 @@ static int hw_init(struct msm_gpu *gpu)
/* Disable L2 bypass in the UCHE */
if (adreno_is_a7xx(adreno_gpu)) {
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
} else {
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, adreno_gpu->uche_trap_base + 0xfc0);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
}
if (!(adreno_is_a650_family(adreno_gpu) ||
@@ -2533,6 +2533,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
}
}
+ adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
+
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
a6xx_fault_handler);
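
The old constants fall out of the new single base; a compile-time check (illustrative, not part of the patch) that 0x1fffffffff000 + 0xfc0 reproduces the previous WRITE_RANGE_MAX value:

    /* Sanity check for the derived UCHE write range maximum. */
    _Static_assert(0x1fffffffff000ull + 0xfc0 == 0x1ffffffffffc0ull,
                   "UCHE WRITE_RANGE_MAX must keep its previous value");
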
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 4aceffb6aae8..9201a53dd341 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -44,6 +44,7 @@ struct a6xx_info {
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
+ const struct a6xx_bcm *bcms;
};
struct a6xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index cb8844ed46b2..0989aee3dd2c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
@@ -259,6 +260,48 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
NULL, 0);
}
+static void a6xx_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_bw_table *msg)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < GMU_MAX_BCMS; i++) {
+ if (!info->bcms[i].name)
+ break;
+ msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
+ }
+ msg->ddr_cmds_num = i;
+
+ for (i = 0; i < gmu->nr_gpu_bws; ++i)
+ for (j = 0; j < msg->ddr_cmds_num; j++)
+ msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
+ msg->bw_level_num = gmu->nr_gpu_bws;
+
+ /* Compute the wait bitmask with each BCM having the commit bit */
+ msg->ddr_wait_bitmask = 0;
+ for (j = 0; j < msg->ddr_cmds_num; j++)
+ if (msg->ddr_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
+ msg->ddr_wait_bitmask |= BIT(j);
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU.
+ * The 'CN0' BCM is used on all targets, and the votes are basically
+ * 'off' and 'on' states, with the first bit enabling the path.
+ */
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_num = 1;
+
+ msg->cnoc_cmds_data[0][0] = BCM_TCS_CMD(true, false, 0, 0);
+ msg->cnoc_cmds_data[1][0] = BCM_TCS_CMD(true, true, 0, BIT(0));
+
+ /* Compute the wait bitmask with each BCM having the commit bit */
+ msg->cnoc_wait_bitmask = 0;
+ for (j = 0; j < msg->cnoc_cmds_num; j++)
+ if (msg->cnoc_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
+ msg->cnoc_wait_bitmask |= BIT(j);
+}
+
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
@@ -664,6 +707,7 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
struct a6xx_hfi_msg_bw_table *msg;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
if (gmu->bw_table)
goto send;
@@ -672,7 +716,9 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (!msg)
return -ENOMEM;
- if (adreno_is_a618(adreno_gpu))
+ if (info->bcms && gmu->nr_gpu_bws > 1)
+ a6xx_generate_bw_table(info, gmu, msg);
+ else if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(msg);
else if (adreno_is_a619(adreno_gpu))
a619_build_bw_table(msg);
@@ -726,13 +772,13 @@ static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
sizeof(msg), NULL, 0);
}
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 freq_index, u32 bw_index)
{
struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
msg.ack_type = 1; /* blocking */
- msg.freq = index;
- msg.bw = 0; /* TODO: bus scaling */
+ msg.freq = freq_index;
+ msg.bw = bw_index;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
sizeof(msg), NULL, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 528110169398..52ba4a07d7b9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -173,6 +173,11 @@ struct a6xx_hfi_gx_bw_perf_vote_cmd {
u32 bw;
};
+#define AB_VOTE_MASK GENMASK(31, 16)
+#define MAX_AB_VOTE (FIELD_MAX(AB_VOTE_MASK) - 1)
+#define AB_VOTE(vote) FIELD_PREP(AB_VOTE_MASK, (vote))
+#define AB_VOTE_ENABLE BIT(8)
+
#define HFI_H2F_MSG_PREPARE_SLUMBER 33
struct a6xx_hfi_prep_slumber_cmd {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 75f5367e73ca..1238f3265978 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -310,10 +310,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct drm_device *drm = gpu->dev;
/* No pointer params yet */
if (*len != 0)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
switch (param) {
case MSM_PARAM_GPU_ID:
@@ -365,12 +366,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
return 0;
case MSM_PARAM_VA_START:
if (ctx->aspace == gpu->aspace)
- return -EINVAL;
+ return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_start;
return 0;
case MSM_PARAM_VA_SIZE:
if (ctx->aspace == gpu->aspace)
- return -EINVAL;
+ return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_size;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
@@ -385,15 +386,19 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_MACROTILE_MODE:
*value = adreno_gpu->ubwc_config.macrotile_mode;
return 0;
+ case MSM_PARAM_UCHE_TRAP_BASE:
+ *value = adreno_gpu->uche_trap_base;
+ return 0;
default:
- DBG("%s: invalid param: %u", gpu->name, param);
- return -EINVAL;
+ return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
+ struct drm_device *drm = gpu->dev;
+
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE:
@@ -401,11 +406,11 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
* that should be a reasonable upper bound
*/
if (len > PAGE_SIZE)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
break;
default:
if (len != 0)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
}
switch (param) {
@@ -434,11 +439,10 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
}
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return UERR(EPERM, drm, "invalid permissions");
return msm_file_private_set_sysprof(ctx, gpu, value);
default:
- DBG("%s: invalid param: %u", gpu->name, param);
- return -EINVAL;
+ return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e71f420f8b3a..dcf454629ce0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -253,6 +253,8 @@ struct adreno_gpu {
bool gmu_is_wrapper;
bool has_ray_tracing;
+
+ u64 uche_trap_base;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -559,6 +561,11 @@ static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
gpu->info->family == ADRENO_7XX_GEN3;
}
+static inline int adreno_is_a750_family(struct adreno_gpu *gpu)
+{
+ return gpu->info->family == ADRENO_7XX_GEN3;
+}
+
static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
{
/* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index eb5dfff2ec4f..bcb39807fe61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
@@ -252,25 +254,25 @@ static const struct dpu_pingpong_cfg sm8650_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_8", .id = PINGPONG_8,
+ .name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
.base = 0x7e000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
}, {
- .name = "pingpong_9", .id = PINGPONG_9,
+ .name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
.base = 0x7e400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@@ -350,6 +352,25 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
},
};
+static const struct dpu_cwb_cfg sm8650_cwb[] = {
+ {
+ .name = "cwb_0", .id = CWB_0,
+ .base = 0x66200, .len = 0x8,
+ },
+ {
+ .name = "cwb_1", .id = CWB_1,
+ .base = 0x66600, .len = 0x8,
+ },
+ {
+ .name = "cwb_2", .id = CWB_2,
+ .base = 0x7e200, .len = 0x8,
+ },
+ {
+ .name = "cwb_3", .id = CWB_3,
+ .base = 0x7e600, .len = 0x8,
+ },
+};
+
static const struct dpu_intf_cfg sm8650_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -447,6 +468,8 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.merge_3d = sm8650_merge_3d,
.wb_count = ARRAY_SIZE(sm8650_wb),
.wb = sm8650_wb,
+ .cwb_count = ARRAY_SIZE(sm8650_cwb),
+ .cwb = sm8650_cwb,
.intf_count = ARRAY_SIZE(sm8650_intf),
.intf = sm8650_intf,
.vbif_count = ARRAY_SIZE(sm8650_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index cbbdaebe357e..daef07924886 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -65,6 +65,54 @@ static const struct dpu_sspp_cfg sdm670_sspp[] = {
},
};
+static const struct dpu_lm_cfg sdm670_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_dspp_cfg sdm670_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
static const struct dpu_dsc_cfg sdm670_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
@@ -88,8 +136,10 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
.sspp = sdm670_sspp,
- .mixer_count = ARRAY_SIZE(sdm845_lm),
- .mixer = sdm845_lm,
+ .mixer_count = ARRAY_SIZE(sdm670_lm),
+ .mixer = sdm670_lm,
+ .dspp_count = ARRAY_SIZE(sdm670_dspp),
+ .dspp = sdm670_dspp,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
.dsc_count = ARRAY_SIZE(sdm670_dsc),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 6ccfde82fecd..421afacb7248 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -164,6 +164,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -171,6 +172,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index bab19ddd1d4f..641023b102bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -163,6 +163,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -170,6 +171,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
new file mode 100644
index 000000000000..621a2140f675
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_5_3_SM6150_H
+#define _DPU_5_3_SM6150_H
+
+static const struct dpu_caps sm6150_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sm6150_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = 0,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6150_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6150_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_2_4,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sm6150_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ .lm_pair = LM_1,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_1,
+ .lm_pair = LM_0,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_2,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6150_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6150_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ },
+};
+
+static const struct dpu_intf_cfg sm6150_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm6150_perf_data = {
+ .max_bw_low = 4800000,
+ .max_bw_high = 4800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6150_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 3,
+};
+
+const struct dpu_mdss_cfg dpu_sm6150_cfg = {
+ .mdss_ver = &sm6150_mdss_ver,
+ .caps = &sm6150_dpu_caps,
+ .mdp = &sm6150_mdp,
+ .ctl_count = ARRAY_SIZE(sm6150_ctl),
+ .ctl = sm6150_ctl,
+ .sspp_count = ARRAY_SIZE(sm6150_sspp),
+ .sspp = sm6150_sspp,
+ .mixer_count = ARRAY_SIZE(sm6150_lm),
+ .mixer = sm6150_lm,
+ .dspp_count = ARRAY_SIZE(sm6150_dspp),
+ .dspp = sm6150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm6150_pp),
+ .pingpong = sm6150_pp,
+ .intf_count = ARRAY_SIZE(sm6150_intf),
+ .intf = sm6150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6150_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index a57d50b1f028..e8916ae826a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index aced16e350da..f7c08e89c882 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index a1779c5597ae..08742472f9cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -257,13 +257,13 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 907b4d7ceb47..76ec72a32378 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -256,13 +256,13 @@ static const struct dpu_pingpong_cfg sa8775p_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_6", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_7", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index ad48defa154f..4d3787fceb72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index a3e60ac70689..6b112e3d17da 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -159,6 +159,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -166,6 +167,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg x1e80100_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@@ -389,8 +391,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
- .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
- .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 9f6ffd344693..7191b1a6d41b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -732,6 +732,13 @@ static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
int i;
+ /* If we cannot merge 2 LMs (no 3D mux), it is better to fail
+ * early, before even checking the width after the split.
+ */
+ if (!dpu_kms->catalog->caps->has_3d_merge &&
+ adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
+ return -E2BIG;
+
for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
@@ -1182,6 +1189,49 @@ static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
return false;
}
+static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ int total_planes = crtc->dev->mode_config.num_total_plane;
+ struct drm_atomic_state *state = crtc_state->state;
+ struct dpu_global_state *global_state;
+ struct drm_plane_state **states;
+ struct drm_plane *plane;
+ int ret;
+
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ dpu_rm_release_all_sspp(global_state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ states = kcalloc(total_planes, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return -ENOMEM;
+
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+
+ states[plane_state->normalized_zpos] = plane_state;
+ }
+
+ ret = dpu_assign_plane_resources(global_state, state, crtc, states, total_planes);
+
+done:
+ kfree(states);
+ return ret;
+}
+
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1197,6 +1247,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ if (dpu_use_virtual_planes &&
+ (crtc_state->planes_changed || crtc_state->zpos_changed)) {
+ rc = dpu_crtc_reassign_planes(crtc, crtc_state);
+ if (rc < 0)
+ return rc;
+ }
+
if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
@@ -1251,6 +1308,12 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
{
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ /* If there is no 3D mux block we cannot merge LMs, so we cannot
+ * split a large layer across 2 LMs; filter out such modes.
+ */
+ if (!dpu_kms->catalog->caps->has_3d_merge &&
+ mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
+ return MODE_BAD_HVALUE;
/*
* max crtc width is equal to the max mixer width * 2 and max height is 4K
*/
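
Both checks added to this file encode the same constraint; restated as a sketch (helper name invented; has_3d_merge and max_mixer_width are the existing dpu_caps fields):

    /* A mode wider than one LM needs two LMs merged through the 3D mux. */
    static bool mode_fits_mixers(const struct dpu_caps *caps, int hdisplay)
    {
            int max_width = caps->max_mixer_width * (caps->has_3d_merge ? 2 : 1);

            return hdisplay <= max_width;
    }
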
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 83de7564e2c1..5172ab4dea99 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -800,7 +800,7 @@ static int dpu_encoder_virt_atomic_check(
if (!crtc_state->active_changed || crtc_state->enable)
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, topology);
+ drm_enc, crtc_state, &topology);
if (!ret)
dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
global_state, crtc_state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 2cbf41f33cc0..0b342c043875 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -765,6 +765,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
#include "catalog/dpu_5_2_sm7150.h"
+#include "catalog/dpu_5_3_sm6150.h"
#include "catalog/dpu_5_4_sm6125.h"
#include "catalog/dpu_6_0_sm8250.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index c701d18c3522..4cea19e1a203 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -613,6 +613,16 @@ struct dpu_wb_cfg {
enum dpu_clk_ctrl_type clk_ctrl;
};
+/*
+ * struct dpu_cwb_cfg : MDP CWB mux instance info
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ */
+struct dpu_cwb_cfg {
+ DPU_HW_BLK_INFO;
+};
+
/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
@@ -815,6 +825,9 @@ struct dpu_mdss_cfg {
u32 dspp_count;
const struct dpu_dspp_cfg *dspp;
+ u32 cwb_count;
+ const struct dpu_cwb_cfg *cwb;
+
/* Add additional block data structures here */
const struct dpu_perf_cfg *perf;
@@ -839,6 +852,7 @@ extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
extern const struct dpu_mdss_cfg dpu_sm6115_cfg;
extern const struct dpu_mdss_cfg dpu_sm6125_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6150_cfg;
extern const struct dpu_mdss_cfg dpu_sm6350_cfg;
extern const struct dpu_mdss_cfg dpu_qcm2290_cfg;
extern const struct dpu_mdss_cfg dpu_sm6375_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c
new file mode 100644
index 000000000000..ae785f4ff0d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include <drm/drm_managed.h>
+#include "dpu_hw_cwb.h"
+
+#include <linux/bitfield.h>
+
+#define CWB_MUX 0x000
+#define CWB_MODE 0x004
+
+/* CWB mux block bit definitions */
+#define CWB_MUX_MASK GENMASK(3, 0)
+#define CWB_MODE_MASK GENMASK(2, 0)
+
+static void dpu_hw_cwb_config(struct dpu_hw_cwb *ctx,
+ struct dpu_hw_cwb_setup_cfg *cwb_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int cwb_mux_cfg = 0xF;
+ enum dpu_pingpong pp;
+ enum cwb_mode_input input;
+
+ if (!cwb_cfg)
+ return;
+
+ input = cwb_cfg->input;
+ pp = cwb_cfg->pp_idx;
+
+ if (input >= INPUT_MODE_MAX)
+ return;
+
+ /*
+ * The CWB_MUX register takes the pingpong index for the real-time
+ * display
+ */
+ if ((pp != PINGPONG_NONE) && (pp < PINGPONG_MAX))
+ cwb_mux_cfg = FIELD_PREP(CWB_MUX_MASK, pp - PINGPONG_0);
+
+ input = FIELD_PREP(CWB_MODE_MASK, input);
+
+ DPU_REG_WRITE(c, CWB_MUX, cwb_mux_cfg);
+ DPU_REG_WRITE(c, CWB_MODE, input);
+}
+
+/**
+ * dpu_hw_cwb_init() - Initializes the CWB mux hw driver object.
+ * @dev: Corresponding device for devres management
+ * @cfg: cwb catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_cwb context
+ */
+struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
+ const struct dpu_cwb_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_cwb *c;
+
+ if (!addr)
+ return ERR_PTR(-EINVAL);
+
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_CWB;
+
+ c->idx = cfg->id;
+ c->ops.config_cwb = dpu_hw_cwb_config;
+
+ return c;
+}
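
A minimal usage sketch of the new object; the catalog and mmio plumbing shown here is assumed for illustration, real callers arrive with the CWB writeback support built on top of this helper:

    /* Illustrative: tap the DSPP output of the path driven by PINGPONG_0. */
    struct dpu_hw_cwb_setup_cfg cwb_cfg = {
            .pp_idx = PINGPONG_0,
            .input = INPUT_MODE_DSPP_OUT,
    };
    struct dpu_hw_cwb *cwb = dpu_hw_cwb_init(dev, &catalog->cwb[0], mmio);

    if (!IS_ERR(cwb))
            cwb->ops.config_cwb(cwb, &cwb_cfg);
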
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
new file mode 100644
index 000000000000..96b6edf6b2bb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_CWB_H
+#define _DPU_HW_CWB_H
+
+#include "dpu_hw_util.h"
+
+struct dpu_hw_cwb;
+
+enum cwb_mode_input {
+ INPUT_MODE_LM_OUT,
+ INPUT_MODE_DSPP_OUT,
+ INPUT_MODE_MAX
+};
+
+/**
+ * struct dpu_hw_cwb_setup_cfg : Describes configuration for CWB mux
+ * @pp_idx: Index of the real-time pingpong that feeds the CWB mux
+ * @input: Input tap point
+ */
+struct dpu_hw_cwb_setup_cfg {
+ enum dpu_pingpong pp_idx;
+ enum cwb_mode_input input;
+};
+
+/**
+ * struct dpu_hw_cwb_ops : Interface to the cwb hw driver functions
+ * @config_cwb: configure CWB mux
+ */
+struct dpu_hw_cwb_ops {
+ void (*config_cwb)(struct dpu_hw_cwb *ctx,
+ struct dpu_hw_cwb_setup_cfg *cwb_cfg);
+};
+
+/**
+ * struct dpu_hw_cwb : CWB mux driver object
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: CWB index
+ * @ops: handle to operations possible for this CWB
+ */
+struct dpu_hw_cwb {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ enum dpu_cwb idx;
+
+ struct dpu_hw_cwb_ops ops;
+};
+
+/**
+ * to_dpu_hw_cwb - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cwb *to_dpu_hw_cwb(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cwb, base);
+}
+
+struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
+ const struct dpu_cwb_cfg *cfg,
+ void __iomem *addr);
+
+#endif /* _DPU_HW_CWB_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index f8806a4d317b..ba7bb05efe9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_MDSS_H
@@ -181,10 +183,10 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
- PINGPONG_6,
- PINGPONG_7,
- PINGPONG_8,
- PINGPONG_9,
+ PINGPONG_CWB_0,
+ PINGPONG_CWB_1,
+ PINGPONG_CWB_2,
+ PINGPONG_CWB_3,
PINGPONG_S0,
PINGPONG_MAX
};
@@ -350,6 +352,7 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_DSPP (1 << 10)
#define DPU_DBG_MASK_DSC (1 << 11)
#define DPU_DBG_MASK_CDM (1 << 12)
+#define DPU_DBG_MASK_CWB (1 << 13)
/**
* struct dpu_hw_tear_check - Struct contains parameters to configure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index fb9f90957762..4853e516c487 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -173,7 +173,9 @@ static void dpu_hw_wb_bind_pingpong_blk(
mux_cfg = DPU_REG_READ(c, WB_MUX);
mux_cfg &= ~0xf;
- if (pp)
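+ /* CWB pingpongs use fixed mux values: 0xd selects CWB 0/1, 0xb selects CWB 2/3 */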
+ if (pp >= PINGPONG_CWB_0)
+ mux_cfg |= (pp < PINGPONG_CWB_2) ? 0xd : 0xb;
+ else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 8b251f87a052..97e9cb8c2b09 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -51,6 +51,9 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
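+/* Opt-in knob: back planes with dynamically assigned SSPPs (virtual planes) */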
+bool dpu_use_virtual_planes;
+module_param(dpu_use_virtual_planes, bool, 0);
+
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -829,8 +832,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
type, catalog->sspp[i].features,
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
- plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
- (1UL << max_crtc_count) - 1);
+ if (dpu_use_virtual_planes)
+ plane = dpu_plane_init_virtual(dev, type, (1UL << max_crtc_count) - 1);
+ else
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -932,12 +938,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump CTL sub-blocks HW regs info */
for (i = 0; i < cat->ctl_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
- dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);
+ dpu_kms->mmio + cat->ctl[i].base, "%s",
+ cat->ctl[i].name);
/* dump DSPP sub-blocks HW regs info */
for (i = 0; i < cat->dspp_count; i++) {
base = dpu_kms->mmio + cat->dspp[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base,
+ "%s", cat->dspp[i].name);
if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
@@ -949,13 +957,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump INTF sub-blocks HW regs info */
for (i = 0; i < cat->intf_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
- dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);
+ dpu_kms->mmio + cat->intf[i].base, "%s",
+ cat->intf[i].name);
/* dump PP sub-blocks HW regs info */
for (i = 0; i < cat->pingpong_count; i++) {
base = dpu_kms->mmio + cat->pingpong[i].base;
msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
- cat->pingpong[i].name);
+ "%s", cat->pingpong[i].name);
/* TE2 sub-block has length of 0, so will not print it */
@@ -969,7 +978,8 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump SSPP sub-blocks HW regs info */
for (i = 0; i < cat->sspp_count; i++) {
base = dpu_kms->mmio + cat->sspp[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base,
+ "%s", cat->sspp[i].name);
if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
@@ -987,12 +997,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump LM sub-blocks HW regs info */
for (i = 0; i < cat->mixer_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
- dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);
+ dpu_kms->mmio + cat->mixer[i].base,
+ "%s", cat->mixer[i].name);
/* dump WB sub-blocks HW regs info */
for (i = 0; i < cat->wb_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
- dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);
+ dpu_kms->mmio + cat->wb[i].base, "%s",
+ cat->wb[i].name);
if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
@@ -1004,10 +1016,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
dpu_kms->mmio + cat->mdp[0].base, "top");
}
+ /* dump CWB sub-blocks HW regs info */
+ for (i = 0; i < cat->cwb_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->cwb[i].len,
+ dpu_kms->mmio + cat->cwb[i].base, "%s",
+ cat->cwb[i].name);
+
/* dump DSC sub-blocks HW regs info */
for (i = 0; i < cat->dsc_count; i++) {
base = dpu_kms->mmio + cat->dsc[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
+ "%s", cat->dsc[i].name);
if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
@@ -1022,7 +1040,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
if (cat->cdm)
msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
- dpu_kms->mmio + cat->cdm->base, cat->cdm->name);
+ dpu_kms->mmio + cat->cdm->base,
+ "%s", cat->cdm->name);
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ msm_disp_snapshot_add_block(disp_state, vbif->len,
+ dpu_kms->vbif[vbif->id] + vbif->base,
+ "%s", vbif->name);
+ }
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@@ -1478,6 +1505,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
+ { .compatible = "qcom,sm6150-dpu", .data = &dpu_sm6150_cfg, },
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
{ .compatible = "qcom,sm7150-dpu", .data = &dpu_sm7150_cfg, },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 88d64d43ea1a..547cdb2c0c78 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -54,6 +54,8 @@
#define ktime_compare_safe(A, B) \
ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+extern bool dpu_use_virtual_planes;
+
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -128,6 +130,8 @@ struct dpu_global_state {
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
uint32_t cdm_to_enc_id;
+
+ uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 3ffac24333a2..098abc2c0003 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -20,7 +20,6 @@
#include "msm_drv.h"
#include "msm_mdss.h"
#include "dpu_kms.h"
-#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_util.h"
#include "dpu_trace.h"
@@ -878,7 +877,7 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
drm_rect_rotate_inv(&pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- if (r_pipe_cfg->src_rect.x1 != 0)
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
@@ -888,6 +887,32 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return 0;
}
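+
+/*
+ * Check whether one rect of a wide plane can be handled by half of an
+ * SSPP in parallel multirect mode: no scaling, no 90 degree rotation,
+ * no YUV, UBWC width at most half the line width, and SmartDMA present.
+ */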
+static bool dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
+ drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
+ return false;
+
+ if (pipe_cfg->rotation & DRM_MODE_ROTATE_90)
+ return false;
+
+ if (MSM_FORMAT_IS_YUV(fmt))
+ return false;
+
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
+ return false;
+
+ if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
+ !test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
+ return false;
+
+ return true;
+}
+
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
const struct drm_crtc_state *crtc_state)
@@ -901,7 +926,6 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- uint32_t max_linewidth;
uint32_t supported_rotations;
const struct dpu_sspp_cfg *pipe_hw_caps;
const struct dpu_sspp_sub_blks *sblk;
@@ -923,8 +947,6 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
fmt = msm_framebuffer_format(new_plane_state->fb);
- max_linewidth = pdpu->catalog->caps->max_linewidth;
-
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
@@ -940,48 +962,43 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
return ret;
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
- /*
- * In parallel multirect case only the half of the usual width
- * is supported for tiled formats. If we are here, we know that
- * full width is more than max_linewidth, thus each rect is
- * wider than allowed.
- */
- if (MSM_FORMAT_IS_UBWC(fmt) &&
- drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
+ &crtc_state->adjusted_mode);
+ if (ret)
+ return ret;
+ }
- if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
- drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
- (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
- !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
- pipe_cfg->rotation & DRM_MODE_ROTATE_90 ||
- MSM_FORMAT_IS_YUV(fmt)) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
+ return 0;
+}
+
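+/*
+ * Reset both pipes to solo mode, then, if a second rect exists, try to
+ * pair them as RECT_0/RECT_1 of a single SSPP in parallel multirect mode.
+ */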
+static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dpu_sw_pipe_cfg *pipe_cfg,
+ struct dpu_sw_pipe *r_pipe, struct dpu_sw_pipe_cfg *r_pipe_cfg,
+ struct dpu_hw_sspp *sspp, const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ r_pipe->sspp = NULL;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
+ if (!dpu_plane_is_multirect_parallel_capable(pipe->sspp, pipe_cfg, fmt, max_linewidth) ||
+ !dpu_plane_is_multirect_parallel_capable(pipe->sspp, r_pipe_cfg, fmt, max_linewidth))
+ return false;
+
+ r_pipe->sspp = pipe->sspp;
- /*
- * Use multirect for wide plane. We do not support dynamic
- * assignment of SSPPs, so we know the configuration.
- */
pipe->multirect_index = DPU_SSPP_RECT_0;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
- r_pipe->sspp = pipe->sspp;
r_pipe->multirect_index = DPU_SSPP_RECT_1;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
-
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
- if (ret)
- return ret;
}
- return 0;
+ return true;
}
static int dpu_plane_atomic_check(struct drm_plane *plane,
@@ -995,14 +1012,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
const struct drm_crtc_state *crtc_state = NULL;
+ uint32_t max_linewidth = dpu_kms->catalog->caps->max_linewidth;
if (new_plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
- r_pipe->sspp = NULL;
+
+ if (!pipe->sspp)
+ return -EINVAL;
ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state);
if (ret)
@@ -1011,14 +1033,155 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->visible)
return 0;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(new_plane_state->fb),
+ max_linewidth)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid " DRM_RECT_FMT " /" DRM_RECT_FMT
+ " max_line:%u, can't use split source\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect),
+ DRM_RECT_ARG(&r_pipe_cfg->src_rect),
+ max_linewidth);
+ return -E2BIG;
+ }
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
}
+static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
+
+ if (plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ ret = dpu_plane_atomic_check_nosspp(plane, plane_state, crtc_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible) {
+ /*
+ * resources are freed by dpu_crtc_assign_plane_resources(),
+ * but clean them here.
+ */
+ pstate->pipe.sspp = NULL;
+ pstate->r_pipe.sspp = NULL;
+
+ return 0;
+ }
+
+ /*
+ * Force resource reallocation if the format of FB or src/dst have
+ * changed. We might need to allocate different SSPP or SSPPs for this
+ * plane than the one used previously.
+ */
+ if (!old_plane_state || !old_plane_state->fb ||
+ old_plane_state->src_w != plane_state->src_w ||
+ old_plane_state->src_h != plane_state->src_h ||
+ old_plane_state->crtc_w != plane_state->crtc_w ||
+ old_plane_state->crtc_h != plane_state->crtc_h ||
+ msm_framebuffer_format(old_plane_state->fb) !=
+ msm_framebuffer_format(plane_state->fb))
+ crtc_state->planes_changed = true;
+
+ return 0;
+}
+
+static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
+ struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state)
+{
+ const struct drm_crtc_state *crtc_state = NULL;
+ struct drm_plane *plane = plane_state->plane;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ struct dpu_rm_sspp_requirements reqs;
+ struct dpu_plane_state *pstate;
+ struct dpu_sw_pipe *pipe;
+ struct dpu_sw_pipe *r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg;
+ const struct msm_format *fmt;
+
+ if (plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ pstate = to_dpu_plane_state(plane_state);
+ pipe = &pstate->pipe;
+ r_pipe = &pstate->r_pipe;
+ pipe_cfg = &pstate->pipe_cfg;
+ r_pipe_cfg = &pstate->r_pipe_cfg;
+
+ pipe->sspp = NULL;
+ r_pipe->sspp = NULL;
+
+ if (!plane_state->fb)
+ return -EINVAL;
+
+ fmt = msm_framebuffer_format(plane_state->fb);
+ reqs.yuv = MSM_FORMAT_IS_YUV(fmt);
+ reqs.scale = (plane_state->src_w >> 16 != plane_state->crtc_w) ||
+ (plane_state->src_h >> 16 != plane_state->crtc_h);
+
+ reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
+
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
+
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth)) {
+ /* multirect is not possible, use two SSPP blocks */
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
+
+ return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
+}
+
+int dpu_assign_plane_resources(struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_plane_state **states,
+ unsigned int num_planes)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < num_planes; i++) {
+ struct drm_plane_state *plane_state = states[i];
+
+ if (!plane_state ||
+ !plane_state->visible)
+ continue;
+
+ ret = dpu_plane_virtual_assign_resources(crtc, global_state,
+ state, plane_state);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
{
const struct msm_format *format =
@@ -1335,12 +1498,15 @@ static void dpu_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tstage=%d\n", pstate->stage);
- drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
- drm_printf(p, "\tmultirect_mode[0]=%s\n", dpu_get_multirect_mode(pipe->multirect_mode));
- drm_printf(p, "\tmultirect_index[0]=%s\n",
- dpu_get_multirect_index(pipe->multirect_index));
- drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
- drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
+ if (pipe->sspp) {
+ drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
+ drm_printf(p, "\tmultirect_mode[0]=%s\n",
+ dpu_get_multirect_mode(pipe->multirect_mode));
+ drm_printf(p, "\tmultirect_index[0]=%s\n",
+ dpu_get_multirect_index(pipe->multirect_index));
+ drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
+ drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
+ }
if (r_pipe->sspp) {
drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
@@ -1433,39 +1599,29 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
.atomic_update = dpu_plane_atomic_update,
};
-/**
- * dpu_plane_init - create new dpu plane for the given pipe
- * @dev: Pointer to DRM device
- * @pipe: dpu hardware pipe identifier
- * @type: Plane type - PRIMARY/OVERLAY/CURSOR
- * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- *
- * Initialize the plane.
- */
-struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs)
+static const struct drm_plane_helper_funcs dpu_plane_virtual_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_virtual_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+/* initialize plane */
+static struct drm_plane *dpu_plane_init_common(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs,
+ bool inline_rotation,
+ const uint32_t *format_list,
+ uint32_t num_formats,
+ enum dpu_sspp pipe)
{
struct drm_plane *plane = NULL;
- const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
- struct dpu_hw_sspp *pipe_hw;
- uint32_t num_formats;
uint32_t supported_rotations;
int ret;
- /* initialize underlying h/w driver */
- pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
- if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
- DPU_ERROR("[%u]SSPP is invalid\n", pipe);
- return ERR_PTR(-EINVAL);
- }
-
- format_list = pipe_hw->cap->sblk->format_list;
- num_formats = pipe_hw->cap->sblk->num_formats;
-
pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base,
0xff, &dpu_plane_funcs,
format_list, num_formats,
@@ -1491,7 +1647,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
- if (pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ if (inline_rotation)
supported_rotations |= DRM_MODE_ROTATE_MASK;
drm_plane_create_rotation_property(plane,
@@ -1499,10 +1655,98 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
drm_plane_enable_fb_damage_clips(plane);
- /* success! finalize initialization */
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
+ return plane;
+}
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Initialize the plane.
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ struct dpu_hw_sspp *pipe_hw;
+
+ /* initialize underlying h/w driver */
+ pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
+ if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP is invalid\n", pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ plane = dpu_plane_init_common(dev, type, possible_crtcs,
+ pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION),
+ pipe_hw->cap->sblk->format_list,
+ pipe_hw->cap->sblk->num_formats,
+ pipe);
+ if (IS_ERR(plane))
+ return plane;
+
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
+
+ return plane;
+}
+
+/**
+ * dpu_plane_init_virtual - create new virtualized DPU plane
+ * @dev: Pointer to DRM device
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Initialize the virtual plane with no backing SSPP / pipe.
+ */
+struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ bool has_inline_rotation = false;
+ const u32 *format_list = NULL;
+ u32 num_formats = 0;
+ int i;
+
+ /* Determine the largest configuration that we can implement */
+ for (i = 0; i < kms->catalog->sspp_count; i++) {
+ const struct dpu_sspp_cfg *cfg = &kms->catalog->sspp[i];
+
+ if (test_bit(DPU_SSPP_INLINE_ROTATION, &cfg->features))
+ has_inline_rotation = true;
+
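+ /* Prefer the format list of a CSC-capable (VIG) SSPP, which covers the most formats */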
+ if (!format_list ||
+ cfg->sblk->csc_blk.len) {
+ format_list = cfg->sblk->format_list;
+ num_formats = cfg->sblk->num_formats;
+ }
+ }
+
+ plane = dpu_plane_init_common(dev, type, possible_crtcs,
+ has_inline_rotation,
+ format_list,
+ num_formats,
+ SSPP_NONE);
+ if (IS_ERR(plane))
+ return plane;
+
+ drm_plane_helper_add(plane, &dpu_plane_virtual_helper_funcs);
+
+ DPU_DEBUG("%s created virtual id:%u\n", plane->name, plane->base.id);
+
return plane;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 97090ca7842b..acd5725175cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -62,10 +62,23 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs);
+struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs);
+
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
#ifdef CONFIG_DEBUG_FS
void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
#else
static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
#endif
+int dpu_assign_plane_resources(struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_plane_state **states,
+ unsigned int num_planes);
+
#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index c247af03dc8e..5baf9df702b8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
@@ -9,6 +9,7 @@
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
+#include "dpu_hw_cwb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
@@ -27,14 +28,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
}
/**
- * struct dpu_rm_requirements - Reservation requirements parameter bundle
- * @topology: selected topology for the display
- */
-struct dpu_rm_requirements {
- struct msm_display_topology topology;
-};
-
-/**
* dpu_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
* @dev: Corresponding device for devres management
@@ -130,6 +123,19 @@ int dpu_rm_init(struct drm_device *dev,
rm->hw_wb[wb->id - WB_0] = hw;
}
+ for (i = 0; i < cat->cwb_count; i++) {
+ struct dpu_hw_cwb *hw;
+ const struct dpu_cwb_cfg *cwb = &cat->cwb[i];
+
+ hw = dpu_hw_cwb_init(dev, cwb, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed cwb object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->cwb_blks[cwb->id - CWB_0] = &hw->base;
+ }
+
for (i = 0; i < cat->ctl_count; i++) {
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
@@ -241,14 +247,13 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
* mixer in rm->pingpong_blks[].
* @dspp_idx: output parameter, index of dspp block attached to the layer
* mixer in rm->dspp_blks[].
- * @reqs: input parameter, rm requirements for HW blocks needed in the
- * datapath.
+ * @topology: selected topology for the display
* Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
@@ -273,7 +278,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
}
*pp_idx = idx;
- if (!reqs->topology.num_dspp)
+ if (!topology->num_dspp)
return true;
idx = lm_cfg->dspp - DSPP_0;
@@ -295,7 +300,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
int lm_idx[MAX_BLOCKS];
@@ -303,14 +308,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
int dspp_idx[MAX_BLOCKS] = {0};
int i, lm_count = 0;
- if (!reqs->topology.num_lm) {
- DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
+ if (!topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", topology->num_lm);
return -EINVAL;
}
/* Find a primary mixer */
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
- lm_count < reqs->topology.num_lm; i++) {
+ lm_count < topology->num_lm; i++) {
if (!rm->mixer_blks[i])
continue;
@@ -319,14 +324,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
enc_id, i, &pp_idx[lm_count],
- &dspp_idx[lm_count], reqs)) {
+ &dspp_idx[lm_count], topology)) {
continue;
}
++lm_count;
/* Valid primary mixer found, find matching peers */
- if (lm_count < reqs->topology.num_lm) {
+ if (lm_count < topology->num_lm) {
int j = _dpu_rm_get_lm_peer(rm, i);
/* ignore the peer if there is an error or if the peer was already processed */
@@ -339,7 +344,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
- reqs)) {
+ topology)) {
continue;
}
@@ -348,7 +353,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
}
- if (lm_count != reqs->topology.num_lm) {
+ if (lm_count != topology->num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@@ -357,7 +362,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
global_state->dspp_to_enc_id[dspp_idx[i]] =
- reqs->topology.num_dspp ? enc_id : 0;
+ topology->num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
@@ -594,28 +599,28 @@ static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
- &reqs->topology);
+ topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
+ ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
if (ret)
return ret;
- if (reqs->topology.needs_cdm) {
+ if (topology->needs_cdm) {
ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
@@ -626,20 +631,6 @@ static int _dpu_rm_make_reservation(
return ret;
}
-static int _dpu_rm_populate_requirements(
- struct drm_encoder *enc,
- struct dpu_rm_requirements *reqs,
- struct msm_display_topology req_topology)
-{
- reqs->topology = req_topology;
-
- DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
- reqs->topology.num_lm, reqs->topology.num_dsc,
- reqs->topology.num_intf, reqs->topology.needs_cdm);
-
- return 0;
-}
-
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
uint32_t enc_id)
{
@@ -693,9 +684,8 @@ int dpu_rm_reserve(
struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology)
+ struct msm_display_topology *topology)
{
- struct dpu_rm_requirements reqs;
int ret;
/* Check if this is just a page-flip */
@@ -710,13 +700,11 @@ int dpu_rm_reserve(
DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
enc->base.id, crtc_state->crtc->base.id);
- ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
- if (ret) {
- DPU_ERROR("failed to populate hw requirements\n");
- return ret;
- }
+ DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ topology->num_lm, topology->num_dsc,
+ topology->num_intf);
- ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
@@ -725,6 +713,88 @@ int dpu_rm_reserve(
return ret;
}
+static struct dpu_hw_sspp *dpu_rm_try_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs,
+ unsigned int type)
+{
+ uint32_t crtc_id = crtc->base.id;
+ struct dpu_hw_sspp *hw_sspp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) {
+ if (!rm->hw_sspp[i])
+ continue;
+
+ if (global_state->sspp_to_crtc_id[i])
+ continue;
+
+ hw_sspp = rm->hw_sspp[i];
+
+ if (hw_sspp->cap->type != type)
+ continue;
+
+ if (reqs->scale && !hw_sspp->cap->sblk->scaler_blk.len)
+ continue;
+
+ /* TODO: QSEED2 and RGB scalers are not yet supported */
+ if (reqs->scale && !hw_sspp->ops.setup_scaler)
+ continue;
+
+ if (reqs->yuv && !hw_sspp->cap->sblk->csc_blk.len)
+ continue;
+
+ if (reqs->rot90 && !(hw_sspp->cap->features & DPU_SSPP_INLINE_ROTATION))
+ continue;
+
+ global_state->sspp_to_crtc_id[i] = crtc_id;
+
+ return rm->hw_sspp[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * dpu_rm_reserve_sspp - Reserve the required SSPP for the provided CRTC
+ * @rm: DPU Resource Manager handle
+ * @global_state: private global state
+ * @crtc: DRM CRTC handle
+ * @reqs: SSPP required features
+ */
+struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs)
+{
+ struct dpu_hw_sspp *hw_sspp = NULL;
+
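+ /* Try the least capable type first: DMA, then RGB when scaling, then VIG */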
+ if (!reqs->scale && !reqs->yuv)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
+ if (!hw_sspp && reqs->scale)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
+ if (!hw_sspp)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
+
+ return hw_sspp;
+}
+
+/**
+ * dpu_rm_release_all_sspp - Given the CRTC, release all SSPP
+ * blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @crtc: DRM CRTC handle
+ */
+void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
+ struct drm_crtc *crtc)
+{
+ uint32_t crtc_id = crtc->base.id;
+
+ _dpu_rm_clear_mapping(global_state->sspp_to_crtc_id,
+ ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
+}
+
/**
* dpu_rm_get_assigned_resources - Get hw resources of the given type that are
* assigned to this encoder
@@ -859,4 +929,11 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->cdm_blk,
global_state->cdm_to_enc_id);
drm_puts(p, "\n");
+
+ drm_puts(p, "\tsspp=");
+ /* skip SSPP_NONE and start from the next index */
+ for (i = SSPP_NONE + 1; i < ARRAY_SIZE(global_state->sspp_to_crtc_id); i++)
+ dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
+ global_state->sspp_to_crtc_id[i]);
+ drm_puts(p, "\n");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index ea0e49cb7b0d..99bd594ee0d1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -20,6 +20,7 @@ struct dpu_global_state;
* @ctl_blks: array of ctl hardware resources
* @hw_intf: array of intf hardware resources
* @hw_wb: array of wb hardware resources
+ * @cwb_blks: array of cwb hardware resources
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
@@ -30,6 +31,7 @@ struct dpu_rm {
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
+ struct dpu_hw_blk *cwb_blks[CWB_MAX - CWB_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
@@ -37,6 +39,12 @@ struct dpu_rm {
struct dpu_hw_blk *cdm_blk;
};
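+
+/**
+ * struct dpu_rm_sspp_requirements - SSPP capabilities required by a plane
+ * @yuv: CSC block required (YUV format)
+ * @scale: scaler block required
+ * @rot90: inline rotation support required
+ */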
+struct dpu_rm_sspp_requirements {
+ bool yuv;
+ bool scale;
+ bool rot90;
+};
+
/**
* struct msm_display_topology - defines a display topology pipeline
* @num_lm: number of layer mixers used
@@ -63,11 +71,19 @@ int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology);
+ struct msm_display_topology *topology);
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc);
+struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs);
+
+void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
+ struct drm_crtc *crtc);
+
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 16f144cbc0c9..8ff496082902 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -42,9 +42,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
- } else if (conn_state->connector->status != connector_status_connected) {
- DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
- return -EINVAL;
}
crtc = conn_state->crtc;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 576995ddce37..8bbc7fb881d5 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -389,7 +389,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: different regulators in other cases? */
mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
- mdp4_lcdc_encoder->regs[1].supply = "lvds-vccs-3p3v";
+ mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
ret = devm_regulator_bulk_get(dev->dev,
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
index e75b97127c0d..2be00b11e557 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -109,7 +109,7 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev)
mutex_init(&kms->dump_mutex);
- kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+ kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot");
if (IS_ERR(kms->dump_worker))
DRM_ERROR("failed to create disp state task\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 74e01a5dd419..70fdc9fe228a 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -14,6 +14,7 @@
#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_panel.h"
+#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
@@ -28,251 +29,64 @@ struct msm_dp_audio_private {
struct msm_dp_audio msm_dp_audio;
};
-static u32 msm_dp_audio_get_header(struct msm_dp_catalog *catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header)
-{
- return msm_dp_catalog_audio_get_header(catalog, sdp, header);
-}
-
-static void msm_dp_audio_set_header(struct msm_dp_catalog *catalog,
- u32 data,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header)
-{
- msm_dp_catalog_audio_set_header(catalog, sdp, header, data);
-}
-
static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x02;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
- new_value = value;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
-
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
-
- new_value = audio->channels - 1;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
-
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
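+ /* Audio_Stream SDP: HB1 = 0x02 is the SDP type, HB3 carries channel count - 1 */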
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x02,
+ .HB2 = 0x00,
+ .HB3 = audio->channels - 1,
+ };
+
+ msm_dp_catalog_write_audio_stream(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x1;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x17;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
-
- new_value = (0x0 | (0x11 << 2));
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
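+ /* Audio_TimeStamp SDP: HB1 = 0x01 is the SDP type */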
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x01,
+ .HB2 = 0x17,
+ .HB3 = 0x0 | (0x11 << 2),
+ };
+
+ msm_dp_catalog_write_audio_timestamp(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x84;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x1b;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
-
- new_value = (0x0 | (0x11 << 2));
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- new_value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
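+ /* Audio InfoFrame SDP: HB1 = 0x84 is the CTA-861 audio infoframe type */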
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x84,
+ .HB2 = 0x1b,
+ .HB3 = 0x0 | (0x11 << 2),
+ };
+
+ msm_dp_catalog_write_audio_infoframe(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x05;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x0F;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
-
- new_value = 0x0;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
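+ /* Audio_CopyManagement SDP: HB1 = 0x05 is the SDP type */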
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x05,
+ .HB2 = 0x0f,
+ .HB3 = 0x00,
+ };
+
+ msm_dp_catalog_write_audio_copy_mgmt(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x06;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x0F;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
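+ /* Audio_ISRC SDP: HB1 = 0x06 is the SDP type */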
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x06,
+ .HB2 = 0x0f,
+ .HB3 = 0x00,
+ };
+
+ msm_dp_catalog_write_audio_isrc(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
@@ -329,10 +143,10 @@ static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
safe_to_exit_level = 5;
break;
default:
+ safe_to_exit_level = 14;
drm_dbg_dp(audio->drm_dev,
"setting the default safe_to_exit_level = %u\n",
safe_to_exit_level);
- safe_to_exit_level = 14;
break;
}
@@ -414,8 +228,10 @@ static int msm_dp_audio_get_eld(struct device *dev,
return -ENODEV;
}
+ mutex_lock(&msm_dp_display->connector->eld_mutex);
memcpy(buf, msm_dp_display->connector->eld,
min(sizeof(msm_dp_display->connector->eld), len));
+ mutex_unlock(&msm_dp_display->connector->eld_mutex);
return 0;
}
@@ -537,14 +353,13 @@ int msm_dp_register_audio_driver(struct device *dev,
}
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_panel *panel,
struct msm_dp_catalog *catalog)
{
int rc = 0;
struct msm_dp_audio_private *audio;
struct msm_dp_audio *msm_dp_audio;
- if (!pdev || !panel || !catalog) {
+ if (!pdev || !catalog) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
@@ -561,8 +376,6 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
msm_dp_audio = &audio->msm_dp_audio;
- msm_dp_catalog_audio_init(catalog);
-
return msm_dp_audio;
error:
return ERR_PTR(rc);
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index 1c9efaaa40e5..beea34cbab77 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -8,7 +8,6 @@
#include <linux/platform_device.h>
-#include "dp_panel.h"
#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
@@ -28,14 +27,12 @@ struct msm_dp_audio {
* Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
- * @panel: an instance of msm_dp_panel module.
* @catalog: an instance of msm_dp_catalog module.
*
* Returns the error code in case of failure, otherwise
* an instance of the newly created msm_dp_audio module.
*/
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_panel *panel,
struct msm_dp_catalog *catalog);
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index b4c8856fb25d..7b7eadb2f83b 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -79,7 +79,6 @@ struct msm_dp_catalog_private {
struct device *dev;
struct drm_device *drm_dev;
struct dss_io_data io;
- u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct msm_dp_catalog msm_dp_catalog;
};
@@ -276,43 +275,6 @@ int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_
min(wait_us, 2000), wait_us);
}
-static void dump_regs(void __iomem *base, int len)
-{
- int i;
- u32 x0, x4, x8, xc;
- u32 addr_off = 0;
-
- len = DIV_ROUND_UP(len, 16);
- for (i = 0; i < len; i++) {
- x0 = readl_relaxed(base + addr_off);
- x4 = readl_relaxed(base + addr_off + 0x04);
- x8 = readl_relaxed(base + addr_off + 0x08);
- xc = readl_relaxed(base + addr_off + 0x0c);
-
- pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc);
- addr_off += 16;
- }
-}
-
-void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dss_io_data *io = &catalog->io;
-
- pr_info("AHB regs\n");
- dump_regs(io->ahb.base, io->ahb.len);
-
- pr_info("AUXCLK regs\n");
- dump_regs(io->aux.base, io->aux.len);
-
- pr_info("LCLK regs\n");
- dump_regs(io->link.base, io->link.len);
-
- pr_info("P0CLK regs\n");
- dump_regs(io->p0.base, io->p0.len);
-}
-
u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
{
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
@@ -1036,7 +998,6 @@ void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
hsync_period);
@@ -1160,38 +1121,75 @@ struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
return &catalog->msm_dp_catalog;
}
-u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header)
+void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
{
- struct msm_dp_catalog_private *catalog;
- u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
+
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_1, header[1]);
+}
+
+void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- sdp_map = catalog->audio_map;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- return msm_dp_read_link(catalog, sdp_map[sdp][header]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
}
-void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header,
- u32 data)
+void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
{
- struct msm_dp_catalog_private *catalog;
- u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- if (!msm_dp_catalog)
- return;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
+}
+
+void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- sdp_map = catalog->audio_map;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- msm_dp_write_link(catalog, sdp_map[sdp][header], data);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
+}
+
+void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ struct dp_sdp_header tmp = *sdp_hdr;
+ u32 header[2];
+ u32 reg;
+
+ /* XXX: is it necessary to preserve this field? */
+ reg = msm_dp_read_link(catalog, MMSS_DP_AUDIO_ISRC_1);
+ tmp.HB3 = FIELD_GET(HEADER_3_MASK, reg);
+
+ msm_dp_utils_pack_sdp_header(&tmp, header);
+
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_1, header[1]);
}
void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
@@ -1277,47 +1275,6 @@ void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
-void msm_dp_catalog_audio_init(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
-
- static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
- {
- MMSS_DP_AUDIO_STREAM_0,
- MMSS_DP_AUDIO_STREAM_1,
- MMSS_DP_AUDIO_STREAM_1,
- },
- {
- MMSS_DP_AUDIO_TIMESTAMP_0,
- MMSS_DP_AUDIO_TIMESTAMP_1,
- MMSS_DP_AUDIO_TIMESTAMP_1,
- },
- {
- MMSS_DP_AUDIO_INFOFRAME_0,
- MMSS_DP_AUDIO_INFOFRAME_1,
- MMSS_DP_AUDIO_INFOFRAME_1,
- },
- {
- MMSS_DP_AUDIO_COPYMANAGEMENT_0,
- MMSS_DP_AUDIO_COPYMANAGEMENT_1,
- MMSS_DP_AUDIO_COPYMANAGEMENT_1,
- },
- {
- MMSS_DP_AUDIO_ISRC_0,
- MMSS_DP_AUDIO_ISRC_1,
- MMSS_DP_AUDIO_ISRC_1,
- },
- };
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- catalog->audio_map = sdp_map;
-}
-
void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
{
struct msm_dp_catalog_private *catalog;
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index e932b17eecbf..6678b0ac9a67 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -31,22 +31,6 @@
#define DP_HW_VERSION_1_0 0x10000000
#define DP_HW_VERSION_1_2 0x10020000
-enum msm_dp_catalog_audio_sdp_type {
- DP_AUDIO_SDP_STREAM,
- DP_AUDIO_SDP_TIMESTAMP,
- DP_AUDIO_SDP_INFOFRAME,
- DP_AUDIO_SDP_COPYMANAGEMENT,
- DP_AUDIO_SDP_ISRC,
- DP_AUDIO_SDP_MAX,
-};
-
-enum msm_dp_catalog_audio_header_type {
- DP_AUDIO_SDP_HEADER_1,
- DP_AUDIO_SDP_HEADER_2,
- DP_AUDIO_SDP_HEADER_3,
- DP_AUDIO_SDP_HEADER_MAX,
-};
-
struct msm_dp_catalog {
bool wide_bus_en;
};
@@ -104,7 +88,6 @@ int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 t
u32 sync_start, u32 width_blanking, u32 msm_dp_active);
void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog);
void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
struct drm_display_mode *drm_mode);
void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
@@ -112,17 +95,19 @@ void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
/* DP Audio APIs */
-u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header);
-void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header,
- u32 data);
+void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
-void msm_dp_catalog_audio_init(struct msm_dp_catalog *catalog);
void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index bc2ca8133b79..9c463ae2f8fa 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -178,7 +178,6 @@ static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl
u32 cc, tb;
msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
msm_dp_ctrl_config_ctrl(ctrl);
@@ -2071,6 +2070,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index aff51bb973eb..24dd37f1682b 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -722,9 +722,6 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
{
int rc = 0;
struct device *dev = &dp->msm_dp_display.pdev->dev;
- struct msm_dp_panel_in panel_in = {
- .dev = dev,
- };
struct phy *phy;
phy = devm_phy_get(dev, "dp");
@@ -765,11 +762,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_link;
}
- panel_in.aux = dp->aux;
- panel_in.catalog = dp->catalog;
- panel_in.link = dp->link;
-
- dp->panel = msm_dp_panel_get(&panel_in);
+ dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->catalog);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@@ -787,7 +780,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->panel, dp->catalog);
+ dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 5d7eaa31bf31..92415bf8aa16 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -317,17 +317,6 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return 0;
}
-void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel)
-{
- struct msm_dp_catalog *catalog;
- struct msm_dp_panel_private *panel;
-
- panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
-
- msm_dp_catalog_dump_regs(catalog);
-}
-
int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
{
u32 data, total_ver, total_hor;
@@ -486,25 +475,26 @@ static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
return 0;
}
-struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in)
+struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
+ struct msm_dp_link *link, struct msm_dp_catalog *catalog)
{
struct msm_dp_panel_private *panel;
struct msm_dp_panel *msm_dp_panel;
int ret;
- if (!in->dev || !in->catalog || !in->aux || !in->link) {
+ if (!dev || !catalog || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
- panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
return ERR_PTR(-ENOMEM);
- panel->dev = in->dev;
- panel->aux = in->aux;
- panel->catalog = in->catalog;
- panel->link = in->link;
+ panel->dev = dev;
+ panel->aux = aux;
+ panel->catalog = catalog;
+ panel->link = link;
msm_dp_panel = &panel->msm_dp_panel;
msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 0e944db3adf2..4906f4f09f24 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -21,13 +21,6 @@ struct msm_dp_display_mode {
bool out_fmt_is_yuv_420;
};
-struct msm_dp_panel_in {
- struct device *dev;
- struct drm_dp_aux *aux;
- struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
-};
-
struct msm_dp_panel_psr {
u8 version;
u8 capabilities;
@@ -55,7 +48,6 @@ struct msm_dp_panel {
int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
-void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
@@ -92,6 +84,7 @@ static inline bool is_lane_count_valid(u32 lane_count)
lane_count == 4);
}
-struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in);
+struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
+ struct msm_dp_link *link, struct msm_dp_catalog *catalog);
void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c
index 2a40f07fe2d5..4a5ebb0c33b8 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.c
+++ b/drivers/gpu/drm/msm/dp/dp_utils.c
@@ -74,14 +74,8 @@ u8 msm_dp_utils_calculate_parity(u32 data)
return parity_byte;
}
-ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
+void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2])
{
- size_t length;
-
- length = sizeof(header_buff);
- if (length < DP_SDP_HEADER_SIZE)
- return -ENOSPC;
-
header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) |
FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) |
FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) |
@@ -91,6 +85,4 @@ ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *head
FIELD_PREP(PARITY_2_MASK, msm_dp_utils_calculate_parity(sdp_header->HB2)) |
FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) |
FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3));
-
- return length;
}
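
The length check removed above was dead code: inside the function, header_buff is a pointer parameter, so sizeof(header_buff) measures the pointer itself, not the caller's buffer. A minimal sketch of the pitfall (illustrative, not part of the patch):

/* Illustrative only: an array parameter decays to a pointer, so this
 * "check" compared sizeof(u32 *), never the caller's array size.
 */
static size_t bogus_length(u32 header_buff[2])
{
	return sizeof(header_buff);	/* == sizeof(u32 *), not 2 * sizeof(u32) */
}

Switching the prototype to void with an explicit u32 header_buff[2] documents the two-dword contract instead of relying on a check that could never fire.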
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h
index 88d53157f5b5..2e4f98a863c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.h
+++ b/drivers/gpu/drm/msm/dp/dp_utils.h
@@ -31,6 +31,6 @@
u8 msm_dp_utils_get_g0_value(u8 data);
u8 msm_dp_utils_get_g1_value(u8 data);
u8 msm_dp_utils_calculate_parity(u32 data);
-ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
+void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2]);
#endif /* _DP_UTILS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 10ba7d153d1c..7754dcec33d0 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -286,6 +286,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 4c9b4b37681b..120cb65164c1 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -23,6 +23,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
+#define MSM_DSI_6G_VER_MINOR_V2_3_1 0x20030001
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a98d24b7cb00..007311c21fda 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1831,7 +1831,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
ret = 0;
- if (of_property_read_bool(np, "syscon-sfpb")) {
+ if (of_property_present(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index dd58bc0a49eb..c0bcc6828963 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -567,6 +567,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_14nm_8953_cfgs },
{ .compatible = "qcom,sm6125-dsi-phy-14nm",
.data = &dsi_phy_14nm_2290_cfgs },
+ { .compatible = "qcom,sm6150-dsi-phy-14nm",
+ .data = &dsi_phy_14nm_6150_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 4953459edd63..8985818bb2e0 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -46,6 +46,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 1723f0e4faa4..2c3cbe0f2870 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -1032,6 +1032,10 @@ static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 73400 },
};
+static const struct regulator_bulk_data dsi_phy_14nm_36mA_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 36000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
@@ -1097,3 +1101,20 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
.io_start = { 0x5e94400 },
.num_dsi_phy = 1,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index a719fd33d9d8..33bb48ae58a2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -137,7 +137,7 @@ static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
base <<= (digclk_divsel == 2 ? 1 : 0);
- return (base <= 2046 ? base : 2046);
+ return base;
}
static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 9c45d641b521..a7a2384044ff 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -115,7 +115,7 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
timer->kms = kms;
timer->crtc_idx = crtc_idx;
- timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
+ timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
int ret = PTR_ERR(timer->worker);
timer->worker = NULL;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ffbcc97b5018..ff7a7a9f7b0d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,7 +11,7 @@
#include <linux/of_address.h>
#include <linux/uaccess.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -538,7 +538,7 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
/* Only supported if per-process address space is supported: */
if (priv->gpu->aspace == ctx->aspace)
- return -EOPNOTSUPP;
+ return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
@@ -910,7 +910,6 @@ static const struct drm_driver msm_driver = {
.fops = &fops,
.name = "msm",
.desc = "MSM Snapdragon DRM",
- .date = "20130625",
.major = MSM_VERSION_MAJOR,
.minor = MSM_VERSION_MINOR,
.patchlevel = MSM_VERSION_PATCHLEVEL,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d8c9a1b19263..fee31680a6d5 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -28,6 +28,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
@@ -506,6 +507,12 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode);
+/* Helper for returning a UABI error with optional logging which can make
+ * it easier for userspace to understand what it is doing wrong.
+ */
+#define UERR(err, drm, fmt, ...) \
+ ({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
+
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
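
Illustrative usage of the new helper, mirroring the call-site conversions later in this patch (a condensed sketch, not an additional change):

/* UERR() logs at DRM_UT_DRIVER level and evaluates to the negated errno,
 * so validation paths collapse to a single statement.
 */
static int check_pipe(struct drm_device *dev, u32 flags)
{
	if (MSM_PIPE_ID(flags) != MSM_PIPE_3D0)
		return UERR(EINVAL, dev, "invalid pipe");

	return 0;
}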
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index fba78193127d..dee470403036 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -20,8 +20,8 @@
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
* error msgs for debugging, but we don't spam dmesg by default
*/
-#define SUBMIT_ERROR(submit, fmt, ...) \
- DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
+#define SUBMIT_ERROR(err, submit, fmt, ...) \
+ UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)
/*
* Cmdstream submission:
@@ -142,8 +142,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
- SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);
i = 0;
goto out;
}
@@ -162,8 +161,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
goto out_unlock;
}
@@ -206,14 +204,12 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
- SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type);
}
if (submit_cmd.size % 4) {
- SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
goto out;
}
@@ -371,9 +367,8 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct drm_gem_object **obj, uint64_t *iova)
{
if (idx >= submit->nr_bos) {
- SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
- idx, submit->nr_bos);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
}
if (obj)
@@ -392,10 +387,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint32_t *ptr;
int ret = 0;
- if (offset % 4) {
- SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
- return -EINVAL;
- }
+ if (offset % 4)
+ return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset);
/* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d..
@@ -414,9 +407,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint64_t iova;
if (submit_reloc.submit_offset % 4) {
- SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
- submit_reloc.submit_offset);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
goto out;
}
@@ -425,8 +417,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
- SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i);
goto out;
}
@@ -513,12 +504,12 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (syncobj_desc.point &&
!drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
+ ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported");
break;
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
@@ -531,7 +522,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
syncobjs[i] =
drm_syncobj_find(file, syncobj_desc.handle);
if (!syncobjs[i]) {
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i);
break;
}
}
@@ -588,14 +579,14 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].point = syncobj_desc.point;
if (syncobj_desc.flags) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid syncobj flags");
break;
}
if (syncobj_desc.point) {
if (!drm_core_check_feature(dev,
DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
break;
}
@@ -609,7 +600,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].syncobj =
drm_syncobj_find(file, syncobj_desc.handle);
if (!post_deps[i].syncobj) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid syncobj handle");
break;
}
}
@@ -677,10 +668,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* be more clever to dispatch to appropriate gpu module:
*/
if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid pipe");
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid flags");
if (args->flags & MSM_SUBMIT_SUDO) {
if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
@@ -724,7 +715,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid in-fence");
goto out_unlock;
}
@@ -787,10 +778,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
if (!submit->cmd[i].size ||
- ((submit->cmd[i].size + submit->cmd[i].offset) >
- obj->size / 4)) {
- SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
- ret = -EINVAL;
+ (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
+ ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n",
+ submit->cmd[i].size * 4);
goto out;
}
@@ -800,8 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
continue;
if (!gpu->allow_relocs) {
- SUBMIT_ERROR(submit, "relocs not allowed\n");
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "relocs not allowed\n");
goto out;
}
@@ -827,7 +816,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
spin_unlock(&queue->idr_lock);
idr_preload_end();
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid in-fence-sn");
goto out;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0d4a3744cfcb..8557998e0c92 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -859,7 +859,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->funcs = funcs;
gpu->name = name;
- gpu->worker = kthread_create_worker(0, "gpu-worker");
+ gpu->worker = kthread_run_worker(0, "gpu-worker");
if (IS_ERR(gpu->worker)) {
ret = PTR_ERR(gpu->worker);
gpu->worker = NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index f3326d09bdbc..38965e12a6bf 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -244,7 +244,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- priv->kms = NULL;
return ret;
}
@@ -269,7 +268,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
/* initialize event thread */
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
- ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
+ ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
ret = PTR_ERR(ev_thread->worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 76b6ae35a3cb..dcb49fd30402 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -166,22 +166,32 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
- writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
+
+ if (data->ubwc_enc_version == UBWC_1_0)
+ value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
+
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
- u32 value = (data->ubwc_swizzle & 0x1) |
- (data->highest_bank_bit & 0x3) << 4 |
- (data->macrotile_mode & 0x1) << 12;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
if (data->ubwc_enc_version == UBWC_3_0)
- value |= BIT(10);
+ value |= MDSS_UBWC_STATIC_UBWC_AMSBC;
if (data->ubwc_enc_version == UBWC_1_0)
- value |= BIT(8);
+ value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
@@ -189,10 +199,14 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
- u32 value = (data->ubwc_swizzle & 0x7) |
- (data->ubwc_static & 0x1) << 3 |
- (data->highest_bank_bit & 0x7) << 4 |
- (data->macrotile_mode & 0x1) << 12;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
+
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
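
The named MDSS_UBWC_STATIC_* helpers replacing the open-coded shifts are generated from the UBWC_STATIC bitfield layout added to mdss.xml later in this patch; a sketch of what they are assumed to expand to (illustrative, not the generated header):

/* Assumed FIELD_PREP-style expansion of the generated helpers, based on
 * the UBWC_STATIC bitfields (swizzle 2:0, bank spread 3, highest bank
 * bit 6:4, min access length 9:8, AMSBC 10, macrotile mode 12).
 */
#define MDSS_UBWC_STATIC_UBWC_SWIZZLE(x)	FIELD_PREP(GENMASK(2, 0), (x))
#define MDSS_UBWC_STATIC_UBWC_BANK_SPREAD	BIT(3)
#define MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(x)	FIELD_PREP(GENMASK(6, 4), (x))
#define MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(x)	FIELD_PREP(GENMASK(9, 8), (x))
#define MDSS_UBWC_STATIC_UBWC_AMSBC		BIT(10)
#define MDSS_UBWC_STATIC_MACROTILE_MODE		BIT(12)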
@@ -572,16 +586,17 @@ static const struct msm_mdss_data sa8775p_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 4,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
- .ubwc_static = 0x1e,
+ .ubwc_swizzle = 6,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@@ -590,9 +605,9 @@ static const struct msm_mdss_data sc7280_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 1,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
@@ -600,7 +615,7 @@ static const struct msm_mdss_data sc8180x_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -608,9 +623,9 @@ static const struct msm_mdss_data sc8280xp_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -632,7 +647,7 @@ static const struct msm_mdss_data sm6350_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 6,
- .ubwc_static = 0x1e,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 1,
.reg_bus_bw = 76800,
};
@@ -655,7 +670,7 @@ static const struct msm_mdss_data sm6115_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 7,
- .ubwc_static = 0x11f,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@@ -667,14 +682,21 @@ static const struct msm_mdss_data sm6125_data = {
.highest_bank_bit = 1,
};
+static const struct msm_mdss_data sm6150_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
+};
+
static const struct msm_mdss_data sm8250_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -682,10 +704,10 @@ static const struct msm_mdss_data sm8350_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
@@ -693,10 +715,10 @@ static const struct msm_mdss_data sm8550_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 57000,
};
@@ -704,10 +726,10 @@ static const struct msm_mdss_data x1e80100_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
/* TODO: Add reg_bus_bw with real value */
};
@@ -724,6 +746,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
+ { .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
index 3afef4b1786d..14dc53704314 100644
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ b/drivers/gpu/drm/msm/msm_mdss.h
@@ -11,9 +11,9 @@ struct msm_mdss_data {
/* can be read from register 0x58 */
u32 ubwc_dec_version;
u32 ubwc_swizzle;
- u32 ubwc_static;
u32 highest_bank_bit;
- u32 macrotile_mode;
+ bool ubwc_bank_spread;
+ bool macrotile_mode;
u32 reg_bus_bw;
};
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 2fc3eaf81f44..7fed1de63b5d 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -18,7 +18,7 @@ int msm_file_private_set_sysprof(struct msm_file_private *ctx,
switch (sysprof) {
default:
- return -EINVAL;
+ return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
case 2:
pm_runtime_get_sync(&gpu->pdev->dev);
fallthrough;
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
index 6531749d30f4..3d2cc339b8f1 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
@@ -52,6 +52,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x23fd" name="GMU_DCVS_PERF_SETTING"/>
<reg32 offset="0x23fe" name="GMU_DCVS_BW_SETTING"/>
<reg32 offset="0x23ff" name="GMU_DCVS_RETURN"/>
+ <reg32 offset="0x2bf8" name="GMU_CORE_FW_VERSION">
+ <bitfield name="MAJOR" low="28" high="31"/>
+ <bitfield name="MINOR" low="16" high="27"/>
+ <bitfield name="STEP" low="0" high="15"/>
+ </reg32>
<reg32 offset="0x4c00" name="GMU_ICACHE_CONFIG"/>
<reg32 offset="0x4c01" name="GMU_DCACHE_CONFIG"/>
<reg32 offset="0x4c0f" name="GMU_SYS_BUS_CONFIG"/>
diff --git a/drivers/gpu/drm/msm/registers/display/mdss.xml b/drivers/gpu/drm/msm/registers/display/mdss.xml
index ac85caf1575c..6e9f81cd4690 100644
--- a/drivers/gpu/drm/msm/registers/display/mdss.xml
+++ b/drivers/gpu/drm/msm/registers/display/mdss.xml
@@ -21,7 +21,16 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/>
- <reg32 offset="0x00144" name="UBWC_STATIC"/>
+ <reg32 offset="0x00144" name="UBWC_STATIC">
+ <bitfield name="UBWC_SWIZZLE" low="0" high="2"/>
+ <bitfield name="UBWC_BANK_SPREAD" pos="3"/>
+ <!-- high=5 for UBWC < 4.0 -->
+ <bitfield name="HIGHEST_BANK_BIT" low="4" high="6"/>
+ <bitfield name="UBWC_MIN_ACC_LEN" low="8" high="9"/>
+ <bitfield name="UBWC_AMSBC" pos="10"/>
+ <bitfield name="MACROTILE_MODE" pos="12"/>
+ </reg32>
+
<reg32 offset="0x00150" name="UBWC_CTRL_2"/>
<reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/>
</domain>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 51ae0b51b1e8..8ee00f59ca82 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -14,9 +14,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fbdev_dma.h>
@@ -248,7 +248,6 @@ static const struct drm_driver lcdif_driver = {
.fops = &fops,
.name = "imx-lcdif",
.desc = "i.MX LCDIF Controller DRM",
- .date = "20220417",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 6b95e4eb3e4e..59020862cf65 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -17,9 +17,9 @@
#include <linux/property.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -336,7 +336,6 @@ static const struct drm_driver mxsfb_driver = {
.fops = &fops,
.name = "mxsfb-drm",
.desc = "MXSFB Controller DRM",
- .date = "20160824",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index eed579a6c858..62d72b7a8d04 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -992,8 +992,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (!mst_state->pbn_div.full) {
struct nouveau_encoder *outp = mstc->mstm->outp;
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
- outp->dp.link_bw, outp->dp.link_nr);
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(outp->dp.link_bw, outp->dp.link_nr);
}
slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
@@ -1265,8 +1264,8 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->mstm = mstm;
mstc->port = port;
- ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_dynamic_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
kfree(*pmstc);
*pmstc = NULL;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/log.h b/drivers/gpu/drm/nouveau/include/nvif/log.h
new file mode 100644
index 000000000000..64f6f8fc6141
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/log.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __NVIF_LOG_H__
+#define __NVIF_LOG_H__
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * nvif_log - structure for tracking logging buffers
+ * @entry: an entry in a list of struct nvif_logs
+ * @shutdown: pointer to function to call to clean up
+ *
+ * Structure used to track logging buffers so that they can be cleaned up
+ * when the module exits.
+ *
+ * The @shutdown function is called when the module exits. It should free all
+ * backing resources, such as logging buffers.
+ */
+struct nvif_log {
+ struct list_head entry;
+ void (*shutdown)(struct nvif_log *log);
+};
+
+/**
+ * nvif_logs - linked list of nvif_log objects
+ */
+struct nvif_logs {
+ struct list_head head;
+};
+
+#define NVIF_LOGS_DECLARE(logs) \
+ struct nvif_logs logs = { LIST_HEAD_INIT(logs.head) }
+
+static inline void nvif_log_shutdown(struct nvif_logs *logs)
+{
+ if (!list_empty(&logs->head)) {
+ struct nvif_log *log, *n;
+
+ list_for_each_entry_safe(log, n, &logs->head, entry) {
+ /* shutdown() should also delete the log entry */
+ log->shutdown(log);
+ }
+ }
+}
+
+extern struct nvif_logs gsp_logs;
+
+#endif
+
+#endif
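
A sketch of the intended usage pattern for this interface (names here are illustrative; the r535 GSP code later in this patch registers its own wrapper the same way):

/* Illustrative only: embed struct nvif_log, register it on a list, and
 * have shutdown() free the backing storage and unlink the entry.
 */
struct my_gsp_log {
	struct nvif_log log;
	void *buf;
};

static void my_gsp_log_shutdown(struct nvif_log *log)
{
	struct my_gsp_log *mine = container_of(log, struct my_gsp_log, log);

	list_del(&log->entry);	/* shutdown() also deletes the log entry */
	kfree(mine->buf);
	kfree(mine);
}

/* At registration time:
 *	mine->log.shutdown = my_gsp_log_shutdown;
 *	list_add(&mine->log.entry, &gsp_logs.head);
 */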
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index a2055f2a014a..5c5f4607fcc9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -5,10 +5,13 @@
#include <core/falcon.h>
#include <core/firmware.h>
+#include <linux/debugfs.h>
+
#define GSP_PAGE_SHIFT 12
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
struct nvkm_gsp_mem {
+ struct device *dev;
size_t size;
void *data;
dma_addr_t addr;
@@ -219,6 +222,24 @@ struct nvkm_gsp {
/* The size of the registry RPC */
size_t registry_rpc_size;
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * Logging buffers in debugfs. The wrapper objects need to remain
+ * in memory until the dentry is deleted.
+ */
+ struct {
+ struct dentry *parent;
+ struct dentry *init;
+ struct dentry *rm;
+ struct dentry *intr;
+ struct dentry *pmu;
+ } debugfs;
+ struct debugfs_blob_wrapper blob_init;
+ struct debugfs_blob_wrapper blob_intr;
+ struct debugfs_blob_wrapper blob_rm;
+ struct debugfs_blob_wrapper blob_pmu;
+#endif
};
static inline bool
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8f0c69aad248..21b56cc7605c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -384,7 +384,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return NULL;
- return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ return edid;
}
bool nouveau_acpi_video_backlight_use_native(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index e83db051e851..200e65a7cefc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -313,3 +313,19 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
kfree(drm->debugfs);
drm->debugfs = NULL;
}
+
+int
+nouveau_module_debugfs_init(void)
+{
+ nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL);
+ if (IS_ERR(nouveau_debugfs_root))
+ return PTR_ERR(nouveau_debugfs_root);
+
+ return 0;
+}
+
+void
+nouveau_module_debugfs_fini(void)
+{
+ debugfs_remove(nouveau_debugfs_root);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 77f0323b38ba..b7617b344ee2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -21,6 +21,11 @@ nouveau_debugfs(struct drm_device *dev)
extern void nouveau_drm_debugfs_init(struct drm_minor *);
extern int nouveau_debugfs_init(struct nouveau_drm *);
extern void nouveau_debugfs_fini(struct nouveau_drm *);
+
+extern struct dentry *nouveau_debugfs_root;
+
+int nouveau_module_debugfs_init(void);
+void nouveau_module_debugfs_fini(void);
#else
static inline void
nouveau_drm_debugfs_init(struct drm_minor *minor)
@@ -37,6 +42,17 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
{
}
+static inline int
+nouveau_module_debugfs_init(void)
+{
+ return 0;
+}
+
+static inline void
+nouveau_module_debugfs_fini(void)
+{
+}
+
#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 107f63f08bd9..21d2d9ca5e85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -30,8 +30,9 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
#include <linux/dynamic_debug.h>
+#include <linux/debugfs.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -47,6 +48,7 @@
#include <nvif/fifo.h>
#include <nvif/push006c.h>
#include <nvif/user.h>
+#include <nvif/log.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -113,6 +115,20 @@ static struct drm_driver driver_stub;
static struct drm_driver driver_pci;
static struct drm_driver driver_platform;
+#ifdef CONFIG_DEBUG_FS
+struct dentry *nouveau_debugfs_root;
+
+/**
+ * gsp_logs - list of nvif_log GSP-RM logging buffers
+ *
+ * Head pointer to a list of nvif_log buffers that is created for each GPU
+ * upon GSP shutdown if the "keep_gsp_logging" command-line parameter is
+ * specified. This is used to track the alternative debugfs entries for the
+ * GSP-RM logs.
+ */
+NVIF_LOGS_DECLARE(gsp_logs);
+#endif
+
static u64
nouveau_pci_name(struct pci_dev *pdev)
{
@@ -1326,11 +1342,6 @@ driver_stub = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
-#ifdef GIT_REVISION
- .date = GIT_REVISION,
-#else
- .date = DRIVER_DATE,
-#endif
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -1423,6 +1434,8 @@ err_free:
static int __init
nouveau_drm_init(void)
{
+ int ret;
+
driver_pci = driver_stub;
driver_platform = driver_stub;
@@ -1436,6 +1449,10 @@ nouveau_drm_init(void)
if (!nouveau_modeset)
return 0;
+ ret = nouveau_module_debugfs_init();
+ if (ret)
+ return ret;
+
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
platform_driver_register(&nouveau_platform_driver);
#endif
@@ -1444,10 +1461,14 @@ nouveau_drm_init(void)
nouveau_backlight_ctor();
#ifdef CONFIG_PCI
- return pci_register_driver(&nouveau_drm_pci_driver);
-#else
- return 0;
+ ret = pci_register_driver(&nouveau_drm_pci_driver);
+ if (ret) {
+ nouveau_module_debugfs_fini();
+ return ret;
+ }
#endif
+
+ return 0;
}
static void __exit
@@ -1467,6 +1488,12 @@ nouveau_drm_exit(void)
#endif
if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
mmu_notifier_synchronize();
+
+#ifdef CONFIG_DEBUG_FS
+ nvif_log_shutdown(&gsp_logs);
+#endif
+
+ nouveau_module_debugfs_fini();
}
module_init(nouveau_drm_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 685d6ca3d8aa..55abc510067b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -7,7 +7,6 @@
#define DRIVER_NAME "nouveau"
#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
-#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 4
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 09686d038d60..7cc84472cece 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
+ bool local;
rcu_read_lock();
prev = rcu_dereference(f->channel);
- if (prev && (prev == chan ||
- fctx->sync(f, prev, chan) == 0))
+ local = prev && prev->cli->drm == chan->cli->drm;
+ if (local && (prev == chan ||
+ fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index 841e3b69fcaf..5a0c9b8a79f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -31,6 +31,7 @@ mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index d586aea30898..58502102926b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -26,6 +26,7 @@
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
#include <engine/sec2.h>
+#include <nvif/log.h>
#include <nvfw/fw.h>
@@ -57,6 +58,8 @@
#include <linux/ctype.h>
#include <linux/parser.h>
+extern struct dentry *nouveau_debugfs_root;
+
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
@@ -121,6 +124,8 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
return mqe->data;
}
+ size = ALIGN(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
msg = kvmalloc(repc, GFP_KERNEL);
if (!msg)
return ERR_PTR(-ENOMEM);
@@ -129,19 +134,15 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
len = min_t(u32, repc, len);
memcpy(msg, mqe->data, len);
- rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
- if (rptr == gsp->msgq.cnt)
- rptr = 0;
-
repc -= len;
if (repc) {
mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
memcpy(msg + len, mqe, repc);
-
- rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
}
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
mb();
(*gsp->msgq.rptr) = rptr;
return msg;
@@ -163,7 +164,7 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
u64 *end;
u64 csum = 0;
int free, time = 1000000;
- u32 wptr, size;
+ u32 wptr, size, step;
u32 off = 0;
argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
@@ -197,7 +198,9 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
}
cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, argc, step * GSP_PAGE_SIZE);
+
memcpy(cqe, (u8 *)cmd + off, size);
wptr += DIV_ROUND_UP(size, 0x1000);
@@ -1000,7 +1003,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
/*
@@ -1009,19 +1012,35 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
*/
memset(mem->data, 0xFF, mem->size);
- dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+ dma_free_coherent(mem->dev, mem->size, mem->data, mem->addr);
+ put_device(mem->dev);
+
memset(mem, 0, sizeof(*mem));
}
}
+/**
+ * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects
+ * @gsp: gsp pointer
+ * @size: number of bytes to allocate
+ * @mem: nvkm_gsp_mem object to initialize
+ *
+ * Allocates a block of memory for use with GSP.
+ *
+ * This memory block can potentially out-live the driver's remove() callback,
+ * so we take a device reference to ensure its lifetime. The reference is
+ * dropped in the destructor.
+ */
static int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
- mem->size = size;
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
if (WARN_ON(!mem->data))
return -ENOMEM;
+ mem->size = size;
+ mem->dev = get_device(gsp->subdev.device->dev);
+
return 0;
}
@@ -1054,8 +1073,8 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
nvkm_wr32(device, 0x110004, 0x00000040);
/* Release the DMA buffers that were needed only for boot and init */
- nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
- nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+ nvkm_gsp_mem_dtor(&gsp->boot.fw);
+ nvkm_gsp_mem_dtor(&gsp->libos);
return ret;
}
@@ -2060,6 +2079,215 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * If GSP-RM load fails, then the GSP nvkm object will be deleted, the logging
+ * debugfs entries will be deleted, and it will not be possible to debug the
+ * load failure. The keep_gsp_logging parameter tells Nouveau to copy the
+ * logging buffers to new debugfs entries, and these entries are retained
+ * until the driver unloads.
+ */
+static bool keep_gsp_logging;
+module_param(keep_gsp_logging, bool, 0444);
+MODULE_PARM_DESC(keep_gsp_logging,
+ "Migrate the GSP-RM logging debugfs entries upon exit");
+
+/*
+ * GSP-RM uses a pseudo-class mechanism to define a variety of per-"engine"
+ * data structures, and each engine has a "class ID" generated by a
+ * pre-processor. This is the class ID for the PMU.
+ */
+#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722
+
+/**
+ * rpc_ucode_libos_print_v1E_08 - RPC payload for libos print buffers
+ * @ucode_eng_desc: the engine descriptor
+ * @libos_print_buf_size: the size of the libos_print_buf[]
+ * @libos_print_buf: the actual buffer
+ *
+ * The engine descriptor is divided into 31:8 "class ID" and 7:0 "instance
+ * ID". We only care about messages from PMU.
+ */
+struct rpc_ucode_libos_print_v1e_08 {
+ u32 ucode_eng_desc;
+ u32 libos_print_buf_size;
+ u8 libos_print_buf[];
+};
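
The 31:8 / 7:0 split is used directly in the handler below; stated as illustrative helpers (not part of the patch):

/* Illustrative decode of ucode_eng_desc: bits 31:8 class ID, 7:0 instance ID. */
static inline u32 libos_print_class(const struct rpc_ucode_libos_print_v1e_08 *rpc)
{
	return rpc->ucode_eng_desc >> 8;
}

static inline u32 libos_print_instance(const struct rpc_ucode_libos_print_v1e_08 *rpc)
{
	return rpc->ucode_eng_desc & 0xff;
}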
+
+/**
+ * r535_gsp_msg_libos_print - capture log message from the PMU
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
+ *
+ * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC
+ * contains the contents of the libos print buffer from PMU. It is typically
+ * only written to when PMU encounters an error.
+ *
+ * Technically this RPC can be used to pass print buffers from any number of
+ * GSP-RM engines, but we only expect to receive them for the PMU.
+ *
+ * For the PMU, the buffer is 4K in size and the RPC always contains the full
+ * contents.
+ */
+static int
+r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct rpc_ucode_libos_print_v1e_08 *rpc = repv;
+ unsigned int class = rpc->ucode_eng_desc >> 8;
+
+ nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n",
+ class, rpc->libos_print_buf_size);
+
+ if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) {
+ nvkm_warn(subdev,
+ "received libos print from unknown class 0x%x\n",
+ class);
+ return -ENOMSG;
+ }
+
+ if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) {
+ nvkm_error(subdev, "libos print is too large (%u bytes)\n",
+ rpc->libos_print_buf_size);
+ return -E2BIG;
+ }
+
+ memcpy(gsp->blob_pmu.data, rpc->libos_print_buf, rpc->libos_print_buf_size);
+
+ return 0;
+}
+
+/**
+ * create_debugfs - create a blob debugfs entry
+ * @gsp: gsp pointer
+ * @name: name of this dentry
+ * @blob: blob wrapper
+ *
+ * Creates a debugfs entry for a logging buffer with the name 'name'.
+ */
+static struct dentry *create_debugfs(struct nvkm_gsp *gsp, const char *name,
+ struct debugfs_blob_wrapper *blob)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_blob(name, 0444, gsp->debugfs.parent, blob);
+ if (IS_ERR(dent)) {
+ nvkm_error(&gsp->subdev,
+ "failed to create %s debugfs entry\n", name);
+ return NULL;
+ }
+
+ /*
+ * For some reason, debugfs_create_blob doesn't set the size of the
+ * dentry, so do that here. See [1]
+ *
+ * [1] https://lore.kernel.org/r/linux-fsdevel/20240207200619.3354549-1-ttabi@nvidia.com/
+ */
+ i_size_write(d_inode(dent), blob->size);
+
+ return dent;
+}
+
+/**
+ * r535_gsp_libos_debugfs_init - create logging debugfs entries
+ * @gsp: gsp pointer
+ *
+ * Create the debugfs entries. This exposes the log buffers to userspace so
+ * that an external tool can parse it.
+ *
+ * The 'logpmu' contains exception dumps from the PMU. It is written via an
+ * RPC sent from GSP-RM and must be only 4KB. We create it here because it's
+ * only useful if there is a debugfs entry to expose it. If we get the PMU
+ * logging RPC and there is no debugfs entry, the RPC is just ignored.
+ *
+ * The blob_init, blob_rm, and blob_pmu objects can't be transient
+ * because debugfs_create_blob doesn't copy them.
+ *
+ * NOTE: OpenRM loads the logging elf image and prints the log messages
+ * in real-time. We may add that capability in the future, but that
+ * requires loading ELF images that are not distributed with the driver and
+ * adding the parsing code to Nouveau.
+ *
+ * Ideally, this should be part of nouveau_debugfs_init(), but that function
+ * is called too late. We really want to create these debugfs entries before
+ * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize,
+ * there could still be a log to capture.
+ */
+static void
+r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp)
+{
+ struct device *dev = gsp->subdev.device->dev;
+
+ /* Create a new debugfs directory with a name unique to this GPU. */
+ gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root);
+ if (IS_ERR(gsp->debugfs.parent)) {
+ nvkm_error(&gsp->subdev,
+ "failed to create %s debugfs root\n", dev_name(dev));
+ return;
+ }
+
+ gsp->blob_init.data = gsp->loginit.data;
+ gsp->blob_init.size = gsp->loginit.size;
+ gsp->blob_intr.data = gsp->logintr.data;
+ gsp->blob_intr.size = gsp->logintr.size;
+ gsp->blob_rm.data = gsp->logrm.data;
+ gsp->blob_rm.size = gsp->logrm.size;
+
+ gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init);
+ if (!gsp->debugfs.init)
+ goto error;
+
+ gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr);
+ if (!gsp->debugfs.intr)
+ goto error;
+
+ gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm);
+ if (!gsp->debugfs.rm)
+ goto error;
+
+ /*
+ * Since the PMU buffer is copied from an RPC, it doesn't need to be
+ * a DMA buffer.
+ */
+ gsp->blob_pmu.size = GSP_PAGE_SIZE;
+ gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL);
+ if (!gsp->blob_pmu.data)
+ goto error;
+
+ gsp->debugfs.pmu = create_debugfs(gsp, "logpmu", &gsp->blob_pmu);
+ if (!gsp->debugfs.pmu) {
+ kfree(gsp->blob_pmu.data);
+ goto error;
+ }
+
+ i_size_write(d_inode(gsp->debugfs.init), gsp->blob_init.size);
+ i_size_write(d_inode(gsp->debugfs.intr), gsp->blob_intr.size);
+ i_size_write(d_inode(gsp->debugfs.rm), gsp->blob_rm.size);
+ i_size_write(d_inode(gsp->debugfs.pmu), gsp->blob_pmu.size);
+
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
+ r535_gsp_msg_libos_print, gsp);
+
+ nvkm_debug(&gsp->subdev, "created debugfs GSP-RM logging entries\n");
+
+ if (keep_gsp_logging) {
+ nvkm_info(&gsp->subdev,
+ "logging buffers will be retained on failure\n");
+ }
+
+ return;
+
+error:
+ debugfs_remove(gsp->debugfs.parent);
+ gsp->debugfs.parent = NULL;
+}
+
+#endif
+
static inline u64
r535_gsp_libos_id8(const char *name)
{
@@ -2110,7 +2338,11 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
* written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
*
* The physical address map for the log buffer is stored in the buffer
- * itself, starting with offset 1. Offset 0 contains the "put" pointer.
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp).
+ * Initially, pp is equal to 0. If the buffer has valid logging data in it,
+ * then pp points to the index in the buffer where the next logging entry will
+ * be written. Therefore, the logging data is valid if:
+ * 1 <= pp < sizeof(buffer)/sizeof(u64)
*
* The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
* configured for a larger page size (e.g. 64K pages), we need to give
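
The put-pointer validity rule described in the comment above translates directly into a check (illustrative, not part of the patch):

/* Illustrative only: logging data is valid iff 1 <= pp < n_dwords. */
static bool log_buffer_has_data(const u64 *buf, size_t buf_size)
{
	u64 pp = buf[0];	/* offset 0 holds the "put" pointer */

	return pp >= 1 && pp < buf_size / sizeof(u64);
}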
@@ -2181,6 +2413,11 @@ r535_gsp_libos_init(struct nvkm_gsp *gsp)
args[3].size = gsp->rmargs.size;
args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ r535_gsp_libos_debugfs_init(gsp);
+#endif
+
return 0;
}
@@ -2234,8 +2471,8 @@ static void
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
{
nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
+ nvkm_gsp_mem_dtor(&rx3->lvl1);
+ nvkm_gsp_mem_dtor(&rx3->lvl0);
}
/**
@@ -2323,9 +2560,9 @@ nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
if (ret) {
lvl2_fail:
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+ nvkm_gsp_mem_dtor(&rx3->lvl1);
lvl1_fail:
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
+ nvkm_gsp_mem_dtor(&rx3->lvl0);
}
return ret;
@@ -2417,7 +2654,7 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
- nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
return ret;
@@ -2491,6 +2728,222 @@ r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
gsp->fws.rm = NULL;
}
+#ifdef CONFIG_DEBUG_FS
+
+struct r535_gsp_log {
+ struct nvif_log log;
+
+ /*
+ * Logging buffers in debugfs. The wrapper objects need to remain
+ * in memory until the dentry is deleted.
+ */
+ struct dentry *debugfs_logging_dir;
+ struct debugfs_blob_wrapper blob_init;
+ struct debugfs_blob_wrapper blob_intr;
+ struct debugfs_blob_wrapper blob_rm;
+ struct debugfs_blob_wrapper blob_pmu;
+};
+
+/**
+ * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU
+ * @_log: nvif_log struct for this GPU
+ *
+ * Called when the driver is shutting down, to clean up the retained GSP-RM
+ * logging buffers.
+ */
+static void r535_debugfs_shutdown(struct nvif_log *_log)
+{
+ struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log);
+
+ debugfs_remove(log->debugfs_logging_dir);
+
+ kfree(log->blob_init.data);
+ kfree(log->blob_intr.data);
+ kfree(log->blob_rm.data);
+ kfree(log->blob_pmu.data);
+
+ /* We also need to delete the list object */
+ kfree(log);
+}
+
+/**
+ * is_empty - return true if the logging buffer was never written to
+ * @b: blob wrapper with ->data field pointing to logging buffer
+ *
+ * The first 64-bit field of loginit, logintr, and logrm is the 'put'
+ * pointer, and it is initialized to 0. It's a dword-based index into the
+ * circular buffer, indicating where the next printf write will be made.
+ *
+ * If the pointer is still 0 when GSP-RM is shut down, that means that the
+ * buffer was never written to, so it can be ignored.
+ *
+ * This test also works for logpmu, even though it doesn't have a put
+ * pointer: that buffer is zero-initialized, so its first word is non-zero
+ * only after log data has been copied into it.
+ */
+static bool is_empty(const struct debugfs_blob_wrapper *b)
+{
+ u64 *put = b->data;
+
+ return put ? (*put == 0) : true;
+}
+
+/**
+ * r535_gsp_copy_log - preserve the logging buffers in a blob
+ * @parent: top-level debugfs dentry for this GPU
+ * @name: name of the debugfs entry to create
+ * @s: debugfs_blob_wrapper to copy from
+ * @t: debugfs_blob_wrapper to copy into
+ *
+ * When GSP shuts down, the nvkm_gsp object and all its memory are deleted.
+ * To preserve the logging buffers, the buffers need to be copied, but only
+ * if they actually have data.
+ */
+static int r535_gsp_copy_log(struct dentry *parent,
+ const char *name,
+ const struct debugfs_blob_wrapper *s,
+ struct debugfs_blob_wrapper *t)
+{
+ struct dentry *dent;
+ void *p;
+
+ if (is_empty(s))
+ return 0;
+
+ /* The original buffers will be deleted */
+ p = kmemdup(s->data, s->size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ t->data = p;
+ t->size = s->size;
+
+ dent = debugfs_create_blob(name, 0444, parent, t);
+ if (IS_ERR(dent)) {
+ kfree(p);
+ memset(t, 0, sizeof(*t));
+ return PTR_ERR(dent);
+ }
+
+ i_size_write(d_inode(dent), t->size);
+
+ return 0;
+}
+
+/**
+ * r535_gsp_retain_logging - copy logging buffers to new debugfs root
+ * @gsp: gsp pointer
+ *
+ * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging
+ * buffers and their debugfs entries, but all those objects would normally
+ * be deleted if GSP-RM fails to load.
+ *
+ * To preserve the logging buffers, we need to:
+ *
+ * 1) Allocate new buffers and copy the logs into them, so that the original
+ * DMA buffers can be released.
+ *
+ * 2) Preserve the directories. We don't need to save single dentries because
+ * we're going to delete the parent directory at shutdown, which removes
+ * all of its children.
+ *
+ * If anything fails in this process, then all the dentries need to be
+ * deleted. We don't need to deallocate the original logging buffers because
+ * the caller will do that regardless.
+ */
+static void r535_gsp_retain_logging(struct nvkm_gsp *gsp)
+{
+ struct device *dev = gsp->subdev.device->dev;
+ struct r535_gsp_log *log = NULL;
+ int ret;
+
+ if (!keep_gsp_logging || !gsp->debugfs.parent) {
+ /* Nothing to do */
+ goto exit;
+ }
+
+ /* Check to make sure at least one buffer has data. */
+ if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) &&
+ is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_pmu)) {
+ nvkm_warn(&gsp->subdev, "all logging buffers are empty\n");
+ goto exit;
+ }
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ goto error;
+
+ /*
+ * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper
+ * objects are also being deleted, which means the dentries will no
+ * longer be valid. Delete the existing entries so that we can create
+ * new ones with the same name.
+ */
+ debugfs_remove(gsp->debugfs.init);
+ debugfs_remove(gsp->debugfs.intr);
+ debugfs_remove(gsp->debugfs.rm);
+ debugfs_remove(gsp->debugfs.pmu);
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu);
+ if (ret)
+ goto error;
+
+ /* The nvkm_gsp object is going away, so save the dentry */
+ log->debugfs_logging_dir = gsp->debugfs.parent;
+
+ log->log.shutdown = r535_debugfs_shutdown;
+ list_add(&log->log.entry, &gsp_logs.head);
+
+ nvkm_warn(&gsp->subdev,
+ "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n",
+ dev_name(dev));
+
+ return;
+
+error:
+ nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n");
+
+exit:
+ debugfs_remove(gsp->debugfs.parent);
+
+ if (log) {
+ kfree(log->blob_init.data);
+ kfree(log->blob_intr.data);
+ kfree(log->blob_rm.data);
+ kfree(log->blob_pmu.data);
+ kfree(log);
+ }
+}
+
+#endif
+
+/**
+ * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown
+ * @gsp: gsp pointer
+ *
+ * If the log buffers are exposed via debugfs, the data for those entries
+ * needs to be cleaned up when the GSP device shuts down.
+ */
+static void
+r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp)
+{
+#ifdef CONFIG_DEBUG_FS
+ r535_gsp_retain_logging(gsp);
+
+ /*
+ * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that
+ * exists only if the debugfs entries were created.
+ */
+ kfree(gsp->blob_pmu.data);
+ gsp->blob_pmu.data = NULL;
+#endif
+}
+
void
r535_gsp_dtor(struct nvkm_gsp *gsp)
{
@@ -2498,7 +2951,7 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
mutex_destroy(&gsp->client_id.mutex);
nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
- nvkm_gsp_mem_dtor(gsp, &gsp->sig);
+ nvkm_gsp_mem_dtor(&gsp->sig);
nvkm_firmware_dtor(&gsp->fw);
nvkm_falcon_fw_dtor(&gsp->booter.unload);
@@ -2509,12 +2962,15 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
r535_gsp_dtor_fws(gsp);
- nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
- nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
- nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
- nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
- nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
- nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
+ nvkm_gsp_mem_dtor(&gsp->rmargs);
+ nvkm_gsp_mem_dtor(&gsp->wpr_meta);
+ nvkm_gsp_mem_dtor(&gsp->shm.mem);
+
+ r535_gsp_libos_debugfs_fini(gsp);
+
+ nvkm_gsp_mem_dtor(&gsp->loginit);
+ nvkm_gsp_mem_dtor(&gsp->logintr);
+ nvkm_gsp_mem_dtor(&gsp->logrm);
}
int
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e27376121606..054b71dba6a7 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -28,7 +28,6 @@
#define DRIVER_NAME MODULE_NAME
#define DRIVER_DESC "OMAP DRM"
-#define DRIVER_DATE "20110917"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -653,7 +652,6 @@ static const struct drm_driver omap_drm_driver = {
.fops = &omapdriver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index f4bd0c6e3f34..7b6396890681 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -6,7 +6,7 @@
#include <linux/fb.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 94a46241dece..f8511fe5fb0d 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1802,6 +1802,12 @@ static const struct panel_delay delay_200_500_e50_po2e200 = {
.powered_on_to_enable = 200,
};
+static const struct panel_delay delay_200_150_e50 = {
+ .hpd_absent = 200,
+ .unprepare = 150,
+ .enable = 50,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1913,6 +1919,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
@@ -1963,6 +1970,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1118, &delay_200_500_e50, "KD116N29-30NK-A005"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index a9b5dad70bc1..87bbb25d119a 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -9,6 +9,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 4618c892cdd6..e10e469aa7a6 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -400,7 +400,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
/* Look up the DSI host. It needs to probe before we do. */
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!endpoint)
return -ENODEV;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index ed53787d1dea..364f1c9a16d9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -11,6 +11,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index a0e5698275a5..6917ffda5b2b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/media-bus-format.h>
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 222c170dde8b..9b2f128fd309 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3222,6 +3222,33 @@ static const struct panel_desc mitsubishi_aa084xe01 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
+static const struct display_timing multi_inno_mi0700a2t_30_timing = {
+ .pixelclock = { 26400000, 33000000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 204, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 6, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 3, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc multi_inno_mi0700a2t_30 = {
+ .timings = &multi_inno_mi0700a2t_30_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 153,
+ .height = 92,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing multi_inno_mi0700s4t_6_timing = {
.pixelclock = { 29000000, 33000000, 38000000 },
.hactive = { 800, 800, 800 },
@@ -3313,6 +3340,33 @@ static const struct panel_desc multi_inno_mi1010ait_1cp = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing multi_inno_mi1010z1t_1cp11_timing = {
+ .pixelclock = { 40800000, 51200000, 67200000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 30, 110, 130 },
+ .hback_porch = { 30, 110, 130 },
+ .hsync_len = { 30, 100, 116 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 4, 13, 80 },
+ .vback_porch = { 4, 13, 80 },
+ .vsync_len = { 2, 9, 40 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc multi_inno_mi1010z1t_1cp11 = {
+ .timings = &multi_inno_mi1010z1t_1cp11_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 162,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nec_nl12880bc20_05_timing = {
.pixelclock = { 67000000, 71000000, 75000000 },
.hactive = { 1280, 1280, 1280 },
@@ -4280,6 +4334,45 @@ static const struct panel_desc tianma_tm070jvhg33 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
+/*
+ * The datasheet computes total blanking as back porch + front porch, not
+ * including sync pulse width. This is for both H and V. To make the total
+ * blanking and period correct, subtract the pulse width from the front
+ * porch.
+ *
+ * This works well for the Min and Typ values, but for Max values the sync
+ * pulse width is higher than back porch + front porch, so work around that
+ * by reducing the Max sync length value to 1 and then treating the Max
+ * porches as in the Min and Typ cases.
+ *
+ * Exact datasheet values are added as a comment where they differ from the
+ * ones implemented for the above reason.
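+ *
+ * For example, in the Typ column the datasheet gives an hfront porch of 139
+ * and an hsync length of 1; subtracting the pulse width yields the
+ * hfront_porch of 138 used below, keeping the total line blanking unchanged.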
+ */
+static const struct display_timing tianma_tm070jdhg34_00_timing = {
+ .pixelclock = { 68400000, 71900000, 78100000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 130, 138, 158 }, /* 131, 139, 159 */
+ .hback_porch = { 5, 5, 5 },
+ .hsync_len = { 1, 1, 1 }, /* 1, 1, 256 */
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 2, 39, 98 }, /* 3, 40, 99 */
+ .vback_porch = { 2, 2, 2 },
+ .vsync_len = { 1, 1, 1 }, /* 1, 1, 128 */
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc tianma_tm070jdhg34_00 = {
+ .timings = &tianma_tm070jdhg34_00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 150, /* 149.76 */
+ .height = 94, /* 93.60 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing tianma_tm070rvhg71_timing = {
.pixelclock = { 27700000, 29200000, 39600000 },
.hactive = { 800, 800, 800 },
@@ -4361,6 +4454,37 @@ static const struct panel_desc ti_nspire_classic_lcd_panel = {
.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
+static const struct display_timing topland_tian_g07017_01_timing = {
+ .pixelclock = { 44900000, 51200000, 63000000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 16, 160, 216 },
+ .hback_porch = { 160, 160, 160 },
+ .hsync_len = { 1, 1, 140 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 1, 12, 127 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 1, 20 },
+};
+
+static const struct panel_desc topland_tian_g07017_01 = {
+ .timings = &topland_tian_g07017_01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 1, /* 6.5 - 150µs PLL wake-up time */
+ .enable = 100, /* 6.4 - Power on: 6 VSyncs */
+ .disable = 84, /* 6.4 - Power off: 5 VSyncs */
+ .unprepare = 50, /* 6.4 - Power off: 3 VSyncs */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+};
+
static const struct drm_display_mode toshiba_lt089ac29000_mode = {
.clock = 79500,
.hdisplay = 1280,
@@ -4906,6 +5030,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "mitsubishi,aa084xe01",
.data = &mitsubishi_aa084xe01,
}, {
+ .compatible = "multi-inno,mi0700a2t-30",
+ .data = &multi_inno_mi0700a2t_30,
+ }, {
.compatible = "multi-inno,mi0700s4t-6",
.data = &multi_inno_mi0700s4t_6,
}, {
@@ -4915,6 +5042,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
+ .compatible = "multi-inno,mi1010z1t-1cp11",
+ .data = &multi_inno_mi1010z1t_1cp11,
+ }, {
.compatible = "nec,nl12880bc20-05",
.data = &nec_nl12880bc20_05,
}, {
@@ -5023,6 +5153,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
+ .compatible = "tianma,tm070jdhg34-00",
+ .data = &tianma_tm070jdhg34_00,
+ }, {
.compatible = "tianma,tm070jvhg33",
.data = &tianma_tm070jvhg33,
}, {
@@ -5038,6 +5171,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "toshiba,lt089ac29000",
.data = &toshiba_lt089ac29000,
}, {
+ .compatible = "topland,tian-g07017-01",
+ .data = &topland_tian_g07017_01,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 272490b9565b..be3a9797fbce 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -193,7 +193,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
- ctx->panel.dev = dev;
ctx->dsi = dsi;
ctx->supplies[0].supply = "vdda";
@@ -201,13 +200,11 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
ctx->supplies[1].supply = "vdd3p3";
ctx->supplies[1].init_load_uA = 13200;
- ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
- ctx->supplies);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
- "reset", GPIOD_OUT_LOW);
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
@@ -215,8 +212,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &visionox_rm69299_drm_funcs;
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ee3864476eb9..0f3935556ac7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -636,7 +636,6 @@ static const struct drm_driver panfrost_drm_driver = {
.fops = &panfrost_drm_driver_fops,
.name = "panfrost",
.desc = "panfrost DRM",
- .date = "20180908",
.major = 1,
.minor = 3,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index f5abde3866fb..174e190ba40f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -236,6 +236,10 @@ static const struct panfrost_model gpu_models[] = {
*/
GPU_MODEL(g57, 0x9003,
GPU_REV(g57, 0, 0)),
+
+ /* MediaTek MT8188 Mali-G57 MC3 */
+ GPU_MODEL(g57, 0x9093,
+ GPU_REV(g57, 0, 0)),
};
static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
index ecc7a52bd688..3686515d368d 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.c
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -243,26 +243,26 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
return 0;
}
-int panthor_devfreq_resume(struct panthor_device *ptdev)
+void panthor_devfreq_resume(struct panthor_device *ptdev)
{
struct panthor_devfreq *pdevfreq = ptdev->devfreq;
if (!pdevfreq->devfreq)
- return 0;
+ return;
panthor_devfreq_reset(pdevfreq);
- return devfreq_resume_device(pdevfreq->devfreq);
+ drm_WARN_ON(&ptdev->base, devfreq_resume_device(pdevfreq->devfreq));
}
-int panthor_devfreq_suspend(struct panthor_device *ptdev)
+void panthor_devfreq_suspend(struct panthor_device *ptdev)
{
struct panthor_devfreq *pdevfreq = ptdev->devfreq;
if (!pdevfreq->devfreq)
- return 0;
+ return;
- return devfreq_suspend_device(pdevfreq->devfreq);
+ drm_WARN_ON(&ptdev->base, devfreq_suspend_device(pdevfreq->devfreq));
}
void panthor_devfreq_record_busy(struct panthor_device *ptdev)
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.h b/drivers/gpu/drm/panthor/panthor_devfreq.h
index 83a5c9522493..b7631de695f7 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.h
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.h
@@ -12,8 +12,8 @@ struct panthor_devfreq;
int panthor_devfreq_init(struct panthor_device *ptdev);
-int panthor_devfreq_resume(struct panthor_device *ptdev);
-int panthor_devfreq_suspend(struct panthor_device *ptdev);
+void panthor_devfreq_resume(struct panthor_device *ptdev);
+void panthor_devfreq_suspend(struct panthor_device *ptdev);
void panthor_devfreq_record_busy(struct panthor_device *ptdev);
void panthor_devfreq_record_idle(struct panthor_device *ptdev);
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 6fbff516c1c1..0a37cfeeb181 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -22,6 +22,24 @@
#include "panthor_regs.h"
#include "panthor_sched.h"
+static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
+{
+ ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+
+ if (!ptdev->coherent)
+ return 0;
+
+ /* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
+ * ACE protocol has never been supported for command stream frontend GPUs.
+ */
+ if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
+ GPU_COHERENCY_PROT_BIT(ACE_LITE)))
+ return 0;
+
+ drm_err(&ptdev->base, "Coherency not supported by the device");
+ return -ENOTSUPP;
+}
+
static int panthor_clk_init(struct panthor_device *ptdev)
{
ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
@@ -156,7 +174,9 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ return ret;
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
@@ -415,6 +435,22 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
return 0;
}
+static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
+{
+ int ret;
+
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+
+ ret = panthor_fw_resume(ptdev);
+ if (!ret)
+ return 0;
+
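+	/* FW resume failed: unwind the MMU and GPU resume steps. */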
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ return ret;
+}
+
int panthor_device_resume(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
@@ -437,22 +473,20 @@ int panthor_device_resume(struct device *dev)
if (ret)
goto err_disable_stacks_clk;
- ret = panthor_devfreq_resume(ptdev);
- if (ret)
- goto err_disable_coregroup_clk;
+ panthor_devfreq_resume(ptdev);
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- if (!ret) {
- panthor_sched_resume(ptdev);
- } else {
- panthor_mmu_suspend(ptdev);
- panthor_gpu_suspend(ptdev);
+ ret = panthor_device_resume_hw_components(ptdev);
+ if (ret && ptdev->reset.fast) {
+ drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
+ ptdev->reset.fast = false;
+ ret = panthor_device_resume_hw_components(ptdev);
}
+ if (!ret)
+ panthor_sched_resume(ptdev);
+
drm_dev_exit(cookie);
if (ret)
@@ -476,8 +510,6 @@ int panthor_device_resume(struct device *dev)
err_suspend_devfreq:
panthor_devfreq_suspend(ptdev);
-
-err_disable_coregroup_clk:
clk_disable_unprepare(ptdev->clks.coregroup);
err_disable_stacks_clk:
@@ -488,13 +520,14 @@ err_disable_core_clk:
err_set_suspended:
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ atomic_set(&ptdev->pm.recovery_needed, 1);
return ret;
}
int panthor_device_suspend(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
- int ret, cookie;
+ int cookie;
if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
return -EINVAL;
@@ -526,36 +559,11 @@ int panthor_device_suspend(struct device *dev)
drm_dev_exit(cookie);
}
- ret = panthor_devfreq_suspend(ptdev);
- if (ret) {
- if (panthor_device_is_initialized(ptdev) &&
- drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- panthor_sched_resume(ptdev);
- drm_dev_exit(cookie);
- }
-
- goto err_set_active;
- }
+ panthor_devfreq_suspend(ptdev);
clk_disable_unprepare(ptdev->clks.coregroup);
clk_disable_unprepare(ptdev->clks.stacks);
clk_disable_unprepare(ptdev->clks.core);
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
return 0;
-
-err_set_active:
- /* If something failed and we have to revert back to an
- * active state, we also need to clear the MMIO userspace
- * mappings, so any dumb pages that were mapped while we
- * were trying to suspend gets invalidated.
- */
- mutex_lock(&ptdev->pm.mmio_lock);
- atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
- unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
- DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
- mutex_unlock(&ptdev->pm.mmio_lock);
- return ret;
}
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 0e68f5a70d20..da6574021664 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -9,6 +9,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -156,6 +157,17 @@ struct panthor_device {
/** @pending: Set to true if a reset is pending. */
atomic_t pending;
+
+ /**
+ * @fast: True if the post_reset logic can proceed with a fast reset.
+ *
+ * A fast reset is just a reset where the driver doesn't reload the FW sections.
+ *
+ * Any time the firmware is properly suspended, a fast reset can take place.
+ * On the other hand, if the halt operation failed, the driver will reload
+ * all FW sections to make sure we start from a fresh state.
+ */
+ bool fast;
} reset;
/** @pm: Power management related data. */
@@ -180,6 +192,9 @@ struct panthor_device {
* is suspended.
*/
struct page *dummy_latest_flush;
+
+ /** @recovery_needed: True when a resume attempt failed. */
+ atomic_t recovery_needed;
} pm;
/** @profile_mask: User-set profiling flags for job accounting. */
@@ -243,6 +258,28 @@ int panthor_device_mmap_io(struct panthor_device *ptdev,
int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);
+static inline int panthor_device_resume_and_get(struct panthor_device *ptdev)
+{
+ int ret = pm_runtime_resume_and_get(ptdev->base.dev);
+
+ /* If the resume failed, we need to clear the runtime_error, which
+ * can be done by forcing the RPM state to suspended. If multiple
+ * threads called panthor_device_resume_and_get(), we only want
+ * one of them to update the state, hence the cmpxchg. Note that a
+ * thread might enter panthor_device_resume_and_get() and call
+ * pm_runtime_resume_and_get() after another thread had attempted
+ * to resume and failed. This means we will end up with an error
+ * without even attempting a resume ourselves. The only risk here
+ * is to report an error when the second resume attempt might have
+ * succeeded. Given resume errors are not expected, this is probably
+ * something we can live with.
+ */
+ if (ret && atomic_cmpxchg(&ptdev->pm.recovery_needed, 1, 0) == 1)
+ pm_runtime_set_suspended(ptdev->base.dev);
+
+ return ret;
+}
+
enum drm_panthor_exception_type {
DRM_PANTHOR_EXCEPTION_OK = 0x00,
DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 0b3fbee3d37a..d5dcd3d1b33a 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -763,7 +763,7 @@ static int panthor_query_timestamp_info(struct panthor_device *ptdev,
{
int ret;
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (ret)
return ret;
@@ -1493,6 +1493,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query
* - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
+ * - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1505,9 +1506,8 @@ static const struct drm_driver panthor_drm_driver = {
.fops = &panthor_drm_driver_fops,
.name = "panthor",
.desc = "Panthor DRM driver",
- .date = "20230801",
.major = 1,
- .minor = 2,
+ .minor = 3,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index ecca5565ce41..68eb4fb4d3a8 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -12,6 +12,7 @@
#include <linux/iosys-map.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
@@ -91,26 +92,26 @@ enum panthor_fw_binary_entry_type {
#define CSF_FW_BINARY_ENTRY_UPDATE BIT(30)
#define CSF_FW_BINARY_ENTRY_OPTIONAL BIT(31)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_RD BIT(0)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_WR BIT(1)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_EX BIT(2)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_NONE (0 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED (1 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED_COHERENT (3 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK GENMASK(4, 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_PROT BIT(5)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED BIT(30)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO BIT(31)
-
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS \
- (CSF_FW_BINARY_IFACE_ENTRY_RD_RD | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_WR | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_EX | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_PROT | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD BIT(0)
+#define CSF_FW_BINARY_IFACE_ENTRY_WR BIT(1)
+#define CSF_FW_BINARY_IFACE_ENTRY_EX BIT(2)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_NONE (0 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED (1 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED_COHERENT (3 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK GENMASK(4, 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_PROT BIT(5)
+#define CSF_FW_BINARY_IFACE_ENTRY_SHARED BIT(30)
+#define CSF_FW_BINARY_IFACE_ENTRY_ZERO BIT(31)
+
+#define CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS \
+ (CSF_FW_BINARY_IFACE_ENTRY_RD | \
+ CSF_FW_BINARY_IFACE_ENTRY_WR | \
+ CSF_FW_BINARY_IFACE_ENTRY_EX | \
+ CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK | \
+ CSF_FW_BINARY_IFACE_ENTRY_PROT | \
+ CSF_FW_BINARY_IFACE_ENTRY_SHARED | \
+ CSF_FW_BINARY_IFACE_ENTRY_ZERO)
/**
* struct panthor_fw_binary_section_entry_hdr - Describes a section of FW binary
@@ -262,17 +263,6 @@ struct panthor_fw {
 /** @booted: True if the FW is booted */
bool booted;
- /**
- * @fast_reset: True if the post_reset logic can proceed with a fast reset.
- *
- * A fast reset is just a reset where the driver doesn't reload the FW sections.
- *
- * Any time the firmware is properly suspended, a fast reset can take place.
- * On the other hand, if the halt operation failed, the driver will reload
- * all sections to make sure we start from a fresh state.
- */
- bool fast_reset;
-
/** @irq: Job irq data. */
struct panthor_irq irq;
};
@@ -413,7 +403,7 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
int ret;
if (!section->data.size &&
- !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO))
+ !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO))
return;
ret = panthor_kernel_bo_vmap(section->mem);
@@ -421,7 +411,7 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
return;
memcpy(section->mem->kmap, section->data.buf, section->data.size);
- if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO) {
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO) {
memset(section->mem->kmap + section->data.size, 0,
panthor_kernel_bo_size(section->mem) - section->data.size);
}
@@ -535,20 +525,20 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
return -EINVAL;
}
- if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS) {
+ if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS) {
drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
hdr.flags);
return -EINVAL;
}
- if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_PROT) {
+ if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_PROT) {
drm_warn(&ptdev->base,
"Firmware protected mode entry not be supported, ignoring");
return 0;
}
if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
- !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED)) {
+ !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED)) {
drm_err(&ptdev->base,
"Interface at 0x%llx must be shared", CSF_MCU_SHARED_REGION_START);
return -EINVAL;
@@ -587,26 +577,26 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section_size = hdr.va.end - hdr.va.start;
if (section_size) {
- u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK;
+ u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK;
struct panthor_gem_object *bo;
u32 vm_map_flags = 0;
struct sg_table *sgt;
u64 va = hdr.va.start;
- if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
- if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_EX))
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_EX))
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
- /* TODO: CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_*_COHERENT are mapped to
+ /* TODO: CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_*_COHERENT are mapped to
* non-cacheable for now. We might want to introduce a new
* IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
* memory and is currently not used by our driver) for
 * AS_MEMATTR_AARCH64_SHARED memory, so we can benefit from
 * IO-coherent systems.
*/
- if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED)
+ if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED)
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
@@ -619,7 +609,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
return -EINVAL;
- if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED) {
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED) {
ret = panthor_kernel_bo_vmap(section->mem);
if (ret)
return ret;
@@ -689,7 +679,7 @@ panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
list_for_each_entry(section, &ptdev->fw->sections, node) {
struct sg_table *sgt;
- if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
continue;
panthor_fw_init_section_mem(ptdev, section);
@@ -1089,7 +1079,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
/* Make sure we won't be woken up by a ping. */
cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
- ptdev->fw->fast_reset = false;
+ ptdev->reset.fast = false;
if (!on_hang) {
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
@@ -1098,17 +1088,11 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10, 100000) &&
- glb_iface->output->halt_status == PANTHOR_FW_HALT_OK) {
- ptdev->fw->fast_reset = true;
+ status == MCU_STATUS_HALT, 10, 100000)) {
+ ptdev->reset.fast = true;
} else {
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
}
-
- /* The FW detects 0 -> 1 transitions. Make sure we reset
- * the HALT bit before the FW is rebooted.
- */
- panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
}
panthor_job_irq_suspend(&ptdev->fw->irq);
@@ -1130,41 +1114,30 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
if (ret)
return ret;
- /* If this is a fast reset, try to start the MCU without reloading
- * the FW sections. If it fails, go for a full reset.
- */
- if (ptdev->fw->fast_reset) {
- ret = panthor_fw_start(ptdev);
- if (!ret)
- goto out;
-
- /* Forcibly reset the MCU and force a slow reset, so we get a
- * fresh boot on the next panthor_fw_start() call.
+ if (!ptdev->reset.fast) {
+ /* On a slow reset, reload all sections, including RO ones.
+ * We're not supposed to end up here anyway, so let's just assume
+ * the overhead of reloading everything is acceptable.
*/
- panthor_fw_stop(ptdev);
- ptdev->fw->fast_reset = false;
- drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+ panthor_reload_fw_sections(ptdev, true);
+ } else {
+ /* The FW detects 0 -> 1 transitions. Make sure we reset
+ * the HALT bit before the FW is rebooted.
+ * This is not needed on a slow reset because FW sections are
+ * re-initialized.
+ */
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
- ret = panthor_vm_flush_all(ptdev->fw->vm);
- if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed (couldn't flush FW's AS l2cache)");
- return ret;
- }
+ panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
}
- /* Reload all sections, including RO ones. We're not supposed
- * to end up here anyway, let's just assume the overhead of
- * reloading everything is acceptable.
- */
- panthor_reload_fw_sections(ptdev, true);
-
ret = panthor_fw_start(ptdev);
if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed (couldn't start the FW )");
+ drm_err(&ptdev->base, "FW %s reset failed",
+ ptdev->reset.fast ? "fast" : "slow");
return ret;
}
-out:
/* We must re-initialize the global interface even on fast-reset. */
panthor_fw_init_global_iface(ptdev);
return 0;
@@ -1188,11 +1161,13 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
- /* Make sure the IRQ handler can be called after that point. */
- if (ptdev->fw->irq.irq)
- panthor_job_irq_suspend(&ptdev->fw->irq);
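+	/* Don't touch the HW when the device is runtime-suspended: the FW was
+	 * already suspended and the GPU may be powered down.
+	 */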
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) {
+ /* Make sure the IRQ handler cannot be called after that point. */
+ if (ptdev->fw->irq.irq)
+ panthor_job_irq_suspend(&ptdev->fw->irq);
- panthor_fw_stop(ptdev);
+ panthor_fw_stop(ptdev);
+ }
list_for_each_entry(section, &ptdev->fw->sections, node)
panthor_kernel_bo_destroy(section->mem);
@@ -1205,7 +1180,8 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
panthor_vm_put(ptdev->fw->vm);
ptdev->fw->vm = NULL;
- panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}
/**
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 2d3529a0b156..671049020afa 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -77,6 +77,12 @@ static const struct panthor_model gpu_models[] = {
GPU_IRQ_RESET_COMPLETED | \
GPU_IRQ_CLEAN_CACHES_COMPLETED)
+static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
+{
+ gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
+ ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
+}
+
static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
const struct panthor_model *model;
@@ -174,7 +180,8 @@ void panthor_gpu_unplug(struct panthor_device *ptdev)
unsigned long flags;
/* Make sure the IRQ handler is not running after that point. */
- panthor_gpu_irq_suspend(&ptdev->gpu->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_gpu_irq_suspend(&ptdev->gpu->irq);
/* Wake-up all waiters. */
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
@@ -365,6 +372,9 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
hweight64(ptdev->gpu_info.shader_present));
}
+ /* Set the desired coherency mode before the power up of L2 */
+ panthor_gpu_coherency_set(ptdev);
+
return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}
@@ -460,11 +470,12 @@ int panthor_gpu_soft_reset(struct panthor_device *ptdev)
*/
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
- /*
- * It may be preferable to simply power down the L2, but for now just
- * soft-reset which will leave the L2 powered down.
- */
- panthor_gpu_soft_reset(ptdev);
+ /* On a fast reset, simply power down the L2. */
+ if (!ptdev->reset.fast)
+ panthor_gpu_soft_reset(ptdev);
+ else
+ panthor_gpu_power_off(ptdev, L2, 1, 20000);
+
panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index a49132f3778b..c39e3eb1c15d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1941,7 +1941,7 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
return pool;
}
-static u64 mair_to_memattr(u64 mair)
+static u64 mair_to_memattr(u64 mair, bool coherent)
{
u64 memattr = 0;
u32 i;
@@ -1960,14 +1960,21 @@ static u64 mair_to_memattr(u64 mair)
AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
} else {
- /* Use SH_CPU_INNER mode so SH_IS, which is used when
- * IOMMU_CACHE is set, actually maps to the standard
- * definition of inner-shareable and not Mali's
- * internal-shareable mode.
- */
out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
- AS_MEMATTR_AARCH64_SH_CPU_INNER |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * Spec, inner and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
}
memattr |= (u64)out_attr << (8 * i);
@@ -2339,7 +2346,7 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
goto err_sched_fini;
mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
- vm->memattr = mair_to_memattr(mair);
+ vm->memattr = mair_to_memattr(mair, ptdev->coherent);
mutex_lock(&ptdev->mmu->vm.lock);
list_add_tail(&vm->node, &ptdev->mmu->vm.list);
@@ -2665,7 +2672,8 @@ int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm
*/
void panthor_mmu_unplug(struct panthor_device *ptdev)
{
- panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
mutex_lock(&ptdev->mmu->as.slots_lock);
for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index ef4bec7ff9c7..77b184c3fb0c 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -611,6 +611,16 @@ struct panthor_group {
bool timedout;
/**
+ * @innocent: True when the group becomes unusable because the group suspension
+ * failed during a reset.
+ *
+ * Sometimes the FW was put in a bad state by other groups, causing the group
+ * suspension that happens in the reset path to fail. In that case, we
+ * consider the group innocent.
+ */
+ bool innocent;
+
+ /**
* @syncobjs: Pool of per-queue synchronization objects.
*
* One sync object per queue. The position of the sync object is
@@ -2354,7 +2364,7 @@ static void tick_work(struct work_struct *work)
if (!drm_dev_enter(&ptdev->base, &cookie))
return;
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (drm_WARN_ON(&ptdev->base, ret))
goto out_dev_exit;
@@ -2690,6 +2700,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
u32 csg_id = ffs(slot_mask) - 1;
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ /* If the group was still usable before that point, we consider
+ * it innocent.
+ */
+ if (group_can_run(csg_slot->group))
+ csg_slot->group->innocent = true;
+
/* We consider group suspension failures as fatal and flag the
* group as unusable by setting timedout=true.
*/
@@ -3115,7 +3131,7 @@ queue_run_job(struct drm_sched_job *sched_job)
return dma_fence_get(job->done_fence);
}
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (drm_WARN_ON(&ptdev->base, ret))
return ERR_PTR(ret);
@@ -3570,6 +3586,8 @@ int panthor_group_get_state(struct panthor_file *pfile,
get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
get_state->fatal_queues = group->fatal_queues;
}
+ if (group->innocent)
+ get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
mutex_unlock(&sched->lock);
group_put(group);
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 13362150b9c6..56ff6a3fb483 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -45,9 +45,9 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -220,7 +220,6 @@ static const struct drm_driver pl111_drm_driver = {
.fops = &drm_fops,
.name = "pl111",
.desc = DRIVER_DESC,
- .date = "20170317",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 98a148bea628..69427eb8bed2 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select DRM_EXEC
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 21f752644242..417061ae59eb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -34,9 +34,9 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_file.h>
@@ -300,7 +300,6 @@ static struct drm_driver qxl_driver = {
.num_ioctls = ARRAY_SIZE(qxl_ioctls),
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = 0,
.minor = 1,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 32069acd93f8..cc02b5f10ad9 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,12 +38,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_dev.h"
@@ -54,7 +54,6 @@ struct iosys_map;
#define DRIVER_NAME "qxl"
#define DRIVER_DESC "RH QXL"
-#define DRIVER_DATE "20120117"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -101,7 +100,8 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct ttm_validate_buffer tv;
+ struct qxl_bo *bo;
+ struct list_head list;
};
struct qxl_crtc {
@@ -150,7 +150,7 @@ struct qxl_release {
struct qxl_bo *release_bo;
uint32_t release_offset;
uint32_t surface_release_id;
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
struct list_head bos;
};
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0d6a..05204a6a3fa8 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -121,13 +121,11 @@ qxl_release_free_list(struct qxl_release *release)
{
while (!list_empty(&release->bos)) {
struct qxl_bo_list *entry;
- struct qxl_bo *bo;
entry = container_of(release->bos.next,
- struct qxl_bo_list, tv.head);
- bo = to_qxl_bo(entry->tv.bo);
- qxl_bo_unref(&bo);
- list_del(&entry->tv.head);
+ struct qxl_bo_list, list);
+ qxl_bo_unref(&entry->bo);
+ list_del(&entry->list);
kfree(entry);
}
release->release_bo = NULL;
@@ -172,8 +170,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
- list_for_each_entry(entry, &release->bos, tv.head) {
- if (entry->tv.bo == &bo->tbo)
+ list_for_each_entry(entry, &release->bos, list) {
+ if (entry->bo == bo)
return 0;
}
@@ -182,9 +180,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
return -ENOMEM;
qxl_bo_ref(bo);
- entry->tv.bo = &bo->tbo;
- entry->tv.num_shared = 0;
- list_add_tail(&entry->tv.head, &release->bos);
+ entry->bo = bo;
+ list_add_tail(&entry->list, &release->bos);
return 0;
}
@@ -221,21 +218,28 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL);
- if (ret)
- return ret;
-
- list_for_each_entry(entry, &release->bos, tv.head) {
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
-
- ret = qxl_release_validate_bo(bo);
- if (ret) {
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
- return ret;
+ drm_exec_init(&release->exec, no_intr ? 0 :
+ DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
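+	/* drm_exec_until_all_locked() re-runs this block whenever
+	 * drm_exec_retry_on_contention() detects contention on a lock.
+	 */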
+ drm_exec_until_all_locked(&release->exec) {
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = drm_exec_prepare_obj(&release->exec,
+ &entry->bo->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(&release->exec);
+ if (ret)
+ goto error;
}
}
+
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = qxl_release_validate_bo(entry->bo);
+ if (ret)
+ goto error;
+ }
return 0;
+error:
+ drm_exec_fini(&release->exec);
+ return ret;
}
void qxl_release_backoff_reserve_list(struct qxl_release *release)
@@ -245,7 +249,7 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
if (list_is_singular(&release->bos))
return;
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ drm_exec_fini(&release->exec);
}
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
@@ -404,18 +408,18 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
- struct ttm_buffer_object *bo;
struct ttm_device *bdev;
- struct ttm_validate_buffer *entry;
+ struct qxl_bo_list *entry;
struct qxl_device *qdev;
+ struct qxl_bo *bo;
 /* if there is only one object on the release, it's the release itself;
 since these objects are pinned there is no need to reserve */
if (list_is_singular(&release->bos) || list_empty(&release->bos))
return;
- bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
+ bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
+ bdev = bo->tbo.bdev;
qdev = container_of(bdev, struct qxl_device, mman.bdev);
/*
@@ -426,14 +430,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- list_for_each_entry(entry, &release->bos, head) {
+ list_for_each_entry(entry, &release->bos, list) {
bo = entry->bo;
- dma_resv_add_fence(bo->base.resv, &release->base,
+ dma_resv_add_fence(bo->tbo.base.resv, &release->base,
DMA_RESV_USAGE_READ);
- ttm_bo_move_to_lru_tail_unlocked(bo);
- dma_resv_unlock(bo->base.resv);
+ ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
}
- ww_acquire_fini(&release->ticket);
+ drm_exec_fini(&release->exec);
}
-
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 9c6c74a75778..f51bace9555d 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -13,6 +13,7 @@ config DRM_RADEON
select DRM_TTM
select DRM_TTM_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
+ select DRM_EXEC
select SND_HDA_COMPONENT if SND_HDA_CORE
select POWER_SUPPLY
select HWMON
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fd8a4513025f..8605c074d9f7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -75,8 +75,8 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_suballoc.h>
@@ -457,7 +457,8 @@ struct radeon_mman {
struct radeon_bo_list {
struct radeon_bo *robj;
- struct ttm_validate_buffer tv;
+ struct list_head list;
+ bool shared;
uint64_t gpu_offset;
unsigned preferred_domains;
unsigned allowed_domains;
@@ -1030,6 +1031,7 @@ struct radeon_cs_parser {
struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
+ struct drm_exec exec;
/* indices of various chunks */
struct radeon_cs_chunk *chunk_ib;
struct radeon_cs_chunk *chunk_relocs;
@@ -1043,7 +1045,6 @@ struct radeon_cs_parser {
u32 cs_flags;
u32 ring;
s32 priority;
- struct ww_acquire_ctx ticket;
};
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 5b69cc8011b4..8d64ba18572e 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -775,8 +775,10 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
if (!dig->pin || dig->pin->id != port)
continue;
*enabled = true;
+ mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
+ mutex_unlock(&connector->eld_mutex);
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a6700d7278bf..64b26bfeafc9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -182,11 +182,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
}
- p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.num_shared = !r->write_domain;
-
- radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
- priority);
+ p->relocs[i].shared = !r->write_domain;
+ radeon_cs_buckets_add(&buckets, &p->relocs[i].list, priority);
}
radeon_cs_buckets_get_list(&buckets, &p->validated);
@@ -197,7 +194,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (need_mmap_lock)
mmap_read_lock(current->mm);
- r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+ r = radeon_bo_list_validate(p->rdev, &p->exec, &p->validated, p->ring);
if (need_mmap_lock)
mmap_read_unlock(current->mm);
@@ -253,12 +250,11 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
struct radeon_bo_list *reloc;
int r;
- list_for_each_entry(reloc, &p->validated, tv.head) {
+ list_for_each_entry(reloc, &p->validated, list) {
struct dma_resv *resv;
resv = reloc->robj->tbo.base.resv;
- r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.num_shared);
+ r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, reloc->shared);
if (r)
return r;
}
@@ -276,6 +272,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
s32 priority = 0;
INIT_LIST_HEAD(&p->validated);
+ drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
if (!cs->num_chunks) {
return 0;
@@ -397,8 +394,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
static int cmp_size_smaller_first(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
- struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
+ struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, list);
+ struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, list);
/* Sort A before B if A is smaller. */
if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
@@ -417,11 +414,13 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
* If error is set then unvalidate buffer, otherwise just free memory
* used by parsing context.
**/
-static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
if (!error) {
+ struct radeon_bo_list *reloc;
+
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
* This assures that the smallest buffers are added first
@@ -433,15 +432,17 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
* per frame under memory pressure.
*/
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- &parser->ib.fence->base);
- } else if (backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
+ list_for_each_entry(reloc, &parser->validated, list) {
+ dma_resv_add_fence(reloc->robj->tbo.base.resv,
+ &parser->ib.fence->base,
+ reloc->shared ?
+ DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
+ }
}
+ drm_exec_fini(&parser->exec);
+
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
struct radeon_bo *bo = parser->relocs[i].robj;
@@ -693,7 +694,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- radeon_cs_parser_fini(&parser, r, false);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
@@ -707,7 +708,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
if (r) {
- radeon_cs_parser_fini(&parser, r, false);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
@@ -724,7 +725,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
out:
- radeon_cs_parser_fini(&parser, r, true);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
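The fencing side of the conversion is visible in radeon_cs_parser_fini() above: instead of ttm_eu_fence_buffer_objects(), each validated BO gets the CS fence directly, at READ usage when userspace declared no write domain and WRITE usage otherwise. Condensed into a standalone helper for illustration (publish_cs_fence is a hypothetical name; the driver open-codes this loop):

	static void publish_cs_fence(struct list_head *validated,
				     struct dma_fence *fence)
	{
		struct radeon_bo_list *reloc;

		/* Caller must still hold the reservation locks via drm_exec. */
		list_for_each_entry(reloc, validated, list)
			dma_resv_add_fence(reloc->robj->tbo.base.resv, fence,
					   reloc->shared ? DMA_RESV_USAGE_READ :
							   DMA_RESV_USAGE_WRITE);
	}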
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 5e958cc223f4..267f082bc430 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -37,7 +37,7 @@
#include <linux/mmu_notifier.h>
#include <linux/pci.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
@@ -603,7 +603,6 @@ static const struct drm_driver kms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 02a65971d140..0f3dbffc492d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -43,7 +43,6 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20080528"
/* Interface history:
*
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index bf2d4b16dc2a..f86773f3db20 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -605,33 +605,40 @@ out:
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
- struct ttm_validate_buffer tv, *entry;
- struct radeon_bo_list *vm_bos;
- struct ww_acquire_ctx ticket;
+ struct radeon_bo_list *vm_bos, *entry;
struct list_head list;
+ struct drm_exec exec;
unsigned domain;
int r;
INIT_LIST_HEAD(&list);
- tv.bo = &bo_va->bo->tbo;
- tv.num_shared = 1;
- list_add(&tv.head, &list);
-
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r)
- goto error_free;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ list_for_each_entry(entry, &list, list) {
+ r = drm_exec_prepare_obj(&exec, &entry->robj->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_cleanup;
+ }
- list_for_each_entry(entry, &list, head) {
- domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
+ r = drm_exec_prepare_obj(&exec, &bo_va->bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_cleanup;
+ }
+
+ list_for_each_entry(entry, &list, list) {
+ domain = radeon_mem_type_to_domain(entry->robj->tbo.resource->mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (domain == RADEON_GEM_DOMAIN_CPU)
- goto error_unreserve;
+ goto error_cleanup;
}
mutex_lock(&bo_va->vm->mutex);
@@ -645,10 +652,8 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
-error_unreserve:
- ttm_eu_backoff_reservation(&ticket, &list);
-
-error_free:
+error_cleanup:
+ drm_exec_fini(&exec);
kvfree(vm_bos);
if (r && r != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7672404fdb29..a0fc0801abb0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -464,23 +464,26 @@ static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
}
int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
+ struct drm_exec *exec,
struct list_head *head, int ring)
{
struct ttm_operation_ctx ctx = { true, false };
struct radeon_bo_list *lobj;
- struct list_head duplicates;
- int r;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
+ int r;
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
- if (unlikely(r != 0)) {
- return r;
+ drm_exec_until_all_locked(exec) {
+ list_for_each_entry(lobj, head, list) {
+ r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(exec);
+ if (unlikely(r && r != -EALREADY))
+ return r;
+ }
}
- list_for_each_entry(lobj, head, tv.head) {
+ list_for_each_entry(lobj, head, list) {
struct radeon_bo *bo = lobj->robj;
if (!bo->tbo.pin_count) {
u32 domain = lobj->preferred_domains;
@@ -519,7 +522,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
domain = lobj->allowed_domains;
goto retry;
}
- ttm_eu_backoff_reservation(ticket, head);
return r;
}
}
@@ -527,11 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
lobj->tiling_flags = bo->tiling_flags;
}
- list_for_each_entry(lobj, &duplicates, tv.head) {
- lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
- lobj->tiling_flags = lobj->robj->tiling_flags;
- }
-
return 0;
}
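Note why the separate "duplicates" list disappears in this hunk: drm_exec_prepare_obj() returns -EALREADY when a GEM object was already locked through an earlier list entry, so duplicates simply stay in the main list and are handled by the normal validation loop. The tolerant check reduces to:

	r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base, 1);
	drm_exec_retry_on_contention(exec);
	if (unlikely(r && r != -EALREADY))	/* duplicates are fine */
		return r;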
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 39cc87a59a9a..d7bbb52db546 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -152,7 +152,7 @@ extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
+ struct drm_exec *exec,
struct list_head *head, int ring);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
u32 tiling_flags, u32 pitch);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index c38b4d5d6a14..21a5340aefdf 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,10 +142,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].robj = vm->page_directory;
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.num_shared = 1;
+ list[0].shared = true;
list[0].tiling_flags = 0;
- list_add(&list[0].tv.head, head);
+ list_add(&list[0].list, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
@@ -154,10 +153,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].robj = vm->page_tables[i].bo;
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.num_shared = 1;
+ list[idx].shared = true;
list[idx].tiling_flags = 0;
- list_add(&list[idx++].tv.head, head);
+ list_add(&list[idx++].list, head);
}
return list;
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
index f9ecc334c024..d948ff3594c4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
@@ -18,8 +18,8 @@
#include <linux/slab.h>
#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -546,6 +546,23 @@ static const struct rcar_du_device_info rcar_du_r8a779g0_info = {
.dsi_clk_mask = BIT(1) | BIT(0),
};
+static const struct rcar_du_device_info rcar_du_r8a779h0_info = {
+ .gen = 4,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_NO_BLENDING,
+ .channels_mask = BIT(0),
+ .routes = {
+ /* R8A779H0 has one MIPI DSI output. */
+ [RCAR_DU_OUTPUT_DSI0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ },
+ .num_rpf = 5,
+ .dsi_clk_mask = BIT(0),
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
@@ -572,6 +589,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info },
{ .compatible = "renesas,du-r8a779g0", .data = &rcar_du_r8a779g0_info },
+ { .compatible = "renesas,du-r8a779h0", .data = &rcar_du_r8a779h0_info },
{ }
};
@@ -611,7 +629,6 @@ static const struct drm_driver rcar_du_driver = {
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
- .date = "20130110",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
index 2ccd2581f544..068c106e586c 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
@@ -107,10 +107,12 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
*/
rcrtc = rcdu->crtcs;
num_crtcs = rcdu->num_crtcs;
- } else if (rcdu->info->gen >= 3 && rgrp->num_crtcs > 1) {
+ } else if ((rcdu->info->gen == 3 && rgrp->num_crtcs > 1) ||
+ rcdu->info->gen == 4) {
/*
* On Gen3 dot clocks are set up through per-group registers,
* only available when the group has two channels.
* On Gen4 the registers are present for single-channel groups too.
*/
rcrtc = &rcdu->crtcs[rgrp->index * 2];
num_crtcs = rgrp->num_crtcs;
@@ -185,11 +187,21 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
rcar_du_group_write(rgrp, DORCR, dorcr);
- /* Apply planes to CRTCs association. */
- mutex_lock(&rgrp->lock);
- rcar_du_group_write(rgrp, DPTSR, (rgrp->dptsr_planes << 16) |
- rgrp->dptsr_planes);
- mutex_unlock(&rgrp->lock);
+ /*
+ * DPTSR is used to select the source for the planes of a group. The
+ * first source is chosen by writing 0 to the respective bits, and this
+ * is always the default value of the register. In other words, writing
+ * DPTSR is only needed if the SoC supports choosing the second source.
+ *
+ * The SoC documentation seems to confirm this, as the DPTSR register
+ * is not documented if only the first source exists on that SoC.
+ */
+ if (rgrp->channels_mask & BIT(1)) {
+ mutex_lock(&rgrp->lock);
+ rcar_du_group_write(rgrp, DPTSR, (rgrp->dptsr_planes << 16) |
+ rgrp->dptsr_planes);
+ mutex_unlock(&rgrp->lock);
+ }
}
/*
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 8180625d5866..3c0c18d5249a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -587,7 +587,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
for (timeout = 10; timeout > 0; --timeout) {
if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
- (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
+ (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
break;
usleep_range(1000, 2000);
@@ -1081,6 +1081,8 @@ static const struct rcar_mipi_dsi_device_info v4h_data = {
static const struct of_device_id rcar_mipi_dsi_of_table[] = {
{ .compatible = "renesas,r8a779a0-dsi-csi2-tx", .data = &v3u_data },
{ .compatible = "renesas,r8a779g0-dsi-csi2-tx", .data = &v4h_data },
+ /* DSI in r8a779h0 is identical to r8a779g0 */
+ { .compatible = "renesas,r8a779h0-dsi-csi2-tx", .data = &v4h_data },
{ }
};
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
index f8114d11f2d1..a6b276f1d6ee 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
@@ -142,7 +142,6 @@
#define CLOCKSET1 0x101c
#define CLOCKSET1_LOCK_PHY (1 << 17)
-#define CLOCKSET1_LOCK (1 << 16)
#define CLOCKSET1_CLKSEL (1 << 8)
#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
index c4c1474d487e..6e7aac6219be 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
@@ -28,7 +28,6 @@
#include "rzg2l_du_vsp.h"
#define DU_MCR0 0x00
-#define DU_MCR0_DPI_OE BIT(0)
#define DU_MCR0_DI_EN BIT(8)
#define DU_DITR0 0x10
@@ -217,14 +216,9 @@ static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc)
static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start)
{
- struct rzg2l_du_crtc_state *rstate = to_rzg2l_crtc_state(rcrtc->crtc.state);
struct rzg2l_du_device *rcdu = rcrtc->dev;
- u32 val = DU_MCR0_DI_EN;
- if (rstate->outputs & BIT(RZG2L_DU_OUTPUT_DPAD0))
- val |= DU_MCR0_DPI_OE;
-
- writel(start ? val : 0, rcdu->mmio + DU_MCR0);
+ writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0);
}
static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index b069efd8ffc3..cbd9b9841267 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -12,8 +12,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -84,7 +84,6 @@ static const struct drm_driver rzg2l_du_driver = {
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
.desc = "Renesas RZ/G2L Display Unit",
- .date = "20230410",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
index 339cbaaea0b5..564ab4cb3d37 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/of.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_panel.h>
@@ -24,6 +25,22 @@
static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
};
+static enum drm_mode_status
+rzg2l_du_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct rzg2l_du_encoder *renc = to_rzg2l_encoder(encoder);
+
+ if (renc->output == RZG2L_DU_OUTPUT_DPAD0 && mode->clock > 83500)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_encoder_helper_funcs rzg2l_du_encoder_helper_funcs = {
+ .mode_valid = rzg2l_du_encoder_mode_valid,
+};
+
int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
enum rzg2l_du_output output,
struct device_node *enc_node)
@@ -48,6 +65,7 @@ int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
return PTR_ERR(renc);
renc->output = output;
+ drm_encoder_helper_add(&renc->base, &rzg2l_du_encoder_helper_funcs);
/* Attach the bridge to the encoder. */
ret = drm_bridge_attach(&renc->base, bridge, NULL,
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index b99217b4e05d..90c6269ccd29 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -311,11 +311,11 @@ int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
/*
- * The RZ DU uses the VSP1 for memory access, and is limited
- * to frame sizes of 1920x1080.
+ * The RZ DU was designed to support a frame size of 1920x1200 (landscape)
+ * or 1200x1920 (portrait).
*/
dev->mode_config.max_width = 1920;
- dev->mode_config.max_height = 1080;
+ dev->mode_config.max_height = 1920;
rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
index 76ee3e16077c..2f31822b2245 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
@@ -17,8 +17,8 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -107,7 +107,6 @@ static const struct drm_driver shmob_drm_driver = {
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
- .date = "20120424",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 3ac579615749..26c4410b2407 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -11,6 +11,7 @@ config DRM_ROCKCHIP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select DRM_DW_MIPI_DSI2 if ROCKCHIP_DW_MIPI_DSI2
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
@@ -82,6 +83,15 @@ config ROCKCHIP_DW_MIPI_DSI
enable MIPI DSI on RK3288 or RK3399 based SoC, you should
select this option.
+config ROCKCHIP_DW_MIPI_DSI2
+ bool "Rockchip specific extensions for Synopsys DW MIPI DSI2"
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the Synopsys DesignWare DSI2 driver. If you want to
+ enable MIPI DSI on RK3576 or RK3588 based SoC, you should
+ select this option.
+
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 3eab662a5a1d..2b867cebbc12 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -13,6 +13,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 546d13f19f9b..0844175c37c5 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -2,7 +2,7 @@
/*
* Rockchip SoC DP (Display Port) interface driver.
*
- * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Andy Yan <andy.yan@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
* Jeff Chen <jeff.chen@rock-chips.com>
@@ -386,7 +386,7 @@ static int rockchip_dp_probe(struct platform_device *pdev)
return -ENODEV;
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
return ret;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index ff9d95e2c4d4..b17de83b988b 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
@@ -885,7 +885,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.mute_stream = cdn_dp_audio_mute_stream,
.get_eld = cdn_dp_audio_get_eld,
.hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
@@ -896,6 +895,7 @@ static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
.spdif = 1,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
+ .no_capture_mute = 1,
};
dp->audio_pdev = platform_device_register_data(
@@ -947,9 +947,6 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
{
struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
event_work);
- struct drm_connector *connector = &dp->connector;
- enum drm_connector_status old_status;
-
int ret;
mutex_lock(&dp->lock);
@@ -1009,11 +1006,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
out:
mutex_unlock(&dp->lock);
-
- old_status = connector->status;
- connector->status = connector->funcs->detect(connector, false);
- if (old_status != connector->status)
- drm_kms_helper_hotplug_event(dp->drm_dev);
+ drm_connector_helper_hpd_irq_event(&dp->connector);
}
static int cdn_dp_pd_event(struct notifier_block *nb,
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 8e6e95d269da..17498f576ce7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Chris Zhong <zyw@rock-chips.com>
- * Copyright (C) 2016 ROCKCHIP, Inc.
+ * Copyright (C) Rockchip Electronics Co., Ltd.
*/
#ifndef _CDN_DP_CORE_H
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 33fb4d05c506..924fb1d3ece2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
index c7780ae3272a..13ed8cbdbafa 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 1b64b6e39cc8..3398160ad75e 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Chris Zhong <zyw@rock-chips.com>
* Nickey Yang <nickey.yang@rock-chips.com>
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
new file mode 100644
index 000000000000..cdd490778756
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Rockchip Electronics Co., Ltd.
+ * Author:
+ * Guochun Huang <hero.huang@rock-chips.com>
+ * Heiko Stuebner <heiko.stuebner@cherry.de>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+
+#include <drm/bridge/dw_mipi_dsi2.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <uapi/linux/videodev2.h>
+
+#include "rockchip_drm_drv.h"
+
+#define PSEC_PER_SEC 1000000000000LL
+
+struct dsigrf_reg {
+ u16 offset;
+ u16 lsb;
+ u16 msb;
+};
+
+enum grf_reg_fields {
+ TXREQCLKHS_EN,
+ GATING_EN,
+ IPI_SHUTDN,
+ IPI_COLORM,
+ IPI_COLOR_DEPTH,
+ IPI_FORMAT,
+ MAX_FIELDS,
+};
+
+#define IPI_DEPTH_5_6_5_BITS 0x02
+#define IPI_DEPTH_6_BITS 0x03
+#define IPI_DEPTH_8_BITS 0x05
+#define IPI_DEPTH_10_BITS 0x06
+
+struct rockchip_dw_dsi2_chip_data {
+ u32 reg;
+ const struct dsigrf_reg *grf_regs;
+ unsigned long long max_bit_rate_per_lane;
+};
+
+struct dw_mipi_dsi2_rockchip {
+ struct device *dev;
+ struct rockchip_encoder encoder;
+ struct regmap *regmap;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 format;
+
+ struct regmap *grf_regmap;
+ struct phy *phy;
+ union phy_configure_opts phy_opts;
+
+ struct dw_mipi_dsi2 *dmd;
+ struct dw_mipi_dsi2_plat_data pdata;
+ const struct rockchip_dw_dsi2_chip_data *cdata;
+};
+
+static inline struct dw_mipi_dsi2_rockchip *to_dsi2(struct drm_encoder *encoder)
+{
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ return container_of(rkencoder, struct dw_mipi_dsi2_rockchip, encoder);
+}
+
+static void grf_field_write(struct dw_mipi_dsi2_rockchip *dsi2, enum grf_reg_fields index,
+ unsigned int val)
+{
+ const struct dsigrf_reg *field = &dsi2->cdata->grf_regs[index];
+
+ if (!field)
+ return;
+
+ regmap_write(dsi2->grf_regmap, field->offset,
+ (val << field->lsb) | (GENMASK(field->msb, field->lsb) << 16));
+}
+
+static int dw_mipi_dsi2_phy_init(void *priv_data)
+{
+ return 0;
+}
+
+static void dw_mipi_dsi2_phy_power_on(void *priv_data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ int ret;
+
+ ret = phy_set_mode(dsi2->phy, PHY_MODE_MIPI_DPHY);
+ if (ret) {
+ dev_err(dsi2->dev, "Failed to set phy mode: %d\n", ret);
+ return;
+ }
+
+ phy_configure(dsi2->phy, &dsi2->phy_opts);
+ phy_power_on(dsi2->phy);
+}
+
+static void dw_mipi_dsi2_phy_power_off(void *priv_data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+
+ phy_power_off(dsi2->phy);
+}
+
+static int
+dw_mipi_dsi2_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ u64 max_lane_rate, target_phyclk;
+ unsigned int lane_rate_kbps;
+ int bpp;
+
+ max_lane_rate = dsi2->cdata->max_bit_rate_per_lane;
+
+ dsi2->format = format;
+ bpp = mipi_dsi_pixel_format_to_bpp(format);
+ if (bpp < 0) {
+ dev_err(dsi2->dev, "failed to get bpp for pixel format %d\n", format);
+ return bpp;
+ }
+
+ lane_rate_kbps = mode->clock * bpp / lanes;
+
+ /*
+ * In video burst mode, pad the bandwidth slightly (multiply by
+ * 1 / 0.9) to cover protocol overhead and HS-to-BLLP mode
+ * switching: the per-lane rate in Mbps must exceed the raw RGB
+ * bandwidth.
+ */
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ lane_rate_kbps = (lane_rate_kbps * 10) / 9;
+
+ if (lane_rate_kbps > max_lane_rate) {
+ dev_err(dsi2->dev, "DPHY clock frequency is out of range\n");
+ return -ERANGE;
+ }
+
+ dsi2->lane_mbps = lane_rate_kbps / 1000;
+ *lane_mbps = dsi2->lane_mbps;
+
+ if (dsi2->phy) {
+ target_phyclk = DIV_ROUND_CLOSEST_ULL(lane_rate_kbps * lanes * 1000, bpp);
+ phy_mipi_dphy_get_default_config(target_phyclk, bpp, lanes,
+ &dsi2->phy_opts.mipi_dphy);
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_phy_get_iface(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface)
+{
+ /* PPI width is fixed to 16 bits in DCPHY */
+ iface->ppi_width = 16;
+ iface->phy_type = DW_MIPI_DSI2_DPHY;
+}
+
+static int
+dw_mipi_dsi2_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi2_phy_timing *timing)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ struct phy_configure_opts_mipi_dphy *cfg = &dsi2->phy_opts.mipi_dphy;
+ unsigned long long tmp, ui;
+ unsigned long long hstx_clk;
+
+ hstx_clk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ ui = ALIGN(PSEC_PER_SEC, hstx_clk);
+ do_div(ui, hstx_clk);
+
+ /* PHY_LP2HS_TIME = (TLPX + THS-PREPARE + THS-ZERO) / Tphy_hstx_clk */
+ tmp = cfg->lpx + cfg->hs_prepare + cfg->hs_zero;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp << 16, ui);
+ timing->data_lp2hs = tmp;
+
+ /* PHY_HS2LP_TIME = (THS-TRAIL + THS-EXIT) / Tphy_hstx_clk */
+ tmp = cfg->hs_trail + cfg->hs_exit;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp << 16, ui);
+ timing->data_hs2lp = tmp;
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi2_phy_ops dw_mipi_dsi2_rockchip_phy_ops = {
+ .init = dw_mipi_dsi2_phy_init,
+ .power_on = dw_mipi_dsi2_phy_power_on,
+ .power_off = dw_mipi_dsi2_phy_power_off,
+ .get_interface = dw_mipi_dsi2_phy_get_iface,
+ .get_lane_mbps = dw_mipi_dsi2_get_lane_mbps,
+ .get_timing = dw_mipi_dsi2_phy_get_timing,
+};
+
+static void dw_mipi_dsi2_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = to_dsi2(encoder);
+ u32 color_depth;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color_depth = IPI_DEPTH_6_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color_depth = IPI_DEPTH_5_6_5_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ color_depth = IPI_DEPTH_8_BITS;
+ break;
+ default:
+ /* Should've been caught by atomic_check */
+ WARN_ON(1);
+ return;
+ }
+
+ grf_field_write(dsi2, IPI_COLOR_DEPTH, color_depth);
+}
+
+static int
+dw_mipi_dsi2_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct dw_mipi_dsi2_rockchip *dsi2 = to_dsi2(encoder);
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_display_info *info = &connector->display_info;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (info->num_bus_formats)
+ s->bus_format = info->bus_formats[0];
+ else
+ s->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ s->output_type = DRM_MODE_CONNECTOR_DSI;
+ s->bus_flags = info->bus_flags;
+ s->color_space = V4L2_COLORSPACE_DEFAULT;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+dw_mipi_dsi2_encoder_helper_funcs = {
+ .atomic_enable = dw_mipi_dsi2_encoder_atomic_enable,
+ .atomic_check = dw_mipi_dsi2_encoder_atomic_check,
+};
+
+static int rockchip_dsi2_drm_create_encoder(struct dw_mipi_dsi2_rockchip *dsi2,
+ struct drm_device *drm_dev)
+{
+ struct drm_encoder *encoder = &dsi2->encoder.encoder;
+ int ret;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dsi2->dev->of_node);
+
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ dev_err(dsi2->dev, "Failed to initialize encoder with drm\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_mipi_dsi2_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_rockchip_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = rockchip_dsi2_drm_create_encoder(dsi2, drm_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to create drm encoder\n");
+
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dsi2->encoder,
+ dev->of_node, 0, 0);
+
+ ret = dw_mipi_dsi2_bind(dsi2->dmd, &dsi2->encoder.encoder);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to bind\n");
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_rockchip_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = dev_get_drvdata(dev);
+
+ dw_mipi_dsi2_unbind(dsi2->dmd);
+}
+
+static const struct component_ops dw_mipi_dsi2_rockchip_ops = {
+ .bind = dw_mipi_dsi2_rockchip_bind,
+ .unbind = dw_mipi_dsi2_rockchip_unbind,
+};
+
+static int dw_mipi_dsi2_rockchip_host_attach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ int ret;
+
+ ret = component_add(dsi2->dev, &dw_mipi_dsi2_rockchip_ops);
+ if (ret)
+ return dev_err_probe(dsi2->dev, ret, "Failed to register component\n");
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_rockchip_host_detach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+
+ component_del(dsi2->dev, &dw_mipi_dsi2_rockchip_ops);
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi2_host_ops dw_mipi_dsi2_rockchip_host_ops = {
+ .attach = dw_mipi_dsi2_rockchip_host_attach,
+ .detach = dw_mipi_dsi2_rockchip_host_detach,
+};
+
+static const struct regmap_config dw_mipi_dsi2_rockchip_regmap_config = {
+ .name = "dsi2-host",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static int dw_mipi_dsi2_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct rockchip_dw_dsi2_chip_data *cdata =
+ of_device_get_match_data(dev);
+ struct dw_mipi_dsi2_rockchip *dsi2;
+ struct resource *res;
+ void __iomem *base;
+ int i;
+
+ dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
+ if (!dsi2)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Unable to get dsi registers\n");
+
+ dsi2->regmap = devm_regmap_init_mmio(dev, base, &dw_mipi_dsi2_rockchip_regmap_config);
+ if (IS_ERR(dsi2->regmap))
+ return dev_err_probe(dev, PTR_ERR(dsi2->regmap), "failed to init register map\n");
+
+ i = 0;
+ while (cdata[i].reg) {
+ if (cdata[i].reg == res->start) {
+ dsi2->cdata = &cdata[i];
+ break;
+ }
+
+ i++;
+ }
+
+ if (!dsi2->cdata)
+ return dev_err_probe(dev, -EINVAL, "No dsi-config for %s node\n", np->name);
+
+ dsi2->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(dsi2->grf_regmap))
+ return dev_err_probe(dsi2->dev, PTR_ERR(dsi2->grf_regmap), "Unable to get grf\n");
+
+ dsi2->phy = devm_phy_optional_get(dev, "dcphy");
+ if (IS_ERR(dsi2->phy))
+ return dev_err_probe(dev, PTR_ERR(dsi2->phy), "failed to get mipi phy\n");
+
+ dsi2->dev = dev;
+ dsi2->pdata.regmap = dsi2->regmap;
+ dsi2->pdata.max_data_lanes = 4;
+ dsi2->pdata.phy_ops = &dw_mipi_dsi2_rockchip_phy_ops;
+ dsi2->pdata.host_ops = &dw_mipi_dsi2_rockchip_host_ops;
+ dsi2->pdata.priv_data = dsi2;
+ platform_set_drvdata(pdev, dsi2);
+
+ dsi2->dmd = dw_mipi_dsi2_probe(pdev, &dsi2->pdata);
+ if (IS_ERR(dsi2->dmd))
+ return dev_err_probe(dev, PTR_ERR(dsi2->dmd), "Failed to probe dw_mipi_dsi2\n");
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_rockchip_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = platform_get_drvdata(pdev);
+
+ dw_mipi_dsi2_remove(dsi2->dmd);
+}
+
+static const struct dsigrf_reg rk3588_dsi0_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0000, 11, 11 },
+ [GATING_EN] = { 0x0000, 10, 10 },
+ [IPI_SHUTDN] = { 0x0000, 9, 9 },
+ [IPI_COLORM] = { 0x0000, 8, 8 },
+ [IPI_COLOR_DEPTH] = { 0x0000, 4, 7 },
+ [IPI_FORMAT] = { 0x0000, 0, 3 },
+};
+
+static const struct dsigrf_reg rk3588_dsi1_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0004, 11, 11 },
+ [GATING_EN] = { 0x0004, 10, 10 },
+ [IPI_SHUTDN] = { 0x0004, 9, 9 },
+ [IPI_COLORM] = { 0x0004, 8, 8 },
+ [IPI_COLOR_DEPTH] = { 0x0004, 4, 7 },
+ [IPI_FORMAT] = { 0x0004, 0, 3 },
+};
+
+static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
+ {
+ .reg = 0xfde20000,
+ .grf_regs = rk3588_dsi0_grf_reg_fields,
+ .max_bit_rate_per_lane = 4500000ULL,
+ },
+ {
+ .reg = 0xfde30000,
+ .grf_regs = rk3588_dsi1_grf_reg_fields,
+ .max_bit_rate_per_lane = 4500000ULL,
+ }
+};
+
+static const struct of_device_id dw_mipi_dsi2_rockchip_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3588-mipi-dsi2",
+ .data = &rk3588_chip_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_mipi_dsi2_rockchip_dt_ids);
+
+struct platform_driver dw_mipi_dsi2_rockchip_driver = {
+ .probe = dw_mipi_dsi2_rockchip_probe,
+ .remove = dw_mipi_dsi2_rockchip_remove,
+ .driver = {
+ .of_match_table = dw_mipi_dsi2_rockchip_dt_ids,
+ .name = "dw-mipi-dsi2",
+ },
+};
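The grf_field_write() helper in the new file relies on the Rockchip GRF convention that bits [31:16] of a register act as a per-bit write-enable mask for bits [15:0], so a field can be updated without a read-modify-write cycle. A sketch of the value construction, with grf_field_val() as a hypothetical helper name:

	#include <linux/bits.h>

	static inline u32 grf_field_val(u16 msb, u16 lsb, u32 val)
	{
		/* Value in the low half, matching write-enables in the high half. */
		return (val << lsb) | (GENMASK(msb, lsb) << 16);
	}

	/*
	 * Example: IPI_COLOR_DEPTH on DSI0 occupies bits [7:4] of offset
	 * 0x0000, so grf_field_val(7, 4, IPI_DEPTH_8_BITS) == 0x00f00050.
	 */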
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 42bda4ffbbbd..e7a6669c46b0 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2014, Rockchip Electronics Co., Ltd.
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index c8b362cc2b95..e498767a0a66 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -28,20 +28,26 @@
#define RK3588_GRF_SOC_CON2 0x0308
#define RK3588_HDMI0_HPD_INT_MSK BIT(13)
#define RK3588_HDMI0_HPD_INT_CLR BIT(12)
+#define RK3588_HDMI1_HPD_INT_MSK BIT(15)
+#define RK3588_HDMI1_HPD_INT_CLR BIT(14)
#define RK3588_GRF_SOC_CON7 0x031c
#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12)
#define RK3588_GRF_SOC_STATUS1 0x0384
#define RK3588_HDMI0_LEVEL_INT BIT(16)
+#define RK3588_HDMI1_LEVEL_INT BIT(24)
#define RK3588_GRF_VO1_CON3 0x000c
+#define RK3588_GRF_VO1_CON6 0x0018
#define RK3588_SCLIN_MASK BIT(9)
#define RK3588_SDAIN_MASK BIT(10)
#define RK3588_MODE_MASK BIT(11)
#define RK3588_I2S_SEL_MASK BIT(13)
#define RK3588_GRF_VO1_CON9 0x0024
#define RK3588_HDMI0_GRANT_SEL BIT(10)
+#define RK3588_HDMI1_GRANT_SEL BIT(12)
#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
#define HOTPLUG_DEBOUNCE_MS 150
+#define MAX_HDMI_PORT_NUM 2
struct rockchip_hdmi_qp {
struct device *dev;
@@ -53,6 +59,7 @@ struct rockchip_hdmi_qp {
struct phy *phy;
struct gpio_desc *enable_gpio;
struct delayed_work hpd_work;
+ int port_id;
};
static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder)
@@ -127,20 +134,24 @@ dw_hdmi_qp_rk3588_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
u32 val;
regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &val);
+ val &= hdmi->port_id ? RK3588_HDMI1_LEVEL_INT : RK3588_HDMI0_LEVEL_INT;
- return val & RK3588_HDMI0_LEVEL_INT ?
- connector_status_connected : connector_status_disconnected;
+ return val ? connector_status_connected : connector_status_disconnected;
}
static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
{
struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
+ RK3588_HDMI1_HPD_INT_CLR | RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
+ RK3588_HDMI0_HPD_INT_CLR | RK3588_HDMI0_HPD_INT_MSK);
- regmap_write(hdmi->regmap,
- RK3588_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR |
- RK3588_HDMI0_HPD_INT_MSK));
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = {
@@ -173,8 +184,12 @@ static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat);
if (intr_stat) {
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
- RK3588_HDMI0_HPD_INT_MSK);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK,
+ RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
+ RK3588_HDMI0_HPD_INT_MSK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_WAKE_THREAD;
}
@@ -191,22 +206,44 @@ static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
if (!intr_stat)
return IRQ_NONE;
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
+ RK3588_HDMI1_HPD_INT_CLR);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
+ RK3588_HDMI0_HPD_INT_CLR);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
- val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
+ if (hdmi->port_id)
+ val |= HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_HANDLED;
}
+struct rockchip_hdmi_qp_cfg {
+ unsigned int num_ports;
+ unsigned int port_ids[MAX_HDMI_PORT_NUM];
+ const struct dw_hdmi_qp_phy_ops *phy_ops;
+};
+
+static const struct rockchip_hdmi_qp_cfg rk3588_hdmi_cfg = {
+ .num_ports = 2,
+ .port_ids = {
+ 0xfde80000,
+ 0xfdea0000,
+ },
+ .phy_ops = &rk3588_hdmi_phy_ops,
+};
+
static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = {
{ .compatible = "rockchip,rk3588-dw-hdmi-qp",
- .data = &rk3588_hdmi_phy_ops },
+ .data = &rk3588_hdmi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids);
@@ -214,17 +251,15 @@ MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids);
static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
void *data)
{
- static const char * const clk_names[] = {
- "pclk", "earc", "aud", "hdp", "hclk_vo1",
- "ref" /* keep "ref" last */
- };
struct platform_device *pdev = to_platform_device(dev);
+ const struct rockchip_hdmi_qp_cfg *cfg;
struct dw_hdmi_qp_plat_data plat_data;
struct drm_device *drm = data;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct rockchip_hdmi_qp *hdmi;
- struct clk *clk;
+ struct resource *res;
+ struct clk_bulk_data *clks;
int ret, irq, i;
u32 val;
@@ -235,12 +270,31 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (!hdmi)
return -ENOMEM;
- plat_data.phy_ops = of_device_get_match_data(dev);
- if (!plat_data.phy_ops)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
return -ENODEV;
- plat_data.phy_data = hdmi;
hdmi->dev = &pdev->dev;
+ hdmi->port_id = -ENODEV;
+
+ /* Identify port ID by matching base IO address */
+ for (i = 0; i < cfg->num_ports; i++) {
+ if (res->start == cfg->port_ids[i]) {
+ hdmi->port_id = i;
+ break;
+ }
+ }
+ if (hdmi->port_id < 0) {
+ drm_err(hdmi, "Failed to match HDMI port ID\n");
+ return hdmi->port_id;
+ }
+
+ plat_data.phy_ops = cfg->phy_ops;
+ plat_data.phy_data = hdmi;
encoder = &hdmi->encoder.encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -270,18 +324,22 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->vo_regmap);
}
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- clk = devm_clk_get_enabled(hdmi->dev, clk_names[i]);
+ ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks);
+ if (ret < 0) {
+ drm_err(hdmi, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "Failed to get %s clock: %d\n",
- clk_names[i], ret);
- return ret;
+ for (i = 0; i < ret; i++) {
+ if (!strcmp(clks[i].id, "ref")) {
+ hdmi->ref_clk = clks[i].clk;
+ break;
}
}
- hdmi->ref_clk = clk;
+ if (!hdmi->ref_clk) {
+ drm_err(hdmi, "Missing ref clock\n");
+ return -EINVAL;
+ }
hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
GPIOD_OUT_HIGH);
@@ -303,17 +361,26 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
RK3588_SET_HPD_PATH_MASK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
+ RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
+ RK3588_HDMI0_GRANT_SEL);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work);
@@ -391,14 +458,20 @@ static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
RK3588_SET_HPD_PATH_MASK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
+ RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
+ RK3588_HDMI0_GRANT_SEL);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
dw_hdmi_qp_resume(dev, hdmi->hdmi);
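The HIWORD_UPDATE() macro used throughout these hunks is the same high-word write-enable idiom; a worked example with the HDMI1 defines added above:

	/*
	 * HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK)
	 *   == BIT(15) | (BIT(15) << 16) == 0x80008000  -> sets bit 15
	 * HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK)
	 *   == BIT(15) << 16             == 0x80000000  -> clears bit 15
	 *
	 * Only bits whose write-enable (high word) is set are modified;
	 * all other register bits are left untouched by the write.
	 */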
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index b58e2a29294b..898d90155057 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
index a7edf3559e60..8b7ef3fac485 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index b0fc8ace2e41..403336397214 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.h b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
index 39a31c62a428..c3598ba7428c 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.h
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 32d8394c4c49..439edc165ff6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*
* based on exynos_drm_drv.c
@@ -17,7 +17,7 @@
#include <linux/console.h>
#include <linux/iommu.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -39,7 +39,6 @@
#define DRIVER_NAME "rockchip"
#define DRIVER_DESC "RockChip Soc DRM"
-#define DRIVER_DATE "20140818"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -235,7 +234,6 @@ static const struct drm_driver rockchip_drm_driver = {
.fops = &rockchip_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -488,8 +486,7 @@ static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- if (drm)
- drm_atomic_helper_shutdown(drm);
+ drm_atomic_helper_shutdown(drm);
}
static const struct of_device_id rockchip_drm_dt_ids[] = {
@@ -536,6 +533,8 @@ static int __init rockchip_drm_init(void)
CONFIG_ROCKCHIP_DW_HDMI_QP);
ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
+ ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi2_rockchip_driver,
+ CONFIG_ROCKCHIP_DW_MIPI_DSI2);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
CONFIG_ROCKCHIP_RK3066_HDMI);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 24b4ce5ceaf1..c183e82a42a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*
* based on exynos_drm_drv.h
@@ -90,6 +90,7 @@ extern struct platform_driver cdn_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_rockchip_driver;
+extern struct platform_driver dw_mipi_dsi2_rockchip_driver;
extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index cfe8b793d344..dcc1f07632c3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index bae4e079dfb1..5179026b12d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 93ed841f5dce..6330b883efc3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 72f59ac6d258..cdeae36b91a1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 69900138295b..57747f1cff26 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0cf512cc1614..f04c9731ae7b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 9873172e3fd3..17a98845fd31 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -24,16 +24,17 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <uapi/linux/videodev2.h>
#include <dt-bindings/soc/rockchip,vop2.h>
-#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_vop2.h"
#include "rockchip_rgb.h"
@@ -186,6 +187,7 @@ struct vop2 {
*/
u32 registered_num_wins;
+ struct resource *res;
void __iomem *regs;
struct regmap *map;
@@ -237,6 +239,37 @@ struct vop2 {
#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+/*
+ * bus-format types.
+ */
+struct drm_bus_format_enum_list {
+ int type;
+ const char *name;
+};
+
+static const struct drm_bus_format_enum_list drm_bus_format_enum_list[] = {
+ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { MEDIA_BUS_FMT_RGB565_1X16, "RGB565_1X16" },
+ { MEDIA_BUS_FMT_RGB666_1X18, "RGB666_1X18" },
+ { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, "RGB666_1X24_CPADHI" },
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, "RGB666_1X7X3_SPWG" },
+ { MEDIA_BUS_FMT_YUV8_1X24, "YUV8_1X24" },
+ { MEDIA_BUS_FMT_UYYVYY8_0_5X24, "UYYVYY8_0_5X24" },
+ { MEDIA_BUS_FMT_YUV10_1X30, "YUV10_1X30" },
+ { MEDIA_BUS_FMT_UYYVYY10_0_5X30, "UYYVYY10_0_5X30" },
+ { MEDIA_BUS_FMT_RGB888_3X8, "RGB888_3X8" },
+ { MEDIA_BUS_FMT_RGB888_1X24, "RGB888_1X24" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, "RGB888_1X7X4_SPWG" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, "RGB888_1X7X4_JEIDA" },
+ { MEDIA_BUS_FMT_UYVY8_2X8, "UYVY8_2X8" },
+ { MEDIA_BUS_FMT_YUYV8_1X16, "YUYV8_1X16" },
+ { MEDIA_BUS_FMT_UYVY8_1X16, "UYVY8_1X16" },
+ { MEDIA_BUS_FMT_RGB101010_1X30, "RGB101010_1X30" },
+ { MEDIA_BUS_FMT_YUYV10_1X20, "YUYV10_1X20" },
+};
+
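+/*
+ * DRM_ENUM_NAME_FN (include/drm/drm_crtc.h) generates a linear lookup over
+ * the table above; the expansion below is roughly equivalent to this sketch:
+ *
+ *	const char *drm_get_bus_format_name(int val)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < ARRAY_SIZE(drm_bus_format_enum_list); i++)
+ *			if (drm_bus_format_enum_list[i].type == val)
+ *				return drm_bus_format_enum_list[i].name;
+ *
+ *		return "(unknown)";
+ *	}
+ */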
+static DRM_ENUM_NAME_FN(drm_get_bus_format_name, drm_bus_format_enum_list)
+
static const struct regmap_config vop2_regmap_config;
static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
@@ -278,6 +311,15 @@ static u32 vop2_readl(struct vop2 *vop2, u32 offset)
return val;
}
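+
+/*
+ * Like vop2_readl(), but relative to this video port's register block: each
+ * port's registers live at a fixed per-port offset (vp->data->offset) inside
+ * the shared regmap.
+ */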
+static u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset)
+{
+ u32 val;
+
+ regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
+
+ return val;
+}
+
static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
{
regmap_field_write(win->reg[reg], v);
@@ -550,6 +592,25 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
if (modifier == DRM_FORMAT_MOD_INVALID)
return false;
+ if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) {
+ if (vop2_cluster_window(win)) {
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(vop2->drm,
+ "Cluster window only supports format with afbc\n");
+ return false;
+ }
+ }
+ }
+
+ if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) {
+ if (vop2->data->soc_id == 3588) {
+ if (!rockchip_afbc(plane, modifier)) {
+ drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n");
+ return false;
+ }
+ }
+ }
+
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
@@ -998,6 +1059,67 @@ static void vop2_disable(struct vop2 *vop2)
clk_disable_unprepare(vop2->hclk);
}
+static bool vop2_vp_dsp_lut_is_enabled(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ return dsp_ctrl & RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+}
+
+static void vop2_vp_dsp_lut_disable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl &= ~RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static bool vop2_vp_dsp_lut_poll_disabled(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl;
+ int ret = readx_poll_timeout(vop2_vp_dsp_lut_is_enabled, vp, dsp_ctrl,
+ !dsp_ctrl, 5, 30 * 1000);
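+
+ /*
+ * A simplified model of the readx_poll_timeout() call above (the real
+ * helper in include/linux/iopoll.h does ktime-based bookkeeping): loop
+ * forever { dsp_ctrl = vop2_vp_dsp_lut_is_enabled(vp); stop with ret = 0
+ * once !dsp_ctrl; stop with ret = -ETIMEDOUT after 30 ms; otherwise sleep
+ * roughly 5 us and retry }.
+ */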
+ if (ret) {
+ drm_err(vp->vop2->drm, "display LUT RAM disable timeout!\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void vop2_vp_dsp_lut_enable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static void vop2_vp_dsp_lut_update_enable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl |= RK3588_VP_DSP_CTRL__GAMMA_UPDATE_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static inline bool vop2_supports_seamless_gamma_lut_update(struct vop2 *vop2)
+{
+ return (vop2->data->soc_id != 3566 && vop2->data->soc_id != 3568);
+}
+
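+/*
+ * Returns true when a video port other than @vp currently has the display
+ * LUT enabled; rk3566/rk3568 can only drive the gamma LUT for one video
+ * port at a time (see vop2_crtc_atomic_check_gamma()).
+ */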
+static bool vop2_gamma_lut_in_use(struct vop2 *vop2, struct vop2_video_port *vp)
+{
+ const int nr_vps = vop2->data->nr_vps;
+ int gamma_en_vp_id;
+
+ for (gamma_en_vp_id = 0; gamma_en_vp_id < nr_vps; gamma_en_vp_id++)
+ if (vop2_vp_dsp_lut_is_enabled(&vop2->vps[gamma_en_vp_id]))
+ break;
+
+ return gamma_en_vp_id != nr_vps && gamma_en_vp_id != vp->id;
+}
+
static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1271,8 +1393,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_w = drm_rect_width(dest);
if (dest->x1 + dsp_w > adjusted_mode->hdisplay) {
- drm_err(vop2->drm, "vp%d %s dest->x1[%d] + dsp_w[%d] exceed mode hdisplay[%d]\n",
- vp->id, win->data->name, dest->x1, dsp_w, adjusted_mode->hdisplay);
+ drm_dbg_kms(vop2->drm,
+ "vp%d %s dest->x1[%d] + dsp_w[%d] exceed mode hdisplay[%d]\n",
+ vp->id, win->data->name, dest->x1, dsp_w, adjusted_mode->hdisplay);
dsp_w = adjusted_mode->hdisplay - dest->x1;
if (dsp_w < 4)
dsp_w = 4;
@@ -1282,8 +1405,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_h = drm_rect_height(dest);
if (dest->y1 + dsp_h > adjusted_mode->vdisplay) {
- drm_err(vop2->drm, "vp%d %s dest->y1[%d] + dsp_h[%d] exceed mode vdisplay[%d]\n",
- vp->id, win->data->name, dest->y1, dsp_h, adjusted_mode->vdisplay);
+ drm_dbg_kms(vop2->drm,
+ "vp%d %s dest->y1[%d] + dsp_h[%d] exceed mode vdisplay[%d]\n",
+ vp->id, win->data->name, dest->y1, dsp_h, adjusted_mode->vdisplay);
dsp_h = adjusted_mode->vdisplay - dest->y1;
if (dsp_h < 4)
dsp_h = 4;
@@ -1296,15 +1420,15 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
*/
if (!(win->data->feature & WIN_FEATURE_AFBDC)) {
if (actual_w > dsp_w && (actual_w & 0xf) == 1) {
- drm_err(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
- vp->id, win->data->name, actual_w);
+ drm_dbg_kms(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
+ vp->id, win->data->name, actual_w);
actual_w -= 1;
}
}
if (afbc_en && actual_w % 4) {
- drm_err(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
- vp->id, win->data->name, actual_w);
+ drm_dbg_kms(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
+ vp->id, win->data->name, actual_w);
actual_w = ALIGN_DOWN(actual_w, 4);
}
@@ -1320,20 +1444,28 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
+ if (vop2->data->soc_id > 3568) {
+ vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id);
+ vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id);
+ vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id);
+ }
+
if (vop2_cluster_window(win))
vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
if (afbc_en) {
- u32 stride;
+ u32 stride, block_w;
+
+ /* the afbc superblock is 16 x 16 or 32 x 8 */
+ block_w = fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 ? 32 : 16;
- /* the afbc superblock is 16 x 16 */
afbc_format = vop2_convert_afbc_format(fb->format->format);
/* Enable color transform for YTR */
if (fb->modifier & AFBC_FORMAT_MOD_YTR)
afbc_format |= (1 << 4);
- afbc_tile_num = ALIGN(actual_w, 16) >> 4;
+ afbc_tile_num = ALIGN(actual_w, block_w) / block_w;
/*
* AFBC pic_vir_width is count by pixel, this is different
@@ -1341,8 +1473,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
*/
stride = (fb->pitches[0] << 3) / bpp;
if ((stride & 0x3f) && (xmirror || rotate_90 || rotate_270))
- drm_err(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
- vp->id, win->data->name, stride);
+ drm_dbg_kms(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
+ vp->id, win->data->name, stride);
+
+ /* This is the AFBC header stride; each header entry is 16 bytes */
+ stride = ALIGN(stride, block_w) / block_w * 16;
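+
+ /*
+ * Worked example (hypothetical 1920-pixel-wide XRGB8888 plane,
+ * fb->pitches[0] = 7680, bpp = 32): stride = (7680 << 3) / 32 = 1920
+ * pixels; with 16-wide superblocks the header stride is
+ * ALIGN(1920, 16) / 16 * 16 = 1920 bytes (120 headers of 16 bytes each),
+ * with 32x8 superblocks ALIGN(1920, 32) / 32 * 16 = 960 bytes.
+ */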
uv_swap = vop2_afbc_uv_swap(fb->format->format);
/*
@@ -1374,7 +1509,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
else
vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1);
- vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+ if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
+ vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 1);
+ else
+ vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+
transform_offset = vop2_afbc_transform_offset(pstate, half_block_en);
vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
@@ -1482,6 +1621,77 @@ static bool vop2_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static void vop2_crtc_write_gamma_lut(struct vop2 *vop2, struct drm_crtc *crtc)
+{
+ const struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ const struct vop2_video_port_data *vp_data = &vop2->data->vp[vp->id];
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i, bpc = ilog2(vp_data->gamma_lut_len);
+ u32 word;
+
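+ /*
+ * One LUT entry is packed per register word. Worked example (hypothetical
+ * values): with vp_data->gamma_lut_len = 1024, bpc = ilog2(1024) = 10,
+ * each 16-bit uapi component is reduced to 10 bits and packed as
+ * word = B << 20 | G << 10 | R, so an entry of
+ * { .red = 0xffff, .green = 0x8000, .blue = 0x0000 } becomes
+ * (0x000 << 20) | (0x200 << 10) | 0x3ff = 0x000803ff.
+ */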
+ for (i = 0; i < crtc->gamma_size; i++) {
+ word = (drm_color_lut_extract(lut[i].blue, bpc) << (2 * bpc)) |
+ (drm_color_lut_extract(lut[i].green, bpc) << bpc) |
+ drm_color_lut_extract(lut[i].red, bpc);
+
+ writel(word, vop2->lut_regs + i * 4);
+ }
+}
+
+static void vop2_crtc_atomic_set_gamma_seamless(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc)
+{
+ vop2_writel(vop2, RK3568_LUT_PORT_SEL,
+ FIELD_PREP(RK3588_LUT_PORT_SEL__GAMMA_AHB_WRITE_SEL, vp->id));
+ vop2_vp_dsp_lut_enable(vp);
+ vop2_crtc_write_gamma_lut(vop2, crtc);
+ vop2_vp_dsp_lut_update_enable(vp);
+}
+
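+/*
+ * rk3566/rk3568 have no RK3588_VP_DSP_CTRL__GAMMA_UPDATE_EN, so the LUT RAM
+ * can only be rewritten while the LUT is disabled: disable it, latch the
+ * config, wait for the hardware to report it off, then write and re-enable.
+ */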
+static void vop2_crtc_atomic_set_gamma_rk356x(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc)
+{
+ vop2_vp_dsp_lut_disable(vp);
+ vop2_cfg_done(vp);
+ if (!vop2_vp_dsp_lut_poll_disabled(vp))
+ return;
+
+ vop2_writel(vop2, RK3568_LUT_PORT_SEL, vp->id);
+ vop2_crtc_write_gamma_lut(vop2, crtc);
+ vop2_vp_dsp_lut_enable(vp);
+}
+
+static void vop2_crtc_atomic_try_set_gamma(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ if (!vop2->lut_regs)
+ return;
+
+ if (!crtc_state->gamma_lut) {
+ vop2_vp_dsp_lut_disable(vp);
+ return;
+ }
+
+ if (vop2_supports_seamless_gamma_lut_update(vop2))
+ vop2_crtc_atomic_set_gamma_seamless(vop2, vp, crtc);
+ else
+ vop2_crtc_atomic_set_gamma_rk356x(vop2, vp, crtc);
+}
+
+static inline void vop2_crtc_atomic_try_set_gamma_locked(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ vop2_lock(vop2);
+ vop2_crtc_atomic_try_set_gamma(vop2, vp, crtc, crtc_state);
+ vop2_unlock(vop2);
+}
+
static void vop2_dither_setup(struct drm_crtc *crtc, u32 *dsp_ctrl)
{
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
@@ -1721,9 +1931,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
else
dclk_out_rate = v_pixclk >> 2;
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
if (!dclk_rate) {
- drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld KHZ\n",
+ drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
dclk_out_rate);
return 0;
}
@@ -1738,9 +1948,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
* dclk_rate = N * dclk_core_rate N = (1,2,4 ),
* we get a little factor here
*/
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
if (!dclk_rate) {
- drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld KHZ\n",
+ drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
dclk_out_rate);
return 0;
}
@@ -2057,11 +2267,40 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+ vop2_crtc_atomic_try_set_gamma(vop2, vp, crtc, crtc_state);
+
drm_crtc_vblank_on(crtc);
vop2_unlock(vop2);
}
+static int vop2_crtc_atomic_check_gamma(struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop2 *vop2 = vp->vop2;
+ unsigned int len;
+
+ if (!vp->vop2->lut_regs || !crtc_state->color_mgmt_changed ||
+ !crtc_state->gamma_lut)
+ return 0;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ drm_dbg(vop2->drm, "Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+
+ if (!vop2_supports_seamless_gamma_lut_update(vop2) && vop2_gamma_lut_in_use(vop2, vp)) {
+ drm_info(vop2->drm, "Gamma LUT can be enabled for only one CRTC at a time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -2069,6 +2308,11 @@ static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
int nplanes = 0;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ ret = vop2_crtc_atomic_check_gamma(vp, crtc, state, crtc_state);
+ if (ret)
+ return ret;
drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
nplanes++;
@@ -2159,7 +2403,6 @@ static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
{
- u32 offset = (main_win->data->phys_id * 0x10);
struct vop2_alpha_config alpha_config;
struct vop2_alpha alpha;
struct drm_plane_state *bottom_win_pstate;
@@ -2167,6 +2410,7 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
u16 src_glb_alpha_val, dst_glb_alpha_val;
bool premulti_en = false;
bool swap = false;
+ u32 offset = 0;
/* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
bottom_win_pstate = main_win->base.state;
@@ -2185,6 +2429,22 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
vop2_parse_alpha(&alpha_config, &alpha);
alpha.src_color_ctrl.bits.src_dst_swap = swap;
+
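+ /*
+ * Note: the old "phys_id * 0x10" offset only worked while cluster phys
+ * ids were contiguous; with CLUSTER2/3 on rk3588 the ids in
+ * dt-bindings/soc/rockchip,vop2.h are no longer adjacent, while the MIX
+ * register blocks stay packed at 0x00/0x10/0x20/0x30, hence the explicit
+ * mapping below.
+ */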
+ switch (main_win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ offset = 0x0;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ offset = 0x10;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ offset = 0x20;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ offset = 0x30;
+ break;
+ }
+
vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
alpha.src_color_ctrl.val);
vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
@@ -2232,6 +2492,12 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
struct vop2_win *win = to_vop2_win(plane);
int zpos = plane->state->normalized_zpos;
+ /*
+ * Alpha only needs to be configured from the second layer onwards; the
+ * bottom layer has nothing beneath it to blend with.
+ */
+ if (zpos == 0)
+ continue;
+
if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
premulti_en = 1;
else
@@ -2308,7 +2574,10 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct drm_plane *plane;
u32 layer_sel = 0;
u32 port_sel;
- unsigned int nlayer, ofs;
+ u8 layer_id;
+ u8 old_layer_id;
+ u8 layer_sel_id;
+ unsigned int ofs;
u32 ovl_ctrl;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
@@ -2352,9 +2621,30 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
for (i = 0; i < vp->id; i++)
ofs += vop2->vps[i].nlayers;
- nlayer = 0;
drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
struct vop2_win *win = to_vop2_win(plane);
+ struct vop2_win *old_win;
+
+ layer_id = (u8)(plane->state->normalized_zpos + ofs);
+
+ /*
+ * Find the layer this win was bound to in the old state.
+ */
+ for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
+ layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
+ if (layer_sel_id == win->data->layer_sel_id)
+ break;
+ }
+
+ /*
+ * Find the win that was bound to this layer in the old state.
+ */
+ for (i = 0; i < vop2->data->win_size; i++) {
+ old_win = &vop2->win[i];
+ layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
+ if (layer_sel_id == old_win->data->layer_sel_id)
+ break;
+ }
switch (win->data->phys_id) {
case ROCKCHIP_VOP2_CLUSTER0:
@@ -2399,17 +2689,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
break;
}
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
- 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
- win->data->layer_sel_id);
- nlayer++;
- }
-
- /* configure unused layers to 0x5 (reserved) */
- for (; nlayer < vp->nlayers; nlayer++) {
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 5);
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id);
+ /*
+ * When we move a window from layer M to layer N, the window previously
+ * selected by layer N must also be moved to layer M, so that no window
+ * ends up selected by two or more layers.
+ */
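+ /*
+ * Hypothetical example, assuming 4-bit layer_sel fields: if layer0
+ * selects win 5 and layer2 selects win 1, then binding win 1 to layer0
+ * without this swap would leave win 1 selected by both layer0 and
+ * layer2; writing win 5 into layer2 (win 1's old layer) keeps the
+ * layer-to-window mapping one-to-one.
+ */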
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id);
}
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
@@ -2444,9 +2731,11 @@ static void vop2_setup_dly_for_windows(struct vop2 *vop2)
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
break;
case ROCKCHIP_VOP2_SMART0:
+ case ROCKCHIP_VOP2_ESMART2:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
break;
case ROCKCHIP_VOP2_SMART1:
+ case ROCKCHIP_VOP2_ESMART3:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
break;
}
@@ -2487,7 +2776,13 @@ static void vop2_crtc_atomic_begin(struct drm_crtc *crtc,
static void vop2_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
+
+ /* In case of modeset, gamma lut update already happened in atomic enable */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state) && crtc_state->color_mgmt_changed)
+ vop2_crtc_atomic_try_set_gamma_locked(vop2, vp, crtc, crtc_state);
vop2_post_config(crtc);
@@ -2513,6 +2808,228 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
.atomic_disable = vop2_crtc_atomic_disable,
};
+static void vop2_dump_connector_on_crtc(struct drm_crtc *crtc, struct seq_file *s)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (crtc->state->connector_mask & drm_connector_mask(connector))
+ seq_printf(s, " Connector: %s\n", connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static int vop2_plane_state_dump(struct seq_file *s, struct drm_plane *plane)
+{
+ struct vop2_win *win = to_vop2_win(plane);
+ struct drm_plane_state *pstate = plane->state;
+ struct drm_rect *src, *dst;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rk_obj;
+ bool xmirror;
+ bool ymirror;
+ bool rotate_270;
+ bool rotate_90;
+ dma_addr_t fb_addr;
+ int i;
+
+ seq_printf(s, " %s: %s\n", win->data->name, !pstate ?
+ "DISABLED" : pstate->crtc ? "ACTIVE" : "DISABLED");
+
+ if (!pstate || !pstate->fb)
+ return 0;
+
+ fb = pstate->fb;
+ src = &pstate->src;
+ dst = &pstate->dst;
+ xmirror = pstate->rotation & DRM_MODE_REFLECT_X ? true : false;
+ ymirror = pstate->rotation & DRM_MODE_REFLECT_Y ? true : false;
+ rotate_270 = pstate->rotation & DRM_MODE_ROTATE_270;
+ rotate_90 = pstate->rotation & DRM_MODE_ROTATE_90;
+
+ seq_printf(s, "\twin_id: %d\n", win->win_id);
+
+ seq_printf(s, "\tformat: %p4cc%s glb_alpha[0x%x]\n",
+ &fb->format->format,
+ drm_is_afbc(fb->modifier) ? "[AFBC]" : "",
+ pstate->alpha >> 8);
+ seq_printf(s, "\trotate: xmirror: %d ymirror: %d rotate_90: %d rotate_270: %d\n",
+ xmirror, ymirror, rotate_90, rotate_270);
+ seq_printf(s, "\tzpos: %d\n", pstate->normalized_zpos);
+ seq_printf(s, "\tsrc: pos[%d, %d] rect[%d x %d]\n", src->x1 >> 16,
+ src->y1 >> 16, drm_rect_width(src) >> 16,
+ drm_rect_height(src) >> 16);
+ seq_printf(s, "\tdst: pos[%d, %d] rect[%d x %d]\n", dst->x1, dst->y1,
+ drm_rect_width(dst), drm_rect_height(dst));
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ obj = fb->obj[i];
+ rk_obj = to_rockchip_obj(obj);
+ fb_addr = rk_obj->dma_addr + fb->offsets[i];
+
+ seq_printf(s, "\tbuf[%d]: addr: %pad pitch: %d offset: %d\n",
+ i, &fb_addr, fb->pitches[i], fb->offsets[i]);
+ }
+
+ return 0;
+}
+
+static int vop2_crtc_state_dump(struct drm_crtc *crtc, struct seq_file *s)
+{
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct drm_crtc_state *cstate = crtc->state;
+ struct rockchip_crtc_state *vcstate;
+ struct drm_display_mode *mode;
+ struct drm_plane *plane;
+ bool interlaced;
+
+ seq_printf(s, "Video Port%d: %s\n", vp->id, !cstate ?
+ "DISABLED" : cstate->active ? "ACTIVE" : "DISABLED");
+
+ if (!cstate || !cstate->active)
+ return 0;
+
+ mode = &crtc->state->adjusted_mode;
+ vcstate = to_rockchip_crtc_state(cstate);
+ interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ vop2_dump_connector_on_crtc(crtc, s);
+ seq_printf(s, "\tbus_format[%x]: %s\n", vcstate->bus_format,
+ drm_get_bus_format_name(vcstate->bus_format));
+ seq_printf(s, "\toutput_mode[%x]", vcstate->output_mode);
+ seq_printf(s, " color_space[%d]\n", vcstate->color_space);
+ seq_printf(s, " Display mode: %dx%d%s%d\n",
+ mode->hdisplay, mode->vdisplay, interlaced ? "i" : "p",
+ drm_mode_vrefresh(mode));
+ seq_printf(s, "\tclk[%d] real_clk[%d] type[%x] flag[%x]\n",
+ mode->clock, mode->crtc_clock, mode->type, mode->flags);
+ seq_printf(s, "\tH: %d %d %d %d\n", mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal);
+ seq_printf(s, "\tV: %d %d %d %d\n", mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ vop2_plane_state_dump(s, plane);
+ }
+
+ return 0;
+}
+
+static int vop2_summary_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *drm_dev = minor->dev;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(drm_dev);
+ drm_for_each_crtc(crtc, drm_dev) {
+ vop2_crtc_state_dump(crtc, s);
+ }
+ drm_modeset_unlock_all(drm_dev);
+
+ return 0;
+}
+
+static void vop2_regs_print(struct vop2 *vop2, struct seq_file *s,
+ const struct vop2_regs_dump *dump, bool active_only)
+{
+ resource_size_t start;
+ u32 val;
+ int i;
+
+ if (dump->en_mask && active_only) {
+ val = vop2_readl(vop2, dump->base + dump->en_reg);
+ if ((val & dump->en_mask) != dump->en_val)
+ return;
+ }
+
+ seq_printf(s, "\n%s:\n", dump->name);
+
+ start = vop2->res->start + dump->base;
+ for (i = 0; i < dump->size >> 2; i += 4) {
+ seq_printf(s, "%08x: %08x %08x %08x %08x\n", (u32)start + i * 4,
+ vop2_readl(vop2, dump->base + (4 * i)),
+ vop2_readl(vop2, dump->base + (4 * (i + 1))),
+ vop2_readl(vop2, dump->base + (4 * (i + 2))),
+ vop2_readl(vop2, dump->base + (4 * (i + 3))));
+ }
+}
+
+static void __vop2_regs_dump(struct seq_file *s, bool active_only)
+{
+ struct drm_info_node *node = s->private;
+ struct vop2 *vop2 = node->info_ent->data;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *drm_dev = minor->dev;
+ const struct vop2_regs_dump *dump;
+ unsigned int i;
+
+ drm_modeset_lock_all(drm_dev);
+
+ regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
+
+ if (vop2->enable_count) {
+ for (i = 0; i < vop2->data->regs_dump_size; i++) {
+ dump = &vop2->data->regs_dump[i];
+ vop2_regs_print(vop2, s, dump, active_only);
+ }
+ } else {
+ seq_puts(s, "VOP disabled\n");
+ }
+ drm_modeset_unlock_all(drm_dev);
+}
+
+static int vop2_regs_show(struct seq_file *s, void *arg)
+{
+ __vop2_regs_dump(s, false);
+
+ return 0;
+}
+
+static int vop2_active_regs_show(struct seq_file *s, void *data)
+{
+ __vop2_regs_dump(s, true);
+
+ return 0;
+}
+
+static struct drm_info_list vop2_debugfs_list[] = {
+ { "summary", vop2_summary_show, 0, NULL },
+ { "active_regs", vop2_active_regs_show, 0, NULL },
+ { "regs", vop2_regs_show, 0, NULL },
+};
+
+static void vop2_debugfs_init(struct vop2 *vop2, struct drm_minor *minor)
+{
+ struct dentry *root;
+ unsigned int i;
+
+ root = debugfs_create_dir("vop2", minor->debugfs_root);
+ if (!IS_ERR(root)) {
+ for (i = 0; i < ARRAY_SIZE(vop2_debugfs_list); i++)
+ vop2_debugfs_list[i].data = vop2;
+
+ drm_debugfs_create_files(vop2_debugfs_list,
+ ARRAY_SIZE(vop2_debugfs_list),
+ root, minor);
+ }
+}
+
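+/*
+ * With debugfs mounted and this device registered as DRM minor 0, the files
+ * created below land under /sys/kernel/debug/dri/0/vop2/ as "summary",
+ * "regs" and "active_regs"; "active_regs" differs from "regs" only in
+ * skipping register blocks whose en_mask/en_val check shows the unit is
+ * inactive.
+ */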
+static int vop2_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
+
+ if (drm_crtc_index(crtc) == 0)
+ vop2_debugfs_init(vop2, crtc->dev->primary);
+
+ return 0;
+}
+
static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *vcstate;
@@ -2562,6 +3079,7 @@ static const struct drm_crtc_funcs vop2_crtc_funcs = {
.atomic_destroy_state = vop2_crtc_destroy_state,
.enable_vblank = vop2_crtc_enable_vblank,
.disable_vblank = vop2_crtc_disable_vblank,
+ .late_register = vop2_crtc_late_register,
};
static irqreturn_t vop2_isr(int irq, void *data)
@@ -2790,7 +3308,12 @@ static int vop2_create_crtcs(struct vop2 *vop2)
}
drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs);
+ if (vop2->lut_regs) {
+ const struct vop2_video_port_data *vp_data = &vop2_data->vp[vp->id];
+ drm_mode_crtc_set_gamma_size(&vp->crtc, vp_data->gamma_lut_len);
+ drm_crtc_enable_color_mgmt(&vp->crtc, 0, false, vp_data->gamma_lut_len);
+ }
init_completion(&vp->dsp_hold_completion);
}
@@ -2865,6 +3388,10 @@ static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
[VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
[VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
[VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
+ /* RK3588 only, reserved bit on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
/* Scale */
[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
@@ -2957,6 +3484,10 @@ static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
[VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
[VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
[VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
+ /* RK3588 only, reserved register on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
/* Scale */
[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
@@ -3106,6 +3637,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return -EINVAL;
}
+ vop2->res = res;
vop2->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(vop2->regs))
return PTR_ERR(vop2->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 615a16196aff..29cc7fb8f6d8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
@@ -9,6 +9,7 @@
#include <linux/regmap.h>
#include <drm/drm_modes.h>
+#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0)
@@ -78,6 +79,9 @@ enum vop2_win_regs {
VOP2_WIN_COLOR_KEY,
VOP2_WIN_COLOR_KEY_EN,
VOP2_WIN_DITHER_UP,
+ VOP2_WIN_AXI_BUS_ID,
+ VOP2_WIN_AXI_YRGB_R_ID,
+ VOP2_WIN_AXI_UV_R_ID,
/* scale regs */
VOP2_WIN_SCALE_YRGB_X,
@@ -122,6 +126,15 @@ enum vop2_win_regs {
VOP2_WIN_MAX_REG,
};
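+
+/*
+ * Describes one register block for the debugfs dumps: @base/@size bound the
+ * block; when @en_mask is non-zero, the block counts as active only if
+ * (read(@base + @en_reg) & @en_mask) == @en_val.
+ */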
+struct vop2_regs_dump {
+ const char *name;
+ u32 base;
+ u32 size;
+ u32 en_reg;
+ u32 en_val;
+ u32 en_mask;
+};
+
struct vop2_win_data {
const char *name;
unsigned int phys_id;
@@ -140,6 +153,10 @@ struct vop2_win_data {
unsigned int layer_sel_id;
uint64_t feature;
+ uint8_t axi_bus_id;
+ uint8_t axi_yrgb_r_id;
+ uint8_t axi_uv_r_id;
+
unsigned int max_upscale_factor;
unsigned int max_downscale_factor;
const u8 dly[VOP2_DLY_MODE_MAX];
@@ -160,10 +177,12 @@ struct vop2_data {
u64 feature;
const struct vop2_win_data *win;
const struct vop2_video_port_data *vp;
+ const struct vop2_regs_dump *regs_dump;
struct vop_rect max_input;
struct vop_rect max_output;
unsigned int win_size;
+ unsigned int regs_dump_size;
unsigned int soc_id;
};
@@ -308,6 +327,7 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_WIN_CTRL0 0x00
#define RK3568_CLUSTER_WIN_CTRL1 0x04
+#define RK3568_CLUSTER_WIN_CTRL2 0x08
#define RK3568_CLUSTER_WIN_YRGB_MST 0x10
#define RK3568_CLUSTER_WIN_CBR_MST 0x14
#define RK3568_CLUSTER_WIN_VIR 0x18
@@ -330,6 +350,7 @@ enum dst_factor_mode {
/* (E)smart register definition, offset relative to window base */
#define RK3568_SMART_CTRL0 0x00
#define RK3568_SMART_CTRL1 0x04
+#define RK3588_SMART_AXI_CTRL 0x08
#define RK3568_SMART_REGION0_CTRL 0x10
#define RK3568_SMART_REGION0_YRGB_MST 0x14
#define RK3568_SMART_REGION0_CBR_MST 0x18
@@ -394,6 +415,7 @@ enum dst_factor_mode {
#define RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN BIT(15)
#define RK3568_VP_DSP_CTRL__STANDBY BIT(31)
+#define RK3568_VP_DSP_CTRL__DSP_LUT_EN BIT(28)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_MODE BIT(20)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_SEL GENMASK(19, 18)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_EN BIT(17)
@@ -408,6 +430,8 @@ enum dst_factor_mode {
#define RK3568_VP_DSP_CTRL__CORE_DCLK_DIV BIT(4)
#define RK3568_VP_DSP_CTRL__OUT_MODE GENMASK(3, 0)
+#define RK3588_VP_DSP_CTRL__GAMMA_UPDATE_EN BIT(22)
+
#define RK3588_VP_CLK_CTRL__DCLK_OUT_DIV GENMASK(3, 2)
#define RK3588_VP_CLK_CTRL__DCLK_CORE_DIV GENMASK(1, 0)
@@ -460,6 +484,8 @@ enum dst_factor_mode {
#define RK3588_DSP_IF_POL__DP1_PIN_POL GENMASK(14, 12)
#define RK3588_DSP_IF_POL__DP0_PIN_POL GENMASK(10, 8)
+#define RK3588_LUT_PORT_SEL__GAMMA_AHB_WRITE_SEL GENMASK(13, 12)
+
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2_PHASE_LOCK BIT(5)
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2 BIT(4)
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index aba733736ff7..385cf6881504 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Mark Yao <mark.yao@rock-chips.com>
* Sandy Huang <hjc@rock-chips.com>
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index 4ce967d23813..ca83d7b6bea7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
* Mark Yao <mark.yao@rock-chips.com>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index dbfbde24698e..811020665120 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 1bd4e20e91eb..116f958b894d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index f9d87a0abc8b..65a88f489693 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Andy Yan <andy.yan@rock-chips.com>
*/
@@ -258,6 +258,88 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
},
};
+static const struct vop2_regs_dump rk3568_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL",
+ .base = RK3568_OVL_CTRL,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Smart0",
+ .base = RK3568_SMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Smart1",
+ .base = RK3568_SMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
{
.id = 0,
@@ -313,7 +395,7 @@ static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
* AXI1 is a read only bus.
*
 * Every window on an AXI bus must be assigned two unique
- * read id(yrgb_id/uv_id, valid id are 0x1~0xe).
+ * read ids (yrgb_r_id/uv_r_id; valid ids are 0x1~0xe).
*
* AXI0:
* Cluster0/1, Esmart0/1, WriteBack
@@ -333,6 +415,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 0,
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 2,
+ .axi_uv_r_id = 3,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -349,6 +434,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -364,6 +452,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 2,
+ .axi_uv_r_id = 3,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -379,6 +470,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -393,6 +487,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 2,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -406,6 +503,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 3,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x01,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -419,6 +519,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 6,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -432,12 +535,118 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 7,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x0d,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
},
};
+static const struct vop2_regs_dump rk3588_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL",
+ .base = RK3568_OVL_CTRL,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+
+ }, {
+ .name = "VP3",
+ .base = RK3588_VP3_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster2",
+ .base = RK3588_CLUSTER2_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster3",
+ .base = RK3588_CLUSTER3_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart2",
+ .base = RK3588_ESMART2_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart3",
+ .base = RK3588_ESMART3_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_data rk3566_vop = {
.feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
@@ -446,6 +655,8 @@ static const struct vop2_data rk3566_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .regs_dump = rk3568_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
.soc_id = 3566,
};
@@ -457,6 +668,8 @@ static const struct vop2_data rk3568_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .regs_dump = rk3568_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
.soc_id = 3568,
};
@@ -469,6 +682,8 @@ static const struct vop2_data rk3588_vop = {
.vp = rk3588_vop_video_ports,
.win = rk3588_vop_win_data,
.win_size = ARRAY_SIZE(rk3588_vop_win_data),
+ .regs_dump = rk3588_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3588_regs_dump),
.soc_id = 3588,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 8998967f0c00..4e2099d86517 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index fbf1bcc68625..addf8ca085f6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 486d8f5282f9..b777690fd660 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -18,9 +18,9 @@
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
@@ -39,7 +39,6 @@
#define DRIVER_NAME "ssd130x"
#define DRIVER_DESC "DRM driver for Solomon SSD13xx OLED displays"
-#define DRIVER_DATE "20220131"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -1784,7 +1783,6 @@ static const struct drm_driver ssd130x_drm_driver = {
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
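(The DRIVER_DATE/.date removals in this and the following drivers are part of the tree-wide drop of the unused date field from struct drm_driver.)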
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index bc1c747d3ea4..ceacdcb7c566 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -23,7 +23,6 @@
#define DRIVER_NAME "sprd"
#define DRIVER_DESC "Spreadtrum SoCs' DRM Driver"
-#define DRIVER_DATE "20200201"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -59,7 +58,6 @@ static struct drm_driver sprd_drm_drv = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 61ceff9aee7e..5e9332df21df 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -13,9 +13,9 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -29,7 +29,6 @@
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
-#define DRIVER_DATE "20140601"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -143,7 +142,6 @@ static const struct drm_driver sti_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 21b46a6465f0..ca2fe17de4a5 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1225,7 +1225,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size
struct drm_connector *connector = hdmi->drm_connector;
DRM_DEBUG_DRIVER("\n");
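+
+ /*
+ * connector->eld can be rewritten while a new EDID is parsed; holding
+ * eld_mutex around the copy keeps the audio callback from seeing a
+ * partially updated ELD.
+ */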
+ mutex_lock(&connector->eld_mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
return 0;
}
@@ -1235,7 +1237,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
- .no_capture_mute = 1,
};
static int sti_hdmi_register_audio_driver(struct device *dev,
@@ -1245,6 +1246,7 @@ static int sti_hdmi_register_audio_driver(struct device *dev,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
+ .no_capture_mute = 1,
};
DRM_DEBUG_DRIVER("\n");
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index bf090a354989..8ebcaf953782 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -16,9 +16,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -62,7 +62,6 @@ static const struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
- .date = "20170330",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 5eccf58f2e17..c11dfb2739fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -15,8 +15,8 @@
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -50,7 +50,6 @@ static const struct drm_driver sun4i_drv_driver = {
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
- .date = "20150629",
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 453f19f16ab7..ab0938ba61f7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -187,34 +187,6 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
return MODE_NOCLOCK;
}
-static int sun4i_hdmi_connector_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
-{
- struct drm_connector_state *conn_state =
- drm_atomic_get_new_connector_state(state, connector);
- struct drm_crtc *crtc = conn_state->crtc;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
- enum drm_mode_status status;
-
- status = sun4i_hdmi_connector_clock_valid(connector, mode,
- conn_state->hdmi.tmds_char_rate);
- if (status != MODE_OK)
- return -EINVAL;
-
- return 0;
-}
-
-static enum drm_mode_status
-sun4i_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- unsigned long long rate = drm_hdmi_compute_mode_clock(mode, 8,
- HDMI_COLORSPACE_RGB);
-
- return sun4i_hdmi_connector_clock_valid(connector, mode, rate);
-}
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -268,8 +240,8 @@ static const struct drm_connector_hdmi_funcs sun4i_hdmi_hdmi_connector_funcs = {
};
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
- .atomic_check = sun4i_hdmi_connector_atomic_check,
- .mode_valid = sun4i_hdmi_connector_mode_valid,
+ .atomic_check = drm_atomic_helper_connector_hdmi_check,
+ .mode_valid = drm_hdmi_connector_mode_valid,
.get_modes = sun4i_hdmi_get_modes,
};
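(The removed sun4i wrappers duplicated what the generic HDMI connector helpers now provide: drm_atomic_helper_connector_hdmi_check() and drm_hdmi_connector_mode_valid() validate the TMDS character rate through the connector's drm_connector_hdmi_funcs, so the driver can use them directly.)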
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bf3421667ecc..4596073fe28f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -13,9 +13,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
@@ -35,7 +35,6 @@
#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -901,7 +900,6 @@ static const struct drm_driver tegra_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 6bba97d0be88..22e2d959eb31 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -9,6 +9,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
@@ -181,6 +182,465 @@ static struct kunit_suite drmm_connector_init_test_suite = {
.test_cases = drmm_connector_init_tests,
};
+static const struct drm_connector_funcs dummy_dynamic_init_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .destroy = drm_connector_cleanup,
+};
+
+/*
+ * Test that the initialization of a bog standard dynamic connector works
+ * as expected and doesn't report any error.
+ */
+static void drm_test_drm_connector_dynamic_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+static void drm_test_connector_dynamic_init_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+
+ drm_connector_cleanup(connector);
+}
+
+/*
+ * Test that the initialization of a dynamic connector without a DDC adapter
+ * doesn't report any error.
+ */
+static void drm_test_drm_connector_dynamic_init_null_ddc(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the initialization of a dynamic connector doesn't add the
+ * connector to the connector list.
+ */
+static void drm_test_drm_connector_dynamic_init_not_added(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_PTR_EQ(test, connector->head.next, &connector->head);
+}
+
+static void test_connector_property(struct kunit *test,
+ struct drm_connector *connector,
+ const struct drm_property *expected_prop)
+{
+ struct drm_property *prop;
+ uint64_t val;
+ int ret;
+
+ KUNIT_ASSERT_NOT_NULL(test, expected_prop);
+ prop = drm_mode_obj_find_prop_id(&connector->base, expected_prop->base.id);
+ KUNIT_ASSERT_PTR_EQ_MSG(test, prop, expected_prop,
+ "Can't find property %s", expected_prop->name);
+
+ ret = drm_object_property_get_default_value(&connector->base, prop, &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, 0);
+
+ /* TODO: Check property value in the connector state. */
+}
+
+/*
+ * Test that the initialization of a dynamic connector adds all the expected
+ * properties to it.
+ */
+static void drm_test_drm_connector_dynamic_init_properties(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_mode_config *config = &priv->drm.mode_config;
+ const struct drm_property *props[] = {
+ config->edid_property,
+ config->dpms_property,
+ config->link_status_property,
+ config->non_desktop_property,
+ config->tile_property,
+ config->prop_crtc_id,
+ };
+ int ret;
+ int i;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ for (i = 0; i < ARRAY_SIZE(props); i++)
+ test_connector_property(test, connector, props[i]);
+}
+
+/*
+ * Test that the initialization of a dynamic connector succeeds for all
+ * possible connector types.
+ */
+static void drm_test_drm_connector_dynamic_init_type_valid(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ unsigned int connector_type = *(unsigned int *)test->param_value;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ connector_type,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the initialization of a dynamic connector sets the expected name
+ * for it for all possible connector types.
+ */
+static void drm_test_drm_connector_dynamic_init_name(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ unsigned int connector_type = *(unsigned int *)test->param_value;
+ char expected_name[128];
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ connector_type,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ snprintf(expected_name, sizeof(expected_name), "%s-%d",
+ drm_get_connector_type_name(connector_type), connector->connector_type_id);
+ KUNIT_ASSERT_STREQ(test, connector->name, expected_name);
+}
+
+static struct kunit_case drm_connector_dynamic_init_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_null_ddc),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_not_added),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_properties),
+ KUNIT_CASE_PARAM(drm_test_drm_connector_dynamic_init_type_valid,
+ drm_connector_init_type_valid_gen_params),
+ KUNIT_CASE_PARAM(drm_test_drm_connector_dynamic_init_name,
+ drm_connector_init_type_valid_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_connector_dynamic_init_test_suite = {
+ .name = "drm_connector_dynamic_init",
+ .init = drm_test_connector_init,
+ .exit = drm_test_connector_dynamic_init_cleanup,
+ .test_cases = drm_connector_dynamic_init_tests,
+};
+
+static int drm_test_connector_dynamic_register_early_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv;
+ int ret;
+
+ ret = drm_test_connector_init(test);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv = test->priv;
+
+ ret = drm_connector_dynamic_init(&priv->drm, &priv->connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
+
+static void drm_test_connector_dynamic_register_early_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+
+ drm_connector_unregister(connector);
+ drm_connector_put(connector);
+}
+
+/*
+ * Test that registration of a dynamic connector adds it to the connector list.
+ */
+static void drm_test_drm_connector_dynamic_register_early_on_list(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&connector->head));
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_PTR_EQ(test, connector->head.next, &priv->drm.mode_config.connector_list);
+}
+
+/*
+ * Test that the registration of a dynamic connector before the drm device is
+ * registered results in deferring the connector's user interface registration.
+ */
+static void drm_test_drm_connector_dynamic_register_early_defer(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, connector->registration_state, DRM_CONNECTOR_INITIALIZING);
+}
+
+/*
+ * Test that the registration of a dynamic connector fails if it is attempted
+ * before the connector is initialized.
+ */
+static void drm_test_drm_connector_dynamic_register_early_no_init(struct kunit *test)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ connector = kunit_kzalloc(test, sizeof(*connector), GFP_KERNEL); /* auto freed */
+ KUNIT_ASSERT_NOT_NULL(test, connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, -EINVAL);
+}
+
+/*
+ * Test that the registration of a dynamic connector before the drm device
+ * is registered defers adding a mode object for the connector.
+ */
+static void drm_test_drm_connector_dynamic_register_early_no_mode_object(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_connector *tmp_connector;
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_NULL(test, tmp_connector);
+}
+
+static struct kunit_case drm_connector_dynamic_register_early_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_on_list),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_defer),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_no_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_no_mode_object),
+ { }
+};
+
+static struct kunit_suite drm_connector_dynamic_register_early_test_suite = {
+ .name = "drm_connector_dynamic_register_early",
+ .init = drm_test_connector_dynamic_register_early_init,
+ .exit = drm_test_connector_dynamic_register_early_cleanup,
+ .test_cases = drm_connector_dynamic_register_early_tests,
+};
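/*
 * A condensed sketch of the driver-side sequence the "early" suite models
 * (the helper name is hypothetical): a dynamic connector, e.g. a DP MST
 * port, is initialized and registered before drm_dev_register(), so its
 * userspace-facing registration is deferred until the device registers.
 */
static int example_early_bring_up(struct drm_device *drm,
				  struct drm_connector *conn,
				  struct i2c_adapter *ddc)
{
	int ret;

	ret = drm_connector_dynamic_init(drm, conn, &dummy_dynamic_init_funcs,
					 DRM_MODE_CONNECTOR_DisplayPort, ddc);
	if (ret)
		return ret;

	/* Deferred: the drm device is not registered yet. */
	ret = drm_connector_dynamic_register(conn);
	if (ret)
		return ret;

	/* Completes the deferred connector registration as well. */
	return drm_dev_register(drm, 0);
}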
+
+static int drm_test_connector_dynamic_register_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv;
+ int ret;
+
+ ret = drm_test_connector_dynamic_register_early_init(test);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv = test->priv;
+
+ ret = drm_dev_register(priv->connector.dev, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
+
+static void drm_test_connector_dynamic_register_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_device *dev = priv->connector.dev;
+
+ drm_connector_unregister(&priv->connector);
+ drm_connector_put(&priv->connector);
+
+ drm_dev_unregister(dev);
+
+ drm_test_connector_dynamic_register_early_cleanup(test);
+}
+
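+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to the connector list.
+ */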
+static void drm_test_drm_connector_dynamic_register_on_list(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&priv->connector.head));
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_PTR_EQ(test, priv->connector.head.next, &priv->drm.mode_config.connector_list);
+}
+
+/*
+ * Test that the registration of a dynamic connector is not deferred when it
+ * happens after the drm device is registered.
+ */
+static void drm_test_drm_connector_dynamic_register_no_defer(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_EQ(test, priv->connector.registration_state, DRM_CONNECTOR_INITIALIZING);
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, priv->connector.registration_state, DRM_CONNECTOR_REGISTERED);
+}
+
+/*
+ * Test that the registration of a dynamic connector fails if it is attempted
+ * after the drm device is registered but before the connector is initialized.
+ */
+static void drm_test_drm_connector_dynamic_register_no_init(struct kunit *test)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ connector = kunit_kzalloc(test, sizeof(*connector), GFP_KERNEL); /* auto freed */
+ KUNIT_ASSERT_NOT_NULL(test, connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, -EINVAL);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the mode object for the connector.
+ */
+static void drm_test_drm_connector_dynamic_register_mode_object(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_connector *tmp_connector;
+ int ret;
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_NULL(test, tmp_connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_PTR_EQ(test, tmp_connector, connector);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to sysfs.
+ */
+static void drm_test_drm_connector_dynamic_register_sysfs(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ KUNIT_ASSERT_NULL(test, connector->kdev);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_NOT_NULL(test, connector->kdev);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered sets the connector's sysfs name as expected.
+ */
+static void drm_test_drm_connector_dynamic_register_sysfs_name(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ char expected_name[128];
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ snprintf(expected_name, sizeof(expected_name), "card%d-%s",
+ connector->dev->primary->index, connector->name);
+
+ KUNIT_ASSERT_STREQ(test, dev_name(connector->kdev), expected_name);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to debugfs.
+ */
+static void drm_test_drm_connector_dynamic_register_debugfs(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_NULL(test, priv->connector.debugfs_entry);
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ KUNIT_ASSERT_NOT_NULL(test, priv->connector.debugfs_entry);
+ else
+ KUNIT_ASSERT_NULL(test, priv->connector.debugfs_entry);
+}
+
+static struct kunit_case drm_connector_dynamic_register_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_on_list),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_no_defer),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_no_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_mode_object),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_sysfs),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_sysfs_name),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_debugfs),
+ { }
+};
+
+static struct kunit_suite drm_connector_dynamic_register_test_suite = {
+ .name = "drm_connector_dynamic_register",
+ .init = drm_test_connector_dynamic_register_init,
+ .exit = drm_test_connector_dynamic_register_cleanup,
+ .test_cases = drm_connector_dynamic_register_tests,
+};
+
/*
* Test that the registration of a bog standard connector works as
* expected and doesn't report any error.
@@ -635,6 +1095,64 @@ static void drm_test_connector_hdmi_init_formats_no_rgb(struct kunit *test)
KUNIT_EXPECT_LT(test, ret, 0);
}
+struct drm_connector_hdmi_init_formats_yuv420_allowed_test {
+ unsigned long supported_formats;
+ bool yuv420_allowed;
+ int expected_result;
+};
+
+#define YUV420_ALLOWED_TEST(_formats, _allowed, _result) \
+ { \
+ .supported_formats = BIT(HDMI_COLORSPACE_RGB) | (_formats), \
+ .yuv420_allowed = _allowed, \
+ .expected_result = _result, \
+ }
+
+static const struct drm_connector_hdmi_init_formats_yuv420_allowed_test
+drm_connector_hdmi_init_formats_yuv420_allowed_tests[] = {
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), true, 0),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), false, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), true, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), false, 0),
+};
+
+static void
+drm_connector_hdmi_init_formats_yuv420_allowed_desc(const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *t,
+ char *desc)
+{
+ sprintf(desc, "supported_formats=0x%lx yuv420_allowed=%d",
+ t->supported_formats, t->yuv420_allowed);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_tests,
+ drm_connector_hdmi_init_formats_yuv420_allowed_desc);
+
+/*
+ * Test that the registration of an HDMI connector succeeds only when
+ * the presence of YUV420 in the supported formats matches the value
+ * of the ycbcr_420_allowed flag.
+ */
+static void drm_test_connector_hdmi_init_formats_yuv420_allowed(struct kunit *test)
+{
+ const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *params;
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ params = test->param_value;
+ priv->connector.ycbcr_420_allowed = params->yuv420_allowed;
+
+ ret = drmm_connector_hdmi_init(&priv->drm, &priv->connector,
+ "Vendor", "Product",
+ &dummy_funcs,
+ &dummy_hdmi_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &priv->ddc,
+ params->supported_formats,
+ 8);
+ KUNIT_EXPECT_EQ(test, ret, params->expected_result);
+}
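/*
 * The invariant exercised above: drmm_connector_hdmi_init() should fail when
 * the presence of YUV420 in supported_formats disagrees with the connector's
 * ycbcr_420_allowed flag. A sketch of the consistency check being relied on
 * (hypothetical helper, not the in-tree code):
 */
static int check_yuv420_consistency(const struct drm_connector *connector,
				    unsigned long supported_formats)
{
	bool supports_yuv420 = supported_formats & BIT(HDMI_COLORSPACE_YUV420);

	if (supports_yuv420 != connector->ycbcr_420_allowed)
		return -EINVAL;

	return 0;
}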
+
/*
* Test that the registration of an HDMI connector with an HDMI
* connector type succeeds.
@@ -726,6 +1244,8 @@ static struct kunit_case drmm_connector_hdmi_init_tests[] = {
KUNIT_CASE(drm_test_connector_hdmi_init_bpc_null),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_empty),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_no_rgb),
+ KUNIT_CASE_PARAM(drm_test_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_gen_params),
KUNIT_CASE(drm_test_connector_hdmi_init_null_ddc),
KUNIT_CASE(drm_test_connector_hdmi_init_null_product),
KUNIT_CASE(drm_test_connector_hdmi_init_null_vendor),
@@ -1283,6 +1803,9 @@ static struct kunit_suite drm_hdmi_compute_mode_clock_test_suite = {
kunit_test_suites(
&drmm_connector_hdmi_init_test_suite,
&drmm_connector_init_test_suite,
+ &drm_connector_dynamic_init_test_suite,
+ &drm_connector_dynamic_register_early_test_suite,
+ &drm_connector_dynamic_register_test_suite,
&drm_connector_attach_broadcast_rgb_property_test_suite,
&drm_get_tv_mode_from_name_test_suite,
&drm_hdmi_compute_mode_clock_test_suite,
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 89cd9e4f4d32..9e0e2fb65944 100644
--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -199,10 +199,8 @@ static const struct drm_dp_mst_calc_pbn_div_test drm_dp_mst_calc_pbn_div_dp1_4_c
static void drm_test_dp_mst_calc_pbn_div(struct kunit *test)
{
const struct drm_dp_mst_calc_pbn_div_test *params = test->param_value;
- /* mgr->dev is only needed by drm_dbg_kms(), but it's not called for the test cases. */
- struct drm_dp_mst_topology_mgr *mgr = test->priv;
- KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(mgr, params->link_rate, params->lane_count).full,
+ KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(params->link_rate, params->lane_count).full,
params->expected.full);
}
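/*
 * With the mgr argument gone, drm_dp_get_vc_payload_bw() no longer needs any
 * topology state. A minimal usage sketch (the rate/lane values are
 * illustrative, in the same units as the test's parameter table):
 */
static void example_payload_bw(void)
{
	fixed20_12 bw;

	/* e.g. an HBR2 link rate with 4 lanes */
	bw = drm_dp_get_vc_payload_bw(540000, 4);
	(void)bw.full;	/* compared via .full in the test above */
}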
@@ -568,21 +566,8 @@ static struct kunit_case drm_dp_mst_helper_tests[] = {
{ }
};
-static int drm_dp_mst_helper_tests_init(struct kunit *test)
-{
- struct drm_dp_mst_topology_mgr *mgr;
-
- mgr = kunit_kzalloc(test, sizeof(*mgr), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mgr);
-
- test->priv = mgr;
-
- return 0;
-}
-
static struct kunit_suite drm_dp_mst_helper_test_suite = {
.name = "drm_dp_mst_helper",
- .init = drm_dp_mst_helper_tests_init,
.test_cases = drm_dp_mst_helper_tests,
};
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 294773342e71..b976a5e9aef5 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -46,7 +46,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
struct drm_display_mode *mode, *preferred;
mutex_lock(&drm->mode_config.mutex);
- preferred = list_first_entry(&connector->modes, struct drm_display_mode, head);
+ preferred = list_first_entry_or_null(&connector->modes, struct drm_display_mode, head);
list_for_each_entry(mode, &connector->modes, head)
if (mode->type & DRM_MODE_TYPE_PREFERRED)
preferred = mode;
@@ -105,9 +105,8 @@ static int set_connector_edid(struct kunit *test, struct drm_connector *connecto
mutex_lock(&drm->mode_config.mutex);
ret = connector->funcs->fill_modes(connector, 4096, 4096);
mutex_unlock(&drm->mode_config.mutex);
- KUNIT_ASSERT_GT(test, ret, 0);
- return 0;
+ return ret;
}
static const struct drm_connector_hdmi_funcs dummy_connector_hdmi_funcs = {
@@ -125,6 +124,18 @@ static const struct drm_connector_hdmi_funcs reject_connector_hdmi_funcs = {
.tmds_char_rate_valid = reject_connector_tmds_char_rate_valid,
};
+static enum drm_mode_status
+reject_100MHz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
+{
+ return (tmds_rate > 100ULL * 1000 * 1000) ? MODE_BAD : MODE_OK;
+}
+
+static const struct drm_connector_hdmi_funcs reject_100_MHz_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = reject_100MHz_connector_tmds_char_rate_valid,
+};
+
static int dummy_connector_get_modes(struct drm_connector *connector)
{
struct drm_atomic_helper_connector_hdmi_priv *priv =
@@ -147,6 +158,7 @@ static int dummy_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs dummy_connector_helper_funcs = {
.atomic_check = drm_atomic_helper_connector_hdmi_check,
.get_modes = dummy_connector_get_modes,
+ .mode_valid = drm_hdmi_connector_mode_valid,
};
static void dummy_hdmi_connector_reset(struct drm_connector *connector)
@@ -164,9 +176,10 @@ static const struct drm_connector_funcs dummy_connector_funcs = {
static
struct drm_atomic_helper_connector_hdmi_priv *
-drm_atomic_helper_connector_hdmi_init(struct kunit *test,
- unsigned int formats,
- unsigned int max_bpc)
+drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
@@ -208,7 +221,7 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
ret = drmm_connector_hdmi_init(drm, conn,
"Vendor", "Product",
&dummy_connector_funcs,
- &dummy_connector_hdmi_funcs,
+ hdmi_funcs,
DRM_MODE_CONNECTOR_HDMIA,
NULL,
formats,
@@ -220,10 +233,27 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
drm_mode_config_reset(drm);
- ret = set_connector_edid(test, conn,
+ return priv;
+}
+
+static
+struct drm_atomic_helper_connector_hdmi_priv *
+drm_kunit_helper_connector_hdmi_init(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ formats, max_bpc,
+ &dummy_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ ret = set_connector_edid(test, &priv->connector,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
return priv;
}
@@ -247,9 +277,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
@@ -310,9 +340,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
@@ -373,9 +403,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -429,9 +459,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -485,9 +515,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -543,9 +573,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -601,9 +631,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -659,9 +689,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -719,16 +749,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -793,16 +823,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -862,18 +892,18 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_dvi_1080p,
ARRAY_SIZE(test_edid_dvi_1080p));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
@@ -911,16 +941,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -958,16 +988,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -1005,16 +1035,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -1056,9 +1086,9 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
@@ -1112,16 +1142,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1179,18 +1209,18 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1242,11 +1272,11 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -1254,7 +1284,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1305,16 +1335,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1370,18 +1400,18 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1438,16 +1468,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1496,18 +1526,18 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1538,6 +1568,57 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
}
+/* Test that atomic check succeeds when disabling a connector. */
+static void drm_test_check_disable_connector(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_display_mode *preferred;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ conn = &priv->connector;
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->active = false;
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
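/*
 * The disable sequence above (active = false, NULL mode, NULL CRTC) follows
 * the same shape as what drm_atomic_helper_disable_all() applies per
 * connector, so passing atomic check here is the baseline expectation when
 * tearing a connector down.
 */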
+
static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
KUNIT_CASE(drm_test_check_broadcast_rgb_auto_cea_mode),
KUNIT_CASE(drm_test_check_broadcast_rgb_auto_cea_mode_vic_1),
@@ -1552,6 +1633,7 @@ static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
*/
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_changed),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_not_changed),
+ KUNIT_CASE(drm_test_check_disable_connector),
KUNIT_CASE(drm_test_check_hdmi_funcs_reject_rate),
KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback),
KUNIT_CASE(drm_test_check_max_tmds_rate_format_fallback),
@@ -1593,9 +1675,9 @@ static void drm_test_check_broadcast_rgb_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1615,9 +1697,9 @@ static void drm_test_check_bpc_8_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1639,9 +1721,9 @@ static void drm_test_check_bpc_10_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1663,9 +1745,9 @@ static void drm_test_check_bpc_12_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1685,11 +1767,11 @@ static void drm_test_check_format_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1707,11 +1789,11 @@ static void drm_test_check_tmds_char_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1734,9 +1816,148 @@ static struct kunit_suite drm_atomic_helper_connector_hdmi_reset_test_suite = {
.test_cases = drm_atomic_helper_connector_hdmi_reset_tests,
};
+/*
+ * Test that the default behaviour for drm_hdmi_connector_mode_valid() is not
+ * to reject any modes. Pass a correct EDID and verify that preferred mode
+ * matches the expectations (1080p).
+ */
+static void drm_test_check_mode_valid(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 1920);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 1080);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 148500);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() rejects modes according to the
+ * .tmds_char_rate_valid() callback. Pass a correct EDID and verify that
+ * high-rate modes are filtered out.
+ */
+static void drm_test_check_mode_valid_reject_rate(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_100_MHz_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_200mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ /*
+ * Unlike in drm_test_check_mode_valid(), 1080p is rejected here, while
+ * 480p is still allowed.
+ */
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 25200);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() marks no modes as valid when
+ * .tmds_char_rate_valid() rejects all of them. Pass a correct EDID and
+ * verify that there is no preferred mode and that no modes were set on the
+ * connector.
+ */
+static void drm_test_check_mode_valid_reject(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ /* should reject all modes */
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_200mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NULL(test, preferred);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() rejects modes that don't pass
+ * the info.max_tmds_clock filter. Pass a crafted EDID and verify that
+ * high-rate modes are filtered out.
+ */
+static void drm_test_check_mode_valid_reject_max_clock(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_100mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_100mhz));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, conn->display_info.max_tmds_clock, 100 * 1000);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 25200);
+}
+
+static struct kunit_case drm_atomic_helper_connector_hdmi_mode_valid_tests[] = {
+ KUNIT_CASE(drm_test_check_mode_valid),
+ KUNIT_CASE(drm_test_check_mode_valid_reject),
+ KUNIT_CASE(drm_test_check_mode_valid_reject_rate),
+ KUNIT_CASE(drm_test_check_mode_valid_reject_max_clock),
+ { }
+};
+
+static struct kunit_suite drm_atomic_helper_connector_hdmi_mode_valid_test_suite = {
+ .name = "drm_atomic_helper_connector_hdmi_mode_valid",
+ .test_cases = drm_atomic_helper_connector_hdmi_mode_valid_tests,
+};
+
kunit_test_suites(
&drm_atomic_helper_connector_hdmi_check_test_suite,
&drm_atomic_helper_connector_hdmi_reset_test_suite,
+ &drm_atomic_helper_connector_hdmi_mode_valid_test_suite,
);
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h
index 107559900e97..6358397a5d7a 100644
--- a/drivers/gpu/drm/tests/drm_kunit_edid.h
+++ b/drivers/gpu/drm/tests/drm_kunit_edid.h
@@ -74,6 +74,108 @@ static const unsigned char test_edid_dvi_1080p[] = {
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
* 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
+ * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 42
+ * Made in: 2023
+ * Basic Display Parameters & Features:
+ * Digital display
+ * DFP 1.x compatible TMDS
+ * Maximum image size: 160 cm x 90 cm
+ * Gamma: 2.20
+ * Monochrome or grayscale display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.0000, 0.0000
+ * Green: 0.0000, 0.0000
+ * Blue : 0.0000, 0.0000
+ * White: 0.0000, 0.0000
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (1600 mm x 900 mm)
+ * Hfront 88 Hsync 44 Hback 148 Hpol P
+ * Vfront 4 Vsync 5 Vback 36 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0x92
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Underscans IT Video Formats by default
+ * Native detailed modes: 1
+ * Colorimetry Data Block:
+ * sRGB
+ * Video Data Block:
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: No Data
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.2.3.4
+ * Maximum TMDS clock: 100 MHz
+ * Extended HDMI video details:
+ * Checksum: 0xe4 Unused space in Extension Block: 100 bytes
+ */
+static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x01, 0x03, 0x81, 0xa0, 0x5a, 0x78,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+ 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
+ 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xe4
+};
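/*
 * The 100 MHz limit above comes from the Max_TMDS_Clock byte of the HDMI
 * Vendor-Specific Data Block, which encodes the rate in 5 MHz units:
 * 0x14 (20) * 5 MHz = 100 MHz here, versus 0x28 (40) * 5 MHz = 200 MHz in
 * the 200 MHz EDID that follows. A sketch of the decode into the kHz value
 * checked by drm_test_check_mode_valid_reject_max_clock():
 */
static unsigned int example_vsdb_max_tmds_clock_khz(unsigned char raw)
{
	/* CTA-861 HDMI VSDB: Max_TMDS_Clock is stored in 5 MHz steps */
	return raw * 5 * 1000;
}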
+
+/*
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00
+ * 00 21 01 03 81 a0 5a 78 02 00 00 00 00 00 00 00
+ * 00 00 00 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 02 3a 80 18 71 38 2d 40 58 2c
+ * 45 00 40 84 63 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 32
+ * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
+ *
+ * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
* 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 04a6b8cc62ac..3c0b7824c0be 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -320,8 +320,7 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
- * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC
- for a KUnit test
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
* @video_code: CEA VIC of the mode
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 1ad711f8d2a8..cacb5f3d8085 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -700,7 +700,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
{
dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
- /* clear the irqstatus for newly enabled irqs */
+ /* clear the irqstatus for irqs that will be enabled */
dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
dispc_k2g_vp_set_irqenable(dispc, 0, mask);
@@ -708,6 +708,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+ /* clear the irqstatus for irqs that were disabled */
+ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask);
+
/* flush posted write */
dispc_k2g_read_irqenable(dispc);
}
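/*
 * Worked example of the mask algebra used by the two clear operations:
 * (mask ^ old_mask) selects the bits that change state; ANDing with mask
 * keeps the bits being turned on, ANDing with old_mask keeps the bits being
 * turned off.
 *
 *	old_mask                     = 0b0011
 *	mask                         = 0b0110
 *	(mask ^ old_mask) & mask     = 0b0100   newly enabled irqs
 *	(mask ^ old_mask) & old_mask = 0b0001   irqs that were disabled
 */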
@@ -780,24 +783,18 @@ static
void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
{
unsigned int i;
- u32 top_clear = 0;
for (i = 0; i < dispc->feat->num_vps; ++i) {
- if (clearmask & DSS_IRQ_VP_MASK(i)) {
+ if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
- top_clear |= BIT(i);
- }
}
for (i = 0; i < dispc->feat->num_planes; ++i) {
- if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+ if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
- top_clear |= BIT(4 + i);
- }
}
- if (dispc->feat->subrev == DISPC_K2G)
- return;
- dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+ /* always clear the top level irqstatus */
+ dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS));
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQSTATUS);
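/*
 * Writing the freshly read value back works because DISPC_IRQSTATUS is a
 * write-1-to-clear register (an assumption consistent with the pattern
 * here): every bit that reads as pending gets cleared, regardless of the
 * clearmask, which is what "always clear the top level irqstatus" means.
 */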
@@ -843,7 +840,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
old_mask = dispc_k3_read_irqenable(dispc);
- /* clear the irqstatus for newly enabled irqs */
+ /* clear the irqstatus for irqs that will be enabled */
dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
for (i = 0; i < dispc->feat->num_vps; ++i) {
@@ -868,6 +865,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
if (main_disable)
dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+ /* clear the irqstatus for irqs that were disabled */
+ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask);
+
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQENABLE_SET);
}
@@ -2767,8 +2767,12 @@ static void dispc_init_errata(struct dispc_device *dispc)
*/
static void dispc_softreset_k2g(struct dispc_device *dispc)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dispc->tidss->irq_lock, flags);
dispc_set_irqenable(dispc, 0);
dispc_read_and_clear_irqstatus(dispc);
+ spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags);
for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 7c8fd6407d82..d4652e8cc28c 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -9,9 +9,9 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -113,7 +113,6 @@ static const struct drm_driver tidss_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "tidss",
.desc = "TI Keystone DSS",
- .date = "20180215",
.major = 1,
.minor = 0,
};
@@ -140,7 +139,7 @@ static int tidss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tidss);
- spin_lock_init(&tidss->wait_lock);
+ spin_lock_init(&tidss->irq_lock);
ret = dispc_init(tidss);
if (ret) {
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index d7f27b0b0315..7f4f4282bc04 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -29,8 +29,9 @@ struct tidss_device {
unsigned int irq;
- spinlock_t wait_lock; /* protects the irq masks */
- dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
+ /* protects the irq masks field and irqenable/irqstatus registers */
+ spinlock_t irq_lock;
+ dispc_irq_t irq_mask; /* enabled irqs */
};
#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 604334ef526a..5abc788781f4 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -15,10 +15,9 @@
#include "tidss_irq.h"
#include "tidss_plane.h"
-/* call with wait_lock and dispc runtime held */
static void tidss_irq_update(struct tidss_device *tidss)
{
- assert_spin_locked(&tidss->wait_lock);
+ assert_spin_locked(&tidss->irq_lock);
dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
}
@@ -31,11 +30,11 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
void tidss_irq_disable_vblank(struct drm_crtc *crtc)
@@ -46,11 +45,11 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
static irqreturn_t tidss_irq_handler(int irq, void *arg)
@@ -60,7 +59,9 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
unsigned int id;
dispc_irq_t irqstatus;
+ spin_lock(&tidss->irq_lock);
irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+ spin_unlock(&tidss->irq_lock);
for (id = 0; id < tidss->num_crtcs; id++) {
struct drm_crtc *crtc = tidss->crtcs[id];
@@ -78,8 +79,13 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
tidss_crtc_error_irq(crtc, irqstatus);
}
- if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
- dev_err_ratelimited(tidss->dev, "OCP error\n");
+ for (unsigned int i = 0; i < tidss->num_planes; ++i) {
+ struct drm_plane *plane = tidss->planes[i];
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ if (irqstatus & DSS_IRQ_PLANE_FIFO_UNDERFLOW(tplane->hw_plane_id))
+ tidss_plane_error_irq(plane, irqstatus);
+ }
return IRQ_HANDLED;
}
@@ -88,9 +94,9 @@ void tidss_irq_resume(struct tidss_device *tidss)
{
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
@@ -105,7 +111,7 @@ int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
if (ret)
return ret;
- tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+ tidss->irq_mask = 0;
for (unsigned int i = 0; i < tidss->num_crtcs; ++i) {
struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
@@ -115,6 +121,12 @@ int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
}
+ for (unsigned int i = 0; i < tidss->num_planes; ++i) {
+ struct tidss_plane *tplane = to_tidss_plane(tidss->planes[i]);
+
+ tidss->irq_mask |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(tplane->hw_plane_id);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
index b512614d5863..dd61f645f662 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.h
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -19,15 +19,13 @@
* bit use |D |fou|FEOL|FEOL|FEOL|FEOL| UUUU | |
* bit number|0 |1-3|4-7 |8-11| 12-19 | 20-23 | 24-31 |
*
- * device bits: D = OCP error
+ * device bits: D = Unused
* WB bits: f = frame done wb, o = wb buffer overflow,
* u = wb buffer uncomplete
* vp bits: F = frame done, E = vsync even, O = vsync odd, L = sync lost
* plane bits: U = fifo underflow
*/
-#define DSS_IRQ_DEVICE_OCP_ERR BIT(0)
-
#define DSS_IRQ_DEVICE_FRAMEDONEWB BIT(1)
#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW BIT(2)
#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR BIT(3)
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index a5d86822c9e3..116de124bddb 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -18,6 +18,14 @@
#include "tidss_drv.h"
#include "tidss_plane.h"
+void tidss_plane_error_irq(struct drm_plane *plane, u64 irqstatus)
+{
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_err_ratelimited(plane->dev->dev, "Plane%u underflow (irq %llx)\n",
+ tplane->hw_plane_id, irqstatus);
+}
+
/* drm_plane_helper_funcs */
static int tidss_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
index e933e158b617..aecaf2728406 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.h
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -22,4 +22,6 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
u32 crtc_mask, const u32 *formats,
u32 num_formats);
+void tidss_plane_error_irq(struct drm_plane *plane, u64 irqstatus);
+
#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 6f0df8d6b90c..7caec4d38ddf 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -13,8 +13,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -481,7 +481,6 @@ static const struct drm_driver tilcdc_driver = {
.fops = &fops,
.name = "tilcdc",
.desc = "TI LCD Controller DRM",
- .date = "20121205",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 4aaf56f8707d..60816d2eb4ff 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 0cc68042a6d6..2748d1f21d86 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -6,8 +6,9 @@
*/
#include <linux/clk.h>
+
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -289,7 +290,7 @@ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
* There is only one output port inside each device. It is linked with
* encoder endpoint.
*/
- endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ endpoint_node = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 0, -1);
if (endpoint_node) {
encoder_node = of_graph_get_remote_port_parent(endpoint_node);
of_node_put(endpoint_node);
@@ -366,7 +367,6 @@ static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
- .date = "20160219",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 6f91ff1dbf7e..89a699370a59 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -5,9 +5,9 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -680,7 +680,6 @@ static const struct drm_driver bochs_driver = {
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
- .date = "20130925",
.major = 1,
.minor = 0,
DRM_GEM_SHMEM_DRIVER_OPS,
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 4d2adcaeaa60..52ec1e4ea9e5 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -24,10 +24,10 @@
#include <video/cirrus.h>
#include <video/vga.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -46,9 +46,8 @@
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
-#define DRIVER_NAME "cirrus"
+#define DRIVER_NAME "cirrus-qemu"
#define DRIVER_DESC "qemu cirrus vga"
-#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
@@ -589,14 +588,14 @@ static int cirrus_pipe_init(struct cirrus_device *cirrus)
encoder = &cirrus->encoder;
ret = drm_encoder_init(dev, encoder, &cirrus_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
connector = &cirrus->connector;
ret = drm_connector_init(dev, connector, &cirrus_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_VIRTUAL);
if (ret)
return ret;
drm_connector_helper_add(connector, &cirrus_connector_helper_funcs);
@@ -659,7 +658,6 @@ static const struct drm_driver cirrus_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 0c17ae532fb4..41e9bfb2e2ff 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -7,9 +7,9 @@
#include <linux/pm.h>
#include <linux/usb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -34,7 +34,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
#define DRIVER_NAME "gm12u320"
#define DRIVER_DESC "Grain Media GM12U320 USB projector display"
-#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -626,7 +625,6 @@ static const struct drm_driver gm12u320_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 6b0d1846cfcf..df263818f45f 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -16,8 +16,8 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -199,7 +199,6 @@ static const struct drm_driver hx8357d_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
- .date = "20181023",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index 5eb39ca1a855..62cadf5e033d 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -7,8 +7,8 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -118,7 +118,6 @@ static struct drm_driver ili9163_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9163",
.desc = "Ilitek ILI9163",
- .date = "20210208",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 875e2d09729a..6de44ff69b51 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -16,8 +16,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
@@ -364,7 +364,6 @@ static const struct drm_driver ili9225_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
- .date = "20171106",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index c1dfdfbbd30c..e55029433509 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -15,8 +15,8 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -155,7 +155,6 @@ static const struct drm_driver ili9341_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
- .date = "20180514",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 7e46a720d5e2..093661c771a0 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -14,8 +14,8 @@
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -177,7 +177,6 @@ static const struct drm_driver ili9486_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
- .date = "20200118",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index f1461c55dba6..b6b4664908ae 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -13,8 +13,8 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -159,7 +159,6 @@ static const struct drm_driver mi0283qt_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
- .date = "20160614",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 9898eab5e9e2..13491c0e704a 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -5,9 +5,9 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
@@ -25,7 +25,6 @@
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
-#define DRIVER_DATE "20220501"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -1348,7 +1347,6 @@ static struct drm_driver ofdrm_driver = {
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index e66729b31bd6..0460ecaef4bd 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -10,12 +10,13 @@
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -269,7 +270,6 @@ static const struct drm_driver panel_mipi_dbi_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
- .date = "20220103",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 77944eb17b3c..52ba6c699bc8 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -21,8 +21,8 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -917,7 +917,6 @@ static const struct drm_driver repaper_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
- .date = "20170405",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
index 2d2315bd6aef..03d2850310c4 100644
--- a/drivers/gpu/drm/tiny/sharp-memory.c
+++ b/drivers/gpu/drm/tiny/sharp-memory.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -107,7 +107,6 @@ static const struct drm_driver sharp_memory_drm_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "sharp_memory_display",
.desc = "Sharp Display Memory LCD",
- .date = "20231129",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 4d4f05dee244..5d9ab8adf800 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -10,9 +10,9 @@
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
@@ -31,7 +31,6 @@
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
-#define DRIVER_DATE "20200625"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -1015,7 +1014,6 @@ static struct drm_driver simpledrm_driver = {
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 97013685c62f..a29672d84ede 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -12,8 +12,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
@@ -295,7 +295,6 @@ static const struct drm_driver st7586_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
- .date = "20170801",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 0747ebd999cc..1d60f6e5b3bc 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -16,8 +16,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -160,7 +160,6 @@ static const struct drm_driver st7735r_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
- .date = "20171128",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index 3139fd9128d8..f8f20d2f6174 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -258,13 +258,13 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->priority = bo_prio;
- err = ttm_resource_alloc(bo, place, &res1);
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res1;
/* Add a dummy resource to populate LRU */
- ttm_resource_alloc(bo, place, &res2);
+ ttm_resource_alloc(bo, place, &res2, NULL);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_unreserve(bo);
@@ -300,12 +300,12 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_pin(bo);
- err = ttm_resource_alloc(bo, place, &res1);
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res1;
/* Add a dummy resource to the pinned list */
- err = ttm_resource_alloc(bo, place, &res2);
+ err = ttm_resource_alloc(bo, place, &res2, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1);
@@ -355,7 +355,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
dma_resv_unlock(bo1->base.resv);
- err = ttm_resource_alloc(bo1, place, &res1);
+ err = ttm_resource_alloc(bo1, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo1->resource = res1;
@@ -363,7 +363,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
dma_resv_unlock(bo2->base.resv);
- err = ttm_resource_alloc(bo2, place, &res2);
+ err = ttm_resource_alloc(bo2, place, &res2, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo2->resource = res2;
@@ -401,7 +401,7 @@ static void ttm_bo_put_basic(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->type = ttm_bo_type_device;
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
@@ -518,7 +518,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
@@ -569,7 +569,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 1adf18481ea0..3148f5d3dbd6 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -542,7 +542,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
bo->ttm = old_tt;
}
- err = ttm_resource_alloc(bo, place, &bo->resource);
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test, man->usage, size);
@@ -603,7 +603,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
bo->type = params->bo_type;
- err = ttm_resource_alloc(bo, place, &bo->resource);
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
KUNIT_EXPECT_EQ(test, err, 0);
placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index a9f4b81921c3..e6ea2bd01f07 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -302,7 +302,7 @@ static void ttm_sys_man_free_basic(struct kunit *test)
res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, res);
- ttm_resource_alloc(bo, place, &res);
+ ttm_resource_alloc(bo, place, &res, NULL);
man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
man->func->free(man, res);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 48c5365efca1..ea5e49858857 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,6 +42,7 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
+#include <linux/cgroup_dmem.h>
#include <linux/dma-resv.h>
#include "ttm_module.h"
@@ -499,6 +500,13 @@ struct ttm_bo_evict_walk {
struct ttm_resource **res;
/** @evicted: Number of successful evictions. */
unsigned long evicted;
+
+ /** @limit_pool: Which pool limit we should test against */
+ struct dmem_cgroup_pool_state *limit_pool;
+ /** @try_low: Whether we should attempt to evict BOs below their low watermark */
+ bool try_low;
+ /** @hit_low: Set if a BO could not be evicted while @try_low was false (first pass) */
+ bool hit_low;
};
static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
@@ -507,6 +515,10 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
container_of(walk, typeof(*evict_walk), walk);
s64 lret;
+ if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
+ evict_walk->try_low, &evict_walk->hit_low))
+ return 0;
+
if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
return 0;
@@ -524,7 +536,7 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
evict_walk->evicted++;
if (evict_walk->res)
lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
- evict_walk->res);
+ evict_walk->res, NULL);
if (lret == 0)
return 1;
out:
@@ -545,7 +557,8 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
struct ttm_buffer_object *evictor,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket,
- struct ttm_resource **res)
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state *limit_pool)
{
struct ttm_bo_evict_walk evict_walk = {
.walk = {
@@ -556,22 +569,39 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
.place = place,
.evictor = evictor,
.res = res,
+ .limit_pool = limit_pool,
};
s64 lret;
evict_walk.walk.trylock_only = true;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+
+ /* Retry once, allowing eviction below the low limit, if the first walk hit it */
+ if (!lret && evict_walk.hit_low) {
+ evict_walk.try_low = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ }
if (lret || !ticket)
goto out;
+ /* Reset low limit */
+ evict_walk.try_low = evict_walk.hit_low = false;
/* If ticket-locking, repeat while making progress. */
evict_walk.walk.trylock_only = false;
+
+retry:
do {
/* The walk may clear the evict_walk.walk.ticket field */
evict_walk.walk.ticket = ticket;
evict_walk.evicted = 0;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
} while (!lret && evict_walk.evicted);
+
+ /* If we hit the low limit, try once more, now allowing eviction below it */
+ if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
+ evict_walk.try_low = true;
+ goto retry;
+ }
out:
if (lret < 0)
return lret;
@@ -689,6 +719,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
+ struct dmem_cgroup_pool_state *limit_pool = NULL;
struct ttm_resource_manager *man;
bool may_evict;
@@ -701,15 +732,20 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
continue;
may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
- ret = ttm_resource_alloc(bo, place, res);
+ ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
if (ret) {
- if (ret != -ENOSPC)
+ if (ret != -ENOSPC && ret != -EAGAIN) {
+ dmem_cgroup_pool_state_put(limit_pool);
return ret;
- if (!may_evict)
+ }
+ if (!may_evict) {
+ dmem_cgroup_pool_state_put(limit_pool);
continue;
+ }
ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
- ticket, res);
+ ticket, res, limit_pool);
+ dmem_cgroup_pool_state_put(limit_pool);
if (ret == -EBUSY)
continue;
if (ret)
@@ -1056,6 +1092,8 @@ struct ttm_bo_swapout_walk {
struct ttm_lru_walk walk;
/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
gfp_t gfp_flags;
+
+ bool hit_low, evict_low;
};
static s64
@@ -1106,7 +1144,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
- ret = ttm_resource_alloc(bo, &place, &evict_mem);
+ ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
if (ret)
goto out;
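
The hit_low/try_low pair above implements a two-pass eviction: the first LRU walk skips BOs whose cgroup pool sits under its low-limit protection, and the walk is repeated with try_low set only when that pass evicted nothing but did encounter protected BOs. A condensed sketch of the control flow, with a stub walk() standing in for ttm_lru_walk_for_evict() (not the kernel's exact code):

    struct evict_state {
            bool try_low;   /* allow evicting from pools below their low limit */
            bool hit_low;   /* a protected BO was skipped during the walk */
    };

    static long walk(struct evict_state *st);  /* stub for the LRU walk */

    static long evict_two_pass(struct evict_state *st)
    {
            long progress;

            st->try_low = false;
            st->hit_low = false;

            /* Pass 1: respect each pool's low watermark. */
            progress = walk(st);

            /* Pass 2: only if nothing was evicted but protected BOs exist. */
            if (!progress && st->hit_low) {
                    st->try_low = true;
                    progress = walk(st);
            }
            return progress;
    }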
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 2c699ed1963a..a194db83421d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -58,13 +58,13 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
return VM_FAULT_RETRY;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
(void)dma_resv_wait_timeout(bo->base.resv,
DMA_RESV_USAGE_KERNEL, true,
MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
return VM_FAULT_RETRY;
}
@@ -130,12 +130,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
}
return VM_FAULT_RETRY;
@@ -353,7 +353,7 @@ void ttm_bo_vm_open(struct vm_area_struct *vma)
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
}
EXPORT_SYMBOL(ttm_bo_vm_open);
@@ -361,7 +361,7 @@ void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);
@@ -405,13 +405,25 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
return len;
}
-int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
+/**
+ * ttm_bo_access - Helper to access a buffer object
+ *
+ * @bo: ttm buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Utility function to access a buffer object. Useful when a buffer object
+ * cannot be easily mapped (non-contiguous, non-visible, etc.). Should not
+ * be exported directly to user space via a peek / poke interface.
+ *
+ * Returns:
+ * @len if successful, negative error code on failure.
+ */
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write)
{
- struct ttm_buffer_object *bo = vma->vm_private_data;
- unsigned long offset = (addr) - vma->vm_start +
- ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
- << PAGE_SHIFT);
int ret;
if (len < 1 || (offset + len) > bo->base.size)
@@ -429,8 +441,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
break;
default:
if (bo->bdev->funcs->access_memory)
- ret = bo->bdev->funcs->access_memory(
- bo, offset, buf, len, write);
+ ret = bo->bdev->funcs->access_memory
+ (bo, offset, buf, len, write);
else
ret = -EIO;
}
@@ -439,6 +451,18 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
+EXPORT_SYMBOL(ttm_bo_access);
+
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ unsigned long offset = (addr) - vma->vm_start +
+ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+ << PAGE_SHIFT);
+
+ return ttm_bo_access(bo, offset, buf, len, write);
+}
EXPORT_SYMBOL(ttm_bo_vm_access);
static const struct vm_operations_struct ttm_bo_vm_ops = {
@@ -462,7 +486,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
/*
* Drivers may want to override the vm_ops field. Otherwise we
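
With ttm_bo_vm_access() reduced to an offset computation on top of it, drivers can call the newly exported ttm_bo_access() directly, e.g. for debug reads of a BO's backing store. A minimal usage sketch, assuming the caller already holds a reference on the BO (buffer and length are illustrative):

    /* Read the first 16 bytes of a BO for debugging; sketch only. */
    static int example_peek_bo(struct ttm_buffer_object *bo)
    {
            u8 buf[16];
            int ret;

            ret = ttm_bo_access(bo, 0, buf, sizeof(buf), /* write */ 0);
            if (ret < 0)
                    return ret;     /* negative error code on failure */

            /* ret == sizeof(buf) on success, per the kernel-doc above */
            return 0;
    }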
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index a87665eb28a6..cc29bbf3eabb 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -26,6 +26,7 @@
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
+#include <linux/cgroup_dmem.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
@@ -350,15 +351,28 @@ EXPORT_SYMBOL(ttm_resource_fini);
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource **res_ptr)
+ struct ttm_resource **res_ptr,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
+ struct dmem_cgroup_pool_state *pool = NULL;
int ret;
+ if (man->cg) {
+ ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
+ if (ret)
+ return ret;
+ }
+
ret = man->func->alloc(man, bo, place, res_ptr);
- if (ret)
+ if (ret) {
+ if (pool)
+ dmem_cgroup_uncharge(pool, bo->base.size);
return ret;
+ }
+
+ (*res_ptr)->css = pool;
spin_lock(&bo->bdev->lru_lock);
ttm_resource_add_bulk_move(*res_ptr, bo);
@@ -370,6 +384,7 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
struct ttm_resource_manager *man;
+ struct dmem_cgroup_pool_state *pool;
if (!*res)
return;
@@ -377,9 +392,13 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
spin_lock(&bo->bdev->lru_lock);
ttm_resource_del_bulk_move(*res, bo);
spin_unlock(&bo->bdev->lru_lock);
+
+ pool = (*res)->css;
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
+ if (man->cg)
+ dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
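
The cgroup accounting above follows a strict charge-before-allocate, uncharge-on-any-exit discipline: the pool is charged before the manager allocation, uncharged if that allocation fails, stashed in the resource's css field on success, and uncharged again when the resource is freed. A reduced sketch of the pairing, with backend_alloc() standing in for man->func->alloc() (the dmem_* calls are the ones used in the patch):

    struct dmem_cgroup_region;          /* opaque, from <linux/cgroup_dmem.h> */
    struct dmem_cgroup_pool_state;

    static int backend_alloc(void);     /* stub for man->func->alloc() */

    static int alloc_charged(struct dmem_cgroup_region *cg, u64 size,
                             struct dmem_cgroup_pool_state **pool_out)
    {
            struct dmem_cgroup_pool_state *pool = NULL;
            int ret;

            ret = dmem_cgroup_try_charge(cg, size, &pool, NULL);
            if (ret)
                    return ret;     /* nothing charged, nothing to undo */

            ret = backend_alloc();
            if (ret) {
                    dmem_cgroup_uncharge(pool, size);  /* undo on failure */
                    return ret;
            }

            *pool_out = pool;       /* the free path uncharges this later */
            return 0;
    }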
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index c341aee37dd9..a048e37f1c2c 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -37,9 +37,9 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -146,7 +146,6 @@ static const struct drm_driver tve200_drm_driver = {
.fops = &drm_fops,
.name = "tve200",
.desc = DRIVER_DESC,
- .date = "20170703",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8d8ae40f945c..05b3a152cc33 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -5,8 +5,8 @@
#include <linux/module.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -78,7 +78,6 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1eb716d9dad5..be00dc1d87a1 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -26,7 +26,6 @@ struct drm_mode_create_dumb;
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
-#define DRIVER_DATE "20120220"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 73ab7dd31b17..bb7815599435 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -13,10 +13,6 @@
* Display engines requiring physically contiguous allocations should
* look into Mesa's "renderonly" support (as used by the Mesa pl111
* driver) for an example of how to integrate with V3D.
- *
- * Long term, we should support evicting pages from the MMU when under
- * memory pressure (thus the v3d_bo_get_pages() refcounting), but
- * that's not a high priority since our systems tend to not have swap.
*/
#include <linux/dma-buf.h>
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 19e3ee7ac897..76816f2551c1 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -237,8 +237,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
if (v3d->ver >= 40) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
- V3D_SET_FIELD(cycle_count_reg,
- V3D_PCTR_S0));
+ V3D_SET_FIELD_VER(cycle_count_reg,
+ V3D_PCTR_S0, v3d->ver));
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
} else {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index bee51c942a56..930737a9347b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -31,7 +31,6 @@
#define DRIVER_NAME "v3d"
#define DRIVER_DESC "Broadcom V3D graphics"
-#define DRIVER_DATE "20180419"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -224,6 +223,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_COUNTER, v3d_perfmon_get_counter_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_PERFMON_SET_GLOBAL, v3d_perfmon_set_global_ioctl, DRM_RENDER_ALLOW),
};
static const struct drm_driver v3d_drm_driver = {
@@ -248,7 +248,6 @@ static const struct drm_driver v3d_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index de73eefff9ac..dc1cfe2e14be 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -183,6 +183,12 @@ struct v3d_dev {
u32 num_allocated;
u32 pages_allocated;
} bo_stats;
+
+ /* To support a performance analysis tool in user space, we require a
+ * single, globally configured performance monitor (perfmon) for all
+ * jobs.
+ */
+ struct v3d_perfmon *global_perfmon;
};
static inline struct v3d_dev *
@@ -594,6 +600,8 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* v3d_sysfs.c */
int v3d_sysfs_init(struct device *dev);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 20bf33702c3c..da203045df9b 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -108,6 +108,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->bin_job = NULL;
status = IRQ_HANDLED;
}
@@ -118,6 +119,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->render_job = NULL;
status = IRQ_HANDLED;
}
@@ -128,6 +130,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->csd_job = NULL;
status = IRQ_HANDLED;
}
@@ -165,6 +168,7 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->tfu_job = NULL;
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 0f564fd7160c..a25d25a8ae61 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -4,7 +4,7 @@
/**
* DOC: Broadcom V3D MMU
*
- * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
+ * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
* a single level of page tables for the V3D's 4GB address space to
* map to AXI bus addresses, thus it could need up to 4MB of
* physically contiguous memory to store the PTEs.
@@ -15,14 +15,14 @@
*
* To protect clients from each other, we should use the GMP to
* quickly mask out (at 128kb granularity) what pages are available to
- * each client. This is not yet implemented.
+ * each client. This is not yet implemented.
*/
#include "v3d_drv.h"
#include "v3d_regs.h"
-/* Note: All PTEs for the 1MB superpage must be filled with the
- * superpage bit set.
+/* Note: All PTEs for the 64KB bigpage or 1MB superpage must be filled
+ * with the bigpage/superpage bit set.
*/
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_BIGPAGE BIT(30)
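
The reworded note above extends the rule to 64KB bigpages: every 4KB PTE slot spanned by a large mapping must carry the matching bit, not just the first one. A hedged sketch of a fill loop honouring that rule (the PTE layout shown is illustrative, not the driver's exact encoding):

    /* Illustrative: fill all 4KB PTE slots spanned by one large mapping. */
    static void fill_large_mapping(u32 *ptes, u32 base_pfn, bool superpage)
    {
            unsigned int npte = superpage ? SZ_1M / SZ_4K : SZ_64K / SZ_4K;
            u32 flag = superpage ? V3D_PTE_SUPERPAGE : V3D_PTE_BIGPAGE;
            unsigned int i;

            for (i = 0; i < npte; i++)
                    ptes[i] = flag | (base_pfn + i);  /* every PTE gets the bit */
    }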
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 924814cab46a..3ebda2fa46fc 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -240,17 +240,18 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
for (i = 0; i < ncounters; i++) {
u32 source = i / 4;
- u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0);
+ u32 channel = V3D_SET_FIELD_VER(perfmon->counters[i], V3D_PCTR_S0,
+ v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S1);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S1, v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S2);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S2, v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S3);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S3, v3d->ver);
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
}
@@ -312,6 +313,9 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
if (perfmon == v3d->active_perfmon)
v3d_perfmon_stop(v3d, perfmon, false);
+ /* If the global perfmon is being destroyed, set it to NULL */
+ cmpxchg(&v3d->global_perfmon, perfmon, NULL);
+
v3d_perfmon_put(perfmon);
return 0;
@@ -383,6 +387,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
{
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_perfmon_destroy *req = data;
+ struct v3d_dev *v3d = v3d_priv->v3d;
struct v3d_perfmon *perfmon;
mutex_lock(&v3d_priv->perfmon.lock);
@@ -392,6 +397,13 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
if (!perfmon)
return -EINVAL;
+ /* If the active perfmon is being destroyed, stop it first */
+ if (perfmon == v3d->active_perfmon)
+ v3d_perfmon_stop(v3d, perfmon, false);
+
+ /* If the global perfmon is being destroyed, set it to NULL */
+ cmpxchg(&v3d->global_perfmon, perfmon, NULL);
+
v3d_perfmon_put(perfmon);
return 0;
@@ -451,3 +463,34 @@ int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
return 0;
}
+
+int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_perfmon_set_global *req = data;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_perfmon *perfmon;
+
+ if (req->flags & ~DRM_V3D_PERFMON_CLEAR_GLOBAL)
+ return -EINVAL;
+
+ perfmon = v3d_perfmon_find(v3d_priv, req->id);
+ if (!perfmon)
+ return -EINVAL;
+
+ /* If the request is to clear the global performance monitor */
+ if (req->flags & DRM_V3D_PERFMON_CLEAR_GLOBAL) {
+ if (!v3d->global_perfmon)
+ return -EINVAL;
+
+ xchg(&v3d->global_perfmon, NULL);
+
+ return 0;
+ }
+
+ if (cmpxchg(&v3d->global_perfmon, NULL, perfmon))
+ return -EBUSY;
+
+ return 0;
+}
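
v3d_perfmon_set_global_ioctl() claims the global slot with a lock-free cmpxchg() against NULL, so a second client gets -EBUSY instead of silently replacing the first; xchg() clears it on request, and the destroy paths use cmpxchg(perfmon, NULL) so only the current owner is ever cleared. A minimal sketch of the same claim/release pattern on a generic pointer slot (names are illustrative):

    /* Single-owner slot guarded by atomic pointer ops; sketch only. */
    static struct v3d_perfmon *global_slot;

    static int claim_global(struct v3d_perfmon *pm)
    {
            /* Succeeds only if no owner was installed yet. */
            if (cmpxchg(&global_slot, NULL, pm))
                    return -EBUSY;
            return 0;
    }

    static void release_global(struct v3d_perfmon *pm)
    {
            /* Clears the slot only if @pm still owns it (destroy path). */
            cmpxchg(&global_slot, pm, NULL);
    }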
diff --git a/drivers/gpu/drm/v3d/v3d_performance_counters.h b/drivers/gpu/drm/v3d/v3d_performance_counters.h
index d919a2fc9449..2bc4cce0744a 100644
--- a/drivers/gpu/drm/v3d/v3d_performance_counters.h
+++ b/drivers/gpu/drm/v3d/v3d_performance_counters.h
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2024 Raspberry Pi
*/
+
#ifndef V3D_PERFORMANCE_COUNTERS_H
#define V3D_PERFORMANCE_COUNTERS_H
-/* Holds a description of a given performance counter. The index of performance
- * counter is given by the array on v3d_performance_counter.h
+/* Holds a description of a given performance counter. The index of a
+ * performance counter is given by the array in `v3d_performance_counter.c`.
*/
struct v3d_perf_counter_desc {
/* Category of the counter */
@@ -20,15 +21,12 @@ struct v3d_perf_counter_desc {
};
struct v3d_perfmon_info {
- /*
- * Different revisions of V3D have different total number of
+ /* Different revisions of V3D have different total number of
* performance counters.
*/
unsigned int max_counters;
- /*
- * Array of counters valid for the platform.
- */
+ /* Array of counters valid for the platform. */
const struct v3d_perf_counter_desc *counters;
};
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 1b1a62ad9585..6da3c69082bd 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -15,6 +15,14 @@
fieldval & field##_MASK; \
})
+#define V3D_SET_FIELD_VER(value, field, ver) \
+ ({ \
+ typeof(ver) _ver = (ver); \
+ u32 fieldval = (value) << field##_SHIFT(_ver); \
+ WARN_ON((fieldval & ~field##_MASK(_ver)) != 0); \
+ fieldval & field##_MASK(_ver); \
+ })
+
#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \
field##_SHIFT)
@@ -354,18 +362,15 @@
#define V3D_V4_PCTR_0_SRC_28_31 0x0067c
#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \
4 * (x))
-# define V3D_PCTR_S0_MASK V3D_MASK(6, 0)
-# define V3D_V7_PCTR_S0_MASK V3D_MASK(7, 0)
-# define V3D_PCTR_S0_SHIFT 0
-# define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
-# define V3D_V7_PCTR_S1_MASK V3D_MASK(15, 8)
-# define V3D_PCTR_S1_SHIFT 8
-# define V3D_PCTR_S2_MASK V3D_MASK(22, 16)
-# define V3D_V7_PCTR_S2_MASK V3D_MASK(23, 16)
-# define V3D_PCTR_S2_SHIFT 16
-# define V3D_PCTR_S3_MASK V3D_MASK(30, 24)
-# define V3D_V7_PCTR_S3_MASK V3D_MASK(31, 24)
-# define V3D_PCTR_S3_SHIFT 24
+# define V3D_PCTR_S0_MASK(ver) (((ver) >= 71) ? V3D_MASK(7, 0) : V3D_MASK(6, 0))
+# define V3D_PCTR_S0_SHIFT(ver) 0
+# define V3D_PCTR_S1_MASK(ver) (((ver) >= 71) ? V3D_MASK(15, 8) : V3D_MASK(14, 8))
+# define V3D_PCTR_S1_SHIFT(ver) 8
+# define V3D_PCTR_S2_MASK(ver) (((ver) >= 71) ? V3D_MASK(23, 16) : V3D_MASK(22, 16))
+# define V3D_PCTR_S2_SHIFT(ver) 16
+# define V3D_PCTR_S3_MASK(ver) (((ver) >= 71) ? V3D_MASK(31, 24) : V3D_MASK(30, 24))
+# define V3D_PCTR_S3_SHIFT(ver) 24
+
#define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32)
/* Output values of the counters. */
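
V3D_SET_FIELD_VER() folds the old V3D_/V3D_V7_ mask pairs into one macro by making each mask and shift a function of the hardware version: on v7.1+ parts the PCTR source fields are 8 bits wide instead of 7. A worked example for the S1 field (values chosen for illustration):

    static u32 example_pack_s1(u8 counter, int ver)
    {
            /* ver >= 71: bits 15:8 (8 bits); older parts: bits 14:8 (7 bits) */
            return V3D_SET_FIELD_VER(counter, V3D_PCTR_S1, ver);
    }

    /*
     * example_pack_s1(0x2a, 71) == (0x2a << 8) & V3D_MASK(15, 8) == 0x2a00.
     * On ver 42, the mask narrows to V3D_MASK(14, 8), so counter values
     * >= 0x80 would trip the macro's WARN_ON and be truncated.
     */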
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 99ac4995b5a1..da08ddb01d21 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -5,16 +5,16 @@
* DOC: Broadcom V3D scheduling
*
* The shared DRM GPU scheduler is used to coordinate submitting jobs
- * to the hardware. Each DRM fd (roughly a client process) gets its
- * own scheduler entity, which will process jobs in order. The GPU
- * scheduler will round-robin between clients to submit the next job.
+ * to the hardware. Each DRM fd (roughly a client process) gets its
+ * own scheduler entity, which will process jobs in order. The GPU
+ * scheduler selects between clients using a FIFO scheduling algorithm.
*
* For simplicity, and in order to keep latency low for interactive
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
- * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
- * drm_sched_job_add_dependency() to manage the dependency between bin and
- * render, instead of having the clients submit jobs using the HW's
+ * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
+ * `drm_sched_job_add_dependency()` to manage the dependency between bin
+ * and render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
@@ -120,11 +120,19 @@ v3d_cpu_job_free(struct drm_sched_job *sched_job)
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
- if (job->perfmon != v3d->active_perfmon)
+ struct v3d_perfmon *perfmon = v3d->global_perfmon;
+
+ if (!perfmon)
+ perfmon = job->perfmon;
+
+ if (perfmon == v3d->active_perfmon)
+ return;
+
+ if (perfmon != v3d->active_perfmon)
v3d_perfmon_stop(v3d, v3d->active_perfmon, true);
- if (job->perfmon && v3d->active_perfmon != job->perfmon)
- v3d_perfmon_start(v3d, job->perfmon);
+ if (perfmon && v3d->active_perfmon != perfmon)
+ v3d_perfmon_start(v3d, perfmon);
}
static void
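
The reworked v3d_switch_perfmon() above gives the globally configured perfmon precedence over the per-job one: the effective monitor is the global one if set, otherwise the job's, and the hardware is touched only when that choice differs from the currently active monitor. The selection reduces to a small pure function (sketch; names are illustrative):

    /* Pick the perfmon a job should run under; the global one wins. */
    static struct v3d_perfmon *effective_perfmon(struct v3d_perfmon *global,
                                                 struct v3d_perfmon *job_pm)
    {
            return global ? global : job_pm;
    }

    /*
     * The switch then is: if effective == active, do nothing; otherwise
     * stop the active monitor and start the effective one (if any).
     */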
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index d607aa9c4ec2..4ff5de46fb22 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -11,10 +11,11 @@
#include "v3d_trace.h"
/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
+ * we can attach fences and update the reservations after pushing the job
+ * to the queue.
*
* We don't lock the RCL, the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
+ * (all of which are on render->unref_list). They're entirely private
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
@@ -55,11 +56,11 @@ fail:
* @bo_count: Number of GEM handles passed in
*
* The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
+ * the submitted job's BO list. This does the validation of the job's
* BO list and reference counting for the lifetime of the job.
*
* Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
+ * failure, because that will happen at `v3d_job_free()`.
*/
static int
v3d_lookup_bos(struct drm_device *dev,
@@ -981,6 +982,11 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail;
if (args->perfmon_id) {
+ if (v3d->global_perfmon) {
+ ret = -EAGAIN;
+ goto fail_perfmon;
+ }
+
render->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
@@ -1196,6 +1202,11 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
goto fail;
if (args->perfmon_id) {
+ if (v3d->global_perfmon) {
+ ret = -EAGAIN;
+ goto fail_perfmon;
+ }
+
job->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
if (!job->base.perfmon) {
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index a536c467e2b2..bb861f0a0a31 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -13,8 +13,8 @@
#include <linux/pci.h>
#include <linux/vt_kern.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_file.h>
@@ -189,7 +189,6 @@ static const struct drm_driver driver = {
.fops = &vbox_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index e77bd6512eb1..dfa935f381a6 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -25,7 +25,6 @@
#define DRIVER_NAME "vboxvideo"
#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
-#define DRIVER_DATE "20130823"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index c5f30b317698..6cc7b7e6294a 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -10,6 +10,7 @@ config DRM_VC4
depends on COMMON_CLK
depends on PM
select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
index 6527fb1db71e..e276a957b01c 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -51,8 +51,8 @@ struct vc4_mock_desc {
static const struct vc4_mock_desc vc4_mock =
VC4_MOCK_DESC(
- VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
- VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ VC4_MOCK_CRTC_DESC(&bcm2835_txp_data.base,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP0,
DRM_MODE_ENCODER_VIRTUAL,
DRM_MODE_CONNECTOR_WRITEBACK)),
VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv0_data,
@@ -77,8 +77,8 @@ static const struct vc4_mock_desc vc4_mock =
static const struct vc4_mock_desc vc5_mock =
VC4_MOCK_DESC(
- VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
- VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ VC4_MOCK_CRTC_DESC(&bcm2835_txp_data.base,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP0,
DRM_MODE_ENCODER_VIRTUAL,
DRM_MODE_CONNECTOR_WRITEBACK)),
VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv0_data,
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 61622e951031..40a05869a50e 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -90,7 +90,7 @@ static const struct encoder_constraint vc4_encoder_constraints[] = {
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 1),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
- ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP0, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 2),
};
@@ -98,7 +98,7 @@ static const struct encoder_constraint vc5_encoder_constraints[] = {
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
- ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 0, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP0, 0, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 0, 1, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 0, 1, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI1, 0, 1, 2),
@@ -207,7 +207,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("1 output: DSI1",
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("1 output: TXP",
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: DSI0, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0),
@@ -219,7 +219,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: DSI0, TXP",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: DPI, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0),
@@ -231,19 +231,19 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: DPI, TXP",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: HDMI0, DSI1",
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: VEC, DSI1",
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: VEC, TXP",
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
@@ -251,7 +251,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
@@ -259,7 +259,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
@@ -267,7 +267,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
@@ -275,7 +275,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
};
KUNIT_ARRAY_PARAM(vc4_test_pv_muxing,
@@ -287,7 +287,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_DSI0),
VC4_PV_MUXING_TEST("TXP/DSI1 Conflict",
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("HDMI0/VEC Conflict",
VC4_ENCODER_TYPE_HDMI0,
@@ -296,22 +296,22 @@ static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DPI, HDMI0, DSI1, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
};
KUNIT_ARRAY_PARAM(vc4_test_pv_muxing_invalid,
@@ -342,7 +342,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("2 outputs: DPI, TXP",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DPI, VEC",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC),
@@ -360,7 +360,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("2 outputs: DSI0, TXP",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DSI0, VEC",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC),
@@ -372,7 +372,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: DSI1, TXP",
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
@@ -384,7 +384,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: HDMI0, HDMI1",
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
@@ -393,14 +393,14 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: HDMI1, TXP",
VC4_ENCODER_TYPE_HDMI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: TXP, VEC",
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
@@ -415,15 +415,15 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, DSI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI0",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
@@ -440,7 +440,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
@@ -455,15 +455,15 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, DSI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI0",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
@@ -490,17 +490,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
@@ -519,17 +519,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, DSI1, HDMI0, HDMI1",
@@ -540,19 +540,19 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0, HDMI1",
@@ -563,24 +563,24 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
@@ -599,17 +599,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, DSI1, HDMI0, HDMI1",
@@ -620,19 +620,19 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0, HDMI1",
@@ -643,27 +643,27 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ee82a959d279..cf40a53ad42e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -83,13 +83,22 @@ static unsigned int
vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel)
{
struct vc4_hvs *hvs = vc4->hvs;
- u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel));
+ u32 dispbase, top, base;
+
/* Top/base are supposed to be 4-pixel aligned, but the
* Raspberry Pi firmware fills the low bits (which are
* presumably ignored).
*/
- u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
- u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
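+	/* The BCM2712 HVS moved the COB allocation to SCALER6_DISPX_COB. */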
+ if (vc4->gen >= VC4_GEN_6_C) {
+ dispbase = HVS_READ(SCALER6_DISPX_COB(channel));
+ top = VC4_GET_FIELD(dispbase, SCALER6_DISPX_COB_TOP) & ~3;
+ base = VC4_GET_FIELD(dispbase, SCALER6_DISPX_COB_BASE) & ~3;
+ } else {
+ dispbase = HVS_READ(SCALER_DISPBASEX(channel));
+ top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+ base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+ }
return top - base + 4;
}
@@ -122,7 +131,10 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
* Read vertical scanline which is currently composed for our
* pixelvalve by the HVS, and also the scaler status.
*/
- val = HVS_READ(SCALER_DISPSTATX(channel));
+ if (vc4->gen >= VC4_GEN_6_C)
+ val = HVS_READ(SCALER6_DISPX_STATUS(channel));
+ else
+ val = HVS_READ(SCALER_DISPSTATX(channel));
/* Get optional system timestamp after query. */
if (etime)
@@ -131,7 +143,12 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
/* Vertical position of hvs composed scanline. */
- *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ *vpos = VC4_GET_FIELD(val, SCALER6_DISPX_STATUS_YLINE);
+ else
+ *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
*hpos = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -223,6 +240,11 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
+
+ /*
+ * NOTE: Could we use register 0x68 (PV_HW_CFG1) to get the FIFO
+ * size?
+ */
u32 fifo_len_bytes = pv_data->fifo_depth;
/*
@@ -404,6 +426,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
*/
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
+ (vc4->gen >= VC4_GEN_6_C ? PV_VCONTROL_ODD_TIMING : 0) |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
(odd_field_first
@@ -415,6 +438,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
} else {
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
+ (vc4->gen >= VC4_GEN_6_C ? PV_VCONTROL_ODD_TIMING : 0) |
(is_dsi ? PV_VCONTROL_DSI : 0));
CRTC_WRITE(PV_VSYNCD_EVEN, 0);
}
@@ -429,11 +453,17 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- if (vc4->gen == VC4_GEN_5)
+ if (vc4->gen >= VC4_GEN_5)
CRTC_WRITE(PV_MUX_CFG,
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
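+
+	/* BCM2712 wants the pixel pipe initialised before the PV is enabled. */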
+ if (vc4->gen >= VC4_GEN_6_C)
+ CRTC_WRITE(PV_PIPE_INIT_CTRL,
+ VC4_SET_FIELD(1, PV_PIPE_INIT_CTRL_PV_INIT_WIDTH) |
+ VC4_SET_FIELD(1, PV_PIPE_INIT_CTRL_PV_INIT_IDLE) |
+ PV_PIPE_INIT_CTRL_PV_INIT_EN);
+
CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR |
vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) |
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
@@ -459,8 +489,10 @@ static void require_hvs_enabled(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) !=
- SCALER_DISPCTRL_ENABLE);
+ if (vc4->gen >= VC4_GEN_6_C)
+ WARN_ON_ONCE(!(HVS_READ(SCALER6_CONTROL) & SCALER6_CONTROL_HVS_EN));
+ else
+ WARN_ON_ONCE(!(HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE));
}
static int vc4_crtc_disable(struct drm_crtc *crtc,
@@ -530,7 +562,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
"brcm,bcm2711-pixelvalve2") ||
of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
- "brcm,bcm2711-pixelvalve4")))
+ "brcm,bcm2711-pixelvalve4") ||
+ of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2712-pixelvalve0") ||
+ of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2712-pixelvalve1")))
return 0;
if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN))
@@ -789,14 +825,21 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
+ unsigned int current_dlist;
u32 chan = vc4_crtc->current_hvs_channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock(&vc4_crtc->irq_lock);
+
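+	/*
+	 * The register holding the active dlist entry moved on BCM2712,
+	 * so read the per-generation variant before the comparison.
+	 */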
+ if (vc4->gen >= VC4_GEN_6_C)
+ current_dlist = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_DL(chan)),
+ SCALER6_DISPX_DL_LACT);
+ else
+ current_dlist = HVS_READ(SCALER_DISPLACTX(chan));
+
if (vc4_crtc->event &&
- (vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) ||
- vc4_crtc->feeds_txp)) {
+ (vc4_crtc->current_dlist == current_dlist || vc4_crtc->feeds_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -807,7 +850,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
* the CRTC and encoder already reconfigured, leading to
* underruns. This can be seen when reconfiguring the CRTC.
*/
- vc4_hvs_unmask_underrun(hvs, chan);
+ if (vc4->gen < VC4_GEN_6_C)
+ vc4_hvs_unmask_underrun(hvs, chan);
}
spin_unlock(&vc4_crtc->irq_lock);
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -1265,6 +1309,32 @@ const struct vc4_pv_data bcm2711_pv4_data = {
},
};
+const struct vc4_pv_data bcm2712_pv0_data = {
+ .base = {
+ .debugfs_name = "crtc0_regs",
+ .hvs_available_channels = BIT(0),
+ .hvs_output = 0,
+ },
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI0,
+ },
+};
+
+const struct vc4_pv_data bcm2712_pv1_data = {
+ .base = {
+ .debugfs_name = "crtc1_regs",
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 1,
+ },
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI1,
+ },
+};
+
static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data },
{ .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data },
@@ -1274,6 +1344,8 @@ static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data },
{ .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data },
{ .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data },
+ { .compatible = "brcm,bcm2712-pixelvalve0", .data = &bcm2712_pv0_data },
+ { .compatible = "brcm,bcm2712-pixelvalve1", .data = &bcm2712_pv1_data },
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2c60d37275b0..c7cb1e3a6434 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -31,8 +31,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -47,7 +47,6 @@
#define DRIVER_NAME "vc4"
#define DRIVER_DESC "Broadcom VC4 graphics"
-#define DRIVER_DATE "20140616"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -222,7 +221,6 @@ const struct drm_driver vc4_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -244,7 +242,6 @@ const struct drm_driver vc5_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -279,6 +276,7 @@ static void vc4_component_unbind_all(void *ptr)
static const struct of_device_id vc4_dma_range_matches[] = {
{ .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2712-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
{ .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,cygnus-v3d" },
@@ -300,16 +298,18 @@ static int vc4_drm_bind(struct device *dev)
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"))
- gen = VC4_GEN_5;
- else
- gen = VC4_GEN_4;
+ gen = (enum vc4_gen)of_device_get_match_data(dev);
if (gen > VC4_GEN_4)
driver = &vc5_drm_driver;
else
driver = &vc4_drm_driver;
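+
+	/*
+	 * The BCM2712 display pipeline can address 36 bits of memory;
+	 * earlier generations are limited to 32-bit DMA addresses.
+	 */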
+ if (gen >= VC4_GEN_6_C)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
node = of_find_matching_node_and_match(NULL, vc4_dma_range_matches,
NULL);
if (node) {
@@ -462,9 +462,11 @@ static void vc4_platform_drm_shutdown(struct platform_device *pdev)
}
static const struct of_device_id vc4_of_match[] = {
- { .compatible = "brcm,bcm2711-vc5", },
- { .compatible = "brcm,bcm2835-vc4", },
- { .compatible = "brcm,cygnus-vc4", },
+ { .compatible = "brcm,bcm2711-vc5", .data = (void *)VC4_GEN_5 },
+	/* NB: GEN_6_C is corrected to GEN_6_D on D0 hardware by vc4_hvs_bind() */
+ { .compatible = "brcm,bcm2712-vc6", .data = (void *)VC4_GEN_6_C },
+ { .compatible = "brcm,bcm2835-vc4", .data = (void *)VC4_GEN_4 },
+ { .compatible = "brcm,cygnus-vc4", .data = (void *)VC4_GEN_4 },
{},
};
MODULE_DEVICE_TABLE(of, vc4_of_match);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index c6be1997f1c7..4a078ffd9f82 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -84,6 +84,8 @@ struct vc4_perfmon {
enum vc4_gen {
VC4_GEN_4,
VC4_GEN_5,
+ VC4_GEN_6_C,
+ VC4_GEN_6_D,
};
struct vc4_dev {
@@ -316,6 +318,21 @@ struct vc4_v3d {
struct debugfs_regset32 regset;
};
+#define VC4_NUM_UPM_HANDLES 32
+struct vc4_upm_refcounts {
+ refcount_t refcount;
+
+ /* Allocation size */
+ size_t size;
+ /* Our allocation in UPM for prefetching. */
+ struct drm_mm_node upm;
+
+ /* Pointer back to the HVS structure */
+ struct vc4_hvs *hvs;
+};
+
+#define HVS_NUM_CHANNELS 3
+
struct vc4_hvs {
struct vc4_dev *vc4;
struct platform_device *pdev;
@@ -324,6 +341,7 @@ struct vc4_hvs {
unsigned int dlist_mem_size;
struct clk *core_clk;
+ struct clk *disp_clk;
unsigned long max_core_rate;
@@ -331,8 +349,15 @@ struct vc4_hvs {
* list. Units are dwords.
*/
struct drm_mm dlist_mm;
+
/* Memory manager for the LBM memory used by HVS scaling. */
struct drm_mm lbm_mm;
+
+ /* Memory manager for the UPM memory used for prefetching. */
+ struct drm_mm upm_mm;
+ struct ida upm_handles;
+ struct vc4_upm_refcounts upm_refcounts[VC4_NUM_UPM_HANDLES + 1];
+
spinlock_t mm_lock;
struct drm_mm_node mitchell_netravali_filter;
@@ -355,6 +380,7 @@ struct vc4_hvs {
};
#define HVS_NUM_CHANNELS 3
+#define HVS_UBM_WORD_SIZE 256
struct vc4_hvs_state {
struct drm_private_state base;
@@ -424,6 +450,12 @@ struct vc4_plane_state {
/* Our allocation in LBM for temporary storage during scaling. */
struct drm_mm_node lbm;
+ /* The Unified Pre-Fetcher Handle */
+ unsigned int upm_handle[DRM_FORMAT_MAX_PLANES];
+
+ /* Number of lines to pre-fetch */
+ unsigned int upm_buffer_lines;
+
/* Set when the plane has per-pixel alpha content or does not cover
* the entire screen. This is a hint to the CRTC that it might need
* to enable background color fill.
@@ -458,7 +490,8 @@ enum vc4_encoder_type {
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_SMI,
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
+ VC4_ENCODER_TYPE_TXP1,
};
struct vc4_encoder {
@@ -505,7 +538,16 @@ struct vc4_crtc_data {
int hvs_output;
};
-extern const struct vc4_crtc_data vc4_txp_crtc_data;
+struct vc4_txp_data {
+ struct vc4_crtc_data base;
+ enum vc4_encoder_type encoder_type;
+ unsigned int high_addr_ptr_reg;
+ unsigned int has_byte_enable:1;
+ unsigned int size_minus_one:1;
+ unsigned int supports_40bit_addresses:1;
+};
+
+extern const struct vc4_txp_data bcm2835_txp_data;
struct vc4_pv_data {
struct vc4_crtc_data base;
@@ -527,6 +569,8 @@ extern const struct vc4_pv_data bcm2711_pv1_data;
extern const struct vc4_pv_data bcm2711_pv2_data;
extern const struct vc4_pv_data bcm2711_pv3_data;
extern const struct vc4_pv_data bcm2711_pv4_data;
+extern const struct vc4_pv_data bcm2712_pv0_data;
+extern const struct vc4_pv_data bcm2712_pv1_data;
struct vc4_crtc {
struct drm_crtc base;
@@ -637,6 +681,12 @@ struct vc4_crtc_state {
writel(val, hvs->regs + (offset)); \
} while (0)
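+/*
+ * The C and D steppings of the BCM2712 HVS lay out some registers
+ * differently; these helpers resolve a register name to its
+ * SCALER6_ or SCALER6D_ offset based on the detected generation.
+ */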
+#define HVS_READ6(offset) \
+ HVS_READ(hvs->vc4->gen == VC4_GEN_6_C ? SCALER6_ ## offset : SCALER6D_ ## offset)
+
+#define HVS_WRITE6(offset, val) \
+ HVS_WRITE(hvs->vc4->gen == VC4_GEN_6_C ? SCALER6_ ## offset : SCALER6D_ ## offset, val)
+
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
struct vc4_exec_info {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index e3818c48c9b8..47d9ada98430 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -31,6 +31,7 @@
* encoder block has CEC support.
*/
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/display/drm_scdc_helper.h>
@@ -383,7 +384,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
enum drm_connector_status status)
{
struct drm_connector *connector = &vc4_hdmi->connector;
- const struct drm_edid *drm_edid;
int ret;
/*
@@ -405,17 +405,14 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
return;
}
- drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
- drm_edid_connector_update(connector, drm_edid);
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
- if (!drm_edid)
+ if (status != connector_status_connected)
return;
- drm_edid_free(drm_edid);
-
for (;;) {
ret = vc4_hdmi_reset_link(connector, ctx);
if (ret == -EDEADLK) {
@@ -470,31 +467,10 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
- const struct drm_edid *drm_edid;
int ret = 0;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in reentrancy issues since cec_s_phys_addr() might call
- * .adap_enable, which leads to that funtion being called with our mutex
- * held.
- *
- * Concurrency isn't an issue at the moment since we don't share
- * any state with any of the other frameworks so we can ignore
- * the lock for now.
- */
-
- drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
- drm_edid_connector_update(connector, drm_edid);
- cec_s_phys_addr(vc4_hdmi->cec_adap,
- connector->display_info.source_physical_address, false);
- if (!drm_edid)
- return 0;
-
ret = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
struct drm_device *drm = connector->dev;
@@ -570,6 +546,7 @@ static void vc4_hdmi_connector_reset(struct drm_connector *connector)
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
+ .force = drm_atomic_helper_connector_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -580,9 +557,11 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
.detect_ctx = vc4_hdmi_connector_detect_ctx,
.get_modes = vc4_hdmi_connector_get_modes,
.atomic_check = vc4_hdmi_connector_atomic_check,
+ .mode_valid = drm_hdmi_connector_mode_valid,
};
static const struct drm_connector_hdmi_funcs vc4_hdmi_hdmi_connector_funcs;
+static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs;
static int vc4_hdmi_connector_init(struct drm_device *dev,
struct vc4_hdmi *vc4_hdmi)
@@ -608,6 +587,12 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
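+
+	/* Up to 8 I2S playback channels, no S/PDIF, no sound DAI port. */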
+ ret = drm_connector_hdmi_audio_init(connector, dev->dev,
+ &vc4_hdmi_audio_funcs,
+ 8, false, -1);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -845,6 +830,7 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
unsigned long flags;
int idx;
@@ -861,14 +847,25 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB);
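+
+	/*
+	 * Blank the pixels on BCM2712, whose video output (see the TODO
+	 * below) cannot be fully disabled without a lockup.
+	 */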
+ if (vc4->gen >= VC4_GEN_6_C)
+ HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) |
+ VC4_HD_VID_CTL_BLANKPIX);
+
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mdelay(1);
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- HDMI_WRITE(HDMI_VID_CTL,
- HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ /*
+	 * TODO: This should work on BCM2712, but for some unknown
+	 * reason it results in a system lockup.
+ */
+ if (vc4->gen < VC4_GEN_6_C) {
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ HDMI_WRITE(HDMI_VID_CTL,
+ HDMI_READ(HDMI_VID_CTL) &
+ ~VC4_HD_VID_CTL_ENABLE);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ }
vc4_hdmi_disable_scrambling(encoder);
@@ -1488,7 +1485,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
goto err_put_runtime_pm;
}
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (tmds_char_rate > 297000000)
@@ -1594,6 +1590,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
+ (HDMI_READ(HDMI_VID_CTL) &
+ ~(VC4_HD_VID_CTL_VSYNC_LOW | VC4_HD_VID_CTL_HSYNC_LOW)) |
VC4_HD_VID_CTL_ENABLE |
VC4_HD_VID_CTL_CLRRGB |
VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
@@ -1752,7 +1750,6 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- unsigned long long rate;
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
!(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
@@ -1760,8 +1757,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
- rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
- return vc4_hdmi_connector_clock_valid(&vc4_hdmi->connector, mode, rate);
+ return MODE_OK;
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
@@ -1909,9 +1905,9 @@ static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
return true;
}
-static int vc4_hdmi_audio_startup(struct device *dev, void *data)
+static int vc4_hdmi_audio_startup(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret = 0;
@@ -1973,9 +1969,9 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
-static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
+static void vc4_hdmi_audio_shutdown(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2045,13 +2041,12 @@ static int sample_rate_to_mai_fmt(int samplerate)
}
/* HDMI audio codec callbacks */
-static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
+static int vc4_hdmi_audio_prepare(struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
- struct drm_connector *connector = &vc4_hdmi->connector;
struct vc4_dev *vc4 = to_vc4_dev(drm);
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
@@ -2063,7 +2058,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
int ret = 0;
int idx;
- dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
+ dev_dbg(&vc4_hdmi->pdev->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
mutex_lock(&vc4_hdmi->mutex);
@@ -2110,18 +2105,33 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
/* Set the MAI threshold */
- if (vc4->gen >= VC4_GEN_5)
+ switch (vc4->gen) {
+ case VC4_GEN_6_D:
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x10, VC6_D_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x10, VC6_D_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x1c, VC6_D_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x1c, VC6_D_HD_MAI_THR_DREQLOW));
+ break;
+ case VC4_GEN_6_C:
+ case VC4_GEN_5:
HDMI_WRITE(HDMI_MAI_THR,
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQHIGH) |
VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQLOW));
- else
+ break;
+ case VC4_GEN_4:
HDMI_WRITE(HDMI_MAI_THR,
VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICHIGH) |
VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICLOW) |
VC4_SET_FIELD(0x6, VC4_HD_MAI_THR_DREQHIGH) |
VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_DREQLOW));
+ break;
+ default:
+		drm_err(drm, "Unknown VC4 generation: %d\n", vc4->gen);
+ break;
+ }
HDMI_WRITE(HDMI_MAI_CONFIG,
VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
@@ -2187,40 +2197,12 @@ static const struct snd_dmaengine_pcm_config pcm_conf = {
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
-static int vc4_hdmi_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &vc4_hdmi->connector;
-
- mutex_lock(&vc4_hdmi->mutex);
- memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
- mutex_unlock(&vc4_hdmi->mutex);
-
- return 0;
-}
-
-static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
- .get_eld = vc4_hdmi_audio_get_eld,
+static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
+ .startup = vc4_hdmi_audio_startup,
.prepare = vc4_hdmi_audio_prepare,
- .audio_shutdown = vc4_hdmi_audio_shutdown,
- .audio_startup = vc4_hdmi_audio_startup,
-};
-
-static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
- .ops = &vc4_hdmi_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
+ .shutdown = vc4_hdmi_audio_shutdown,
};
-static void vc4_hdmi_audio_codec_release(void *ptr)
-{
- struct vc4_hdmi *vc4_hdmi = ptr;
-
- platform_device_unregister(vc4_hdmi->audio.codec_pdev);
- vc4_hdmi->audio.codec_pdev = NULL;
-}
-
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2228,7 +2210,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link;
struct snd_soc_card *card = &vc4_hdmi->audio.card;
struct device *dev = &vc4_hdmi->pdev->dev;
- struct platform_device *codec_pdev;
const __be32 *addr;
int index, len;
int ret;
@@ -2321,20 +2302,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
return ret;
}
- codec_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &vc4_hdmi_codec_pdata,
- sizeof(vc4_hdmi_codec_pdata));
- if (IS_ERR(codec_pdev)) {
- dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
- return PTR_ERR(codec_pdev);
- }
- vc4_hdmi->audio.codec_pdev = codec_pdev;
-
- ret = devm_add_action_or_reset(dev, vc4_hdmi_audio_codec_release, vc4_hdmi);
- if (ret)
- return ret;
-
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2347,7 +2314,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->stream_name = "MAI PCM";
dai_link->codecs->dai_name = "i2s-hifi";
dai_link->cpus->dai_name = dev_name(dev);
- dai_link->codecs->name = dev_name(&codec_pdev->dev);
+ dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
card->dai_link = dai_link;
@@ -3121,6 +3088,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ clk_disable_unprepare(vc4_hdmi->audio_clock);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
@@ -3153,6 +3121,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
goto err_disable_clk;
}
+ ret = clk_prepare_enable(vc4_hdmi->audio_clock);
+ if (ret)
+ goto err_disable_clk;
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
@@ -3273,7 +3245,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
- of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
+ of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2712-hdmi0") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2712-hdmi1")) &&
HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
clk_prepare_enable(vc4_hdmi->pixel_clock);
clk_prepare_enable(vc4_hdmi->hsm_clock);
@@ -3407,10 +3381,66 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.hp_detect = vc5_hdmi_hp_detect,
};
+static const struct vc4_hdmi_variant bcm2712_hdmi0_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI0,
+ .debugfs_name = "hdmi0_regs",
+ .card_name = "vc4-hdmi-0",
+ .max_pixel_clock = 600000000,
+ .registers = vc6_hdmi_hdmi0_fields,
+ .num_registers = ARRAY_SIZE(vc6_hdmi_hdmi0_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+ },
+ .unsupported_odd_h_timings = false,
+ .external_irq_controller = true,
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc6_hdmi_phy_init,
+ .phy_disable = vc6_hdmi_phy_disable,
+ .channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
+};
+
+static const struct vc4_hdmi_variant bcm2712_hdmi1_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI1,
+ .debugfs_name = "hdmi1_regs",
+ .card_name = "vc4-hdmi-1",
+ .max_pixel_clock = 600000000,
+ .registers = vc6_hdmi_hdmi1_fields,
+ .num_registers = ARRAY_SIZE(vc6_hdmi_hdmi1_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+ },
+ .unsupported_odd_h_timings = false,
+ .external_irq_controller = true,
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc6_hdmi_phy_init,
+ .phy_disable = vc6_hdmi_phy_disable,
+ .channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
+};
+
static const struct of_device_id vc4_hdmi_dt_match[] = {
{ .compatible = "brcm,bcm2835-hdmi", .data = &bcm2835_variant },
{ .compatible = "brcm,bcm2711-hdmi0", .data = &bcm2711_hdmi0_variant },
{ .compatible = "brcm,bcm2711-hdmi1", .data = &bcm2711_hdmi1_variant },
+ { .compatible = "brcm,bcm2712-hdmi0", .data = &bcm2712_hdmi0_variant },
+ { .compatible = "brcm,bcm2712-hdmi1", .data = &bcm2712_hdmi1_variant },
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index b37f1d2c3fe5..e3d989ca302b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -104,8 +104,6 @@ struct vc4_hdmi_audio {
struct snd_soc_dai_link_component codec;
struct snd_soc_dai_link_component platform;
struct snd_dmaengine_dai_dma_data dma_data;
- struct hdmi_audio_infoframe infoframe;
- struct platform_device *codec_pdev;
bool streaming;
};
@@ -237,4 +235,8 @@ void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
+void vc6_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *conn_state);
+void vc6_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
+
#endif /* _VC4_HDMI_H_ */
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 1f5507fc7a03..56e6a35da357 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -125,6 +125,48 @@
#define VC4_HDMI_RM_FORMAT_SHIFT_SHIFT 24
#define VC4_HDMI_RM_FORMAT_SHIFT_MASK VC4_MASK(25, 24)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BG_PWRUP BIT(8)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_LDO_PWRUP BIT(7)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BIAS_PWRUP BIT(6)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_RNDGEN_PWRUP BIT(4)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_CK_PWRUP BIT(3)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_2_PWRUP BIT(2)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_1_PWRUP BIT(1)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_0_PWRUP BIT(0)
+
+#define VC6_HDMI_TX_PHY_PLL_REFCLK_REFCLK_SEL_CMOS BIT(13)
+#define VC6_HDMI_TX_PHY_PLL_REFCLK_REFFRQ_MASK VC4_MASK(9, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_POST_KDIV_CLK0_SEL_MASK VC4_MASK(3, 2)
+#define VC6_HDMI_TX_PHY_PLL_POST_KDIV_KDIV_MASK VC4_MASK(1, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_EN BIT(10)
+#define VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_MASK VC4_MASK(9, 0)
+
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL_MASK VC4_MASK(31, 28)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE_MASK VC4_MASK(27, 27)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL_MASK VC4_MASK(26, 26)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN_MASK VC4_MASK(25, 25)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL_MASK VC4_MASK(24, 23)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN_MASK VC4_MASK(22, 22)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL_MASK VC4_MASK(21, 21)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN_MASK VC4_MASK(20, 20)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL_MASK VC4_MASK(19, 18)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN_MASK VC4_MASK(17, 17)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN_MASK VC4_MASK(16, 16)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL_MASK VC4_MASK(15, 12)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN_MASK VC4_MASK(11, 11)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT_MASK VC4_MASK(10, 8)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT_MASK VC4_MASK(7, 5)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING_MASK VC4_MASK(4, 3)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING_MASK VC4_MASK(2, 1)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN_MASK VC4_MASK(0, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_PLLPOST_RESETB BIT(1)
+#define VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB BIT(0)
+
+#define VC6_HDMI_TX_PHY_PLL_POWERUP_CTL_PLL_PWRUP BIT(0)
+
#define OSCILLATOR_FREQUENCY 54000000
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
@@ -558,3 +600,601 @@ void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
+
+#define VC6_VCO_MIN_FREQ (8ULL * 1000 * 1000 * 1000)
+#define VC6_VCO_MAX_FREQ (12ULL * 1000 * 1000 * 1000)
+
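+/*
+ * The VCO runs at 10 * div times the TMDS character rate and has to
+ * stay between 8 and 12 GHz; find the range of valid dividers and
+ * settle on its midpoint. For a 148.5 MHz TMDS rate, dividers 6 to 8
+ * qualify and 7 is picked, for a 10.395 GHz VCO.
+ */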
+static unsigned long long
+vc6_phy_get_vco_freq(unsigned long long tmds_rate, unsigned int *vco_div)
+{
+ unsigned int min_div;
+ unsigned int max_div;
+ unsigned int div;
+
+ div = 0;
+ while (tmds_rate * div * 10 < VC6_VCO_MIN_FREQ)
+ div++;
+ min_div = div;
+
+ while (tmds_rate * (div + 1) * 10 < VC6_VCO_MAX_FREQ)
+ div++;
+ max_div = div;
+
+ div = min_div + (max_div - min_div) / 2;
+
+ *vco_div = div;
+ return tmds_rate * div * 10;
+}
+
+struct vc6_phy_lane_settings {
+ unsigned int ext_current_ctl:4;
+ unsigned int ffe_enable:1;
+ unsigned int slew_rate_ctl:1;
+ unsigned int ffe_post_tap_en:1;
+ unsigned int ldmos_bias_ctl:2;
+ unsigned int com_mode_ldmos_en:1;
+ unsigned int edge_sel:1;
+ unsigned int ext_current_src_hs_en:1;
+ unsigned int term_ctl:2;
+ unsigned int ext_current_src_en:1;
+ unsigned int int_current_src_en:1;
+ unsigned int int_current_ctl:4;
+ unsigned int int_current_src_hs_en:1;
+ unsigned int main_tap_current_select:3;
+ unsigned int post_tap_current_select:3;
+ unsigned int slew_ctl_slow_loading:2;
+ unsigned int slew_ctl_slow_driving:2;
+ unsigned int ffe_pre_tap_en:1;
+};
+
+struct vc6_phy_settings {
+ unsigned long long min_rate;
+ unsigned long long max_rate;
+ struct vc6_phy_lane_settings channel[3];
+ struct vc6_phy_lane_settings clock;
+};
+
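+/*
+ * Per-lane drive, termination and current-source settings for the
+ * three data lanes and the clock lane, binned by TMDS character rate.
+ */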
+static const struct vc6_phy_settings vc6_hdmi_phy_settings[] = {
+ {
+ 0, 222000000,
+ {
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ 222000001, 297000000,
+ {
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+		/* Internal Current Source Half Swing Enable */
+ .int_current_src_hs_en = 1,
+ },
+ },
+ {
+ 297000001, 597000044,
+ {
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+		/* External Current Source Half Swing Enable */
+ .ext_current_src_hs_en = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+		/* Internal Current Source Half Swing Enable */
+ .int_current_src_hs_en = 1,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+};
+
+static const struct vc6_phy_settings *
+vc6_phy_get_settings(unsigned long long tmds_rate)
+{
+ unsigned int count = ARRAY_SIZE(vc6_hdmi_phy_settings);
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ const struct vc6_phy_settings *s = &vc6_hdmi_phy_settings[i];
+
+ if (tmds_rate >= s->min_rate && tmds_rate <= s->max_rate)
+ return s;
+ }
+
+ /*
+	 * If the TMDS rate exceeds our highest setting, try the
+	 * highest setting anyway.
+ */
+ return &vc6_hdmi_phy_settings[count - 1];
+}
+
+static const struct vc6_phy_lane_settings *
+vc6_phy_get_channel_settings(enum vc4_hdmi_phy_channel chan,
+ unsigned long long tmds_rate)
+{
+ const struct vc6_phy_settings *settings = vc6_phy_get_settings(tmds_rate);
+
+ if (chan == PHY_LANE_CK)
+ return &settings->clock;
+
+ return &settings->channel[chan];
+}
+
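+/* Assert the PHY and PLL resets and power the lanes down. */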
+static void vc6_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
+{
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0);
+ HDMI_WRITE(HDMI_TX_PHY_POWERUP_CTL, 0);
+}
+
+void vc6_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *conn_state)
+{
+ const struct vc6_phy_lane_settings *chan0_settings;
+ const struct vc6_phy_lane_settings *chan1_settings;
+ const struct vc6_phy_lane_settings *chan2_settings;
+ const struct vc6_phy_lane_settings *clock_settings;
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ unsigned long long pixel_freq = conn_state->hdmi.tmds_char_rate;
+ unsigned long long vco_freq;
+ unsigned char word_sel;
+ unsigned long flags;
+ unsigned int vco_div;
+
+ vco_freq = vc6_phy_get_vco_freq(pixel_freq, &vco_div);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
+ vc6_hdmi_reset_phy(vc4_hdmi);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_0, 0x810c6000);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_1, 0x00b8c451);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_2, 0x46402e31);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_3, 0x00b8c005);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_4, 0x42410261);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_5, 0xcc021001);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_6, 0xc8301c80);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_7, 0xb0804444);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_8, 0xf80f8000);
+
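+	/* 54 MHz CMOS reference clock. */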
+ HDMI_WRITE(HDMI_TX_PHY_PLL_REFCLK,
+ VC6_HDMI_TX_PHY_PLL_REFCLK_REFCLK_SEL_CMOS |
+ VC4_SET_FIELD(54, VC6_HDMI_TX_PHY_PLL_REFCLK_REFFRQ));
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x7f);
+
+ HDMI_WRITE(HDMI_RM_OFFSET,
+ VC4_HDMI_RM_OFFSET_ONLY |
+ VC4_SET_FIELD(phy_get_rm_offset(vco_freq),
+ VC4_HDMI_RM_OFFSET_OFFSET));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_VCOCLK_DIV,
+ VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_EN |
+ VC4_SET_FIELD(vco_div,
+ VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CFG,
+ VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CFG_PDIV));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_POST_KDIV,
+ VC4_SET_FIELD(2, VC6_HDMI_TX_PHY_PLL_POST_KDIV_CLK0_SEL) |
+ VC4_SET_FIELD(1, VC6_HDMI_TX_PHY_PLL_POST_KDIV_KDIV));
+
+ chan0_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_0],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_0,
+ VC4_SET_FIELD(chan0_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan0_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan0_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan0_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan0_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan0_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan0_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan0_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan0_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan0_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan0_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan0_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan0_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan0_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan0_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan0_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan0_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan0_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ chan1_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_1],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_1,
+ VC4_SET_FIELD(chan1_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan1_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan1_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan1_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan1_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan1_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan1_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan1_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan1_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan1_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan1_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan1_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan1_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan1_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan1_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan1_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan1_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan1_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ chan2_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_2],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_2,
+ VC4_SET_FIELD(chan2_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan2_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan2_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan2_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan2_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan2_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan2_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan2_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan2_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan2_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan2_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan2_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan2_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan2_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan2_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan2_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan2_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan2_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ clock_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_CK],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_CK,
+ VC4_SET_FIELD(clock_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(clock_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(clock_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(clock_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(clock_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(clock_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(clock_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(clock_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(clock_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(clock_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(clock_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(clock_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(clock_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(clock_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(clock_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(clock_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(clock_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(clock_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
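+	/*
+	 * TMDS character rates above 340 MHz are HDMI 2.0 territory
+	 * and take a different clock word select.
+	 */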
+ if (pixel_freq >= 340000000)
+ word_sel = 3;
+ else
+ word_sel = 0;
+ HDMI_WRITE(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, word_sel);
+
+ HDMI_WRITE(HDMI_TX_PHY_POWERUP_CTL,
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BG_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_LDO_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BIAS_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_CK_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_2_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_1_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_0_PWRUP);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_POWERUP_CTL,
+ VC6_HDMI_TX_PHY_PLL_POWERUP_CTL_PLL_PWRUP);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_PLL_RESET_CTL) &
+ ~VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_PLL_RESET_CTL) |
+ VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+}
+
+void vc6_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
+{
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 68455ce513e7..59bfd69f54d9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -111,13 +111,30 @@ enum vc4_hdmi_field {
HDMI_TX_PHY_CTL_1,
HDMI_TX_PHY_CTL_2,
HDMI_TX_PHY_CTL_3,
+ HDMI_TX_PHY_CTL_CK,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4,
HDMI_TX_PHY_PLL_CFG,
+ HDMI_TX_PHY_PLL_CFG_PDIV,
HDMI_TX_PHY_PLL_CTL_0,
HDMI_TX_PHY_PLL_CTL_1,
+ HDMI_TX_PHY_PLL_MISC_0,
+ HDMI_TX_PHY_PLL_MISC_1,
+ HDMI_TX_PHY_PLL_MISC_2,
+ HDMI_TX_PHY_PLL_MISC_3,
+ HDMI_TX_PHY_PLL_MISC_4,
+ HDMI_TX_PHY_PLL_MISC_5,
+ HDMI_TX_PHY_PLL_MISC_6,
+ HDMI_TX_PHY_PLL_MISC_7,
+ HDMI_TX_PHY_PLL_MISC_8,
+ HDMI_TX_PHY_PLL_POST_KDIV,
+ HDMI_TX_PHY_PLL_POWERUP_CTL,
+ HDMI_TX_PHY_PLL_REFCLK,
+ HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_TX_PHY_PLL_VCOCLK_DIV,
HDMI_TX_PHY_POWERDOWN_CTL,
+ HDMI_TX_PHY_POWERUP_CTL,
HDMI_TX_PHY_RESET_CTL,
HDMI_TX_PHY_TMDS_CLK_WORD_SEL,
HDMI_VEC_INTERFACE_CFG,
@@ -411,6 +428,206 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
};
+static const struct vc4_hdmi_register __maybe_unused vc6_hdmi_hdmi0_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0010),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0014),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0018),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x001c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0020),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0044),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0060),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x07c),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0c0),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0c4),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0cc),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0d0),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0d4),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d8),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e8),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0ec),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f8),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x100),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x104),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x114),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0a4),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a8),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x158),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x15c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x160),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x164),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x168),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x16c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x170),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x18c),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x194),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x198),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1c8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1e4),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_CFG, 0x0f0),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f4),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERUP_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_CK, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_REFCLK, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POST_KDIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_VCOCLK_DIV, 0x02c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_0, 0x060),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_1, 0x064),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_2, 0x068),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_3, 0x06c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_4, 0x070),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_5, 0x074),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_6, 0x078),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_7, 0x07c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_8, 0x080),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_RESET_CTL, 0x190),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POWERUP_CTL, 0x194),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+ VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
+};
+
+static const struct vc4_hdmi_register __maybe_unused vc6_hdmi_hdmi1_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0030),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0034),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0038),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x003c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0040),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0048),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0064),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x07c),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0c0),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0c4),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0cc),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0d0),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0d4),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d8),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e8),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0ec),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f8),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x100),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x104),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x114),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0a4),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a8),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x158),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x15c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x160),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x164),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x168),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x16c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x170),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x18c),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x194),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x198),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1c8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1e4),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_CFG, 0x0f0),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f4),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERUP_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_CK, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_REFCLK, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POST_KDIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_VCOCLK_DIV, 0x02c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_0, 0x060),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_1, 0x064),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_2, 0x068),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_3, 0x06c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_4, 0x070),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_5, 0x074),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_6, 0x078),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_7, 0x07c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_8, 0x080),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_RESET_CTL, 0x190),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POWERUP_CTL, 0x194),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+ VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
+};
+
static inline
void __iomem *__vc4_hdmi_get_field_base(struct vc4_hdmi *hdmi,
enum vc4_hdmi_regs reg)
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 70623e6b91e9..4811d794001f 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -67,6 +67,140 @@ static const struct debugfs_reg32 vc4_hvs_regs[] = {
VC4_REG32(SCALER_OLEDCOEF2),
};
+static const struct debugfs_reg32 vc6_hvs_regs[] = {
+ VC4_REG32(SCALER6_VERSION),
+ VC4_REG32(SCALER6_CXM_SIZE),
+ VC4_REG32(SCALER6_LBM_SIZE),
+ VC4_REG32(SCALER6_UBM_SIZE),
+ VC4_REG32(SCALER6_COBA_SIZE),
+ VC4_REG32(SCALER6_COB_SIZE),
+ VC4_REG32(SCALER6_CONTROL),
+ VC4_REG32(SCALER6_FETCHER_STATUS),
+ VC4_REG32(SCALER6_FETCH_STATUS),
+ VC4_REG32(SCALER6_HANDLE_ERROR),
+ VC4_REG32(SCALER6_DISP0_CTRL0),
+ VC4_REG32(SCALER6_DISP0_CTRL1),
+ VC4_REG32(SCALER6_DISP0_BGND),
+ VC4_REG32(SCALER6_DISP0_LPTRS),
+ VC4_REG32(SCALER6_DISP0_COB),
+ VC4_REG32(SCALER6_DISP0_STATUS),
+ VC4_REG32(SCALER6_DISP0_DL),
+ VC4_REG32(SCALER6_DISP0_RUN),
+ VC4_REG32(SCALER6_DISP1_CTRL0),
+ VC4_REG32(SCALER6_DISP1_CTRL1),
+ VC4_REG32(SCALER6_DISP1_BGND),
+ VC4_REG32(SCALER6_DISP1_LPTRS),
+ VC4_REG32(SCALER6_DISP1_COB),
+ VC4_REG32(SCALER6_DISP1_STATUS),
+ VC4_REG32(SCALER6_DISP1_DL),
+ VC4_REG32(SCALER6_DISP1_RUN),
+ VC4_REG32(SCALER6_DISP2_CTRL0),
+ VC4_REG32(SCALER6_DISP2_CTRL1),
+ VC4_REG32(SCALER6_DISP2_BGND),
+ VC4_REG32(SCALER6_DISP2_LPTRS),
+ VC4_REG32(SCALER6_DISP2_COB),
+ VC4_REG32(SCALER6_DISP2_STATUS),
+ VC4_REG32(SCALER6_DISP2_DL),
+ VC4_REG32(SCALER6_DISP2_RUN),
+ VC4_REG32(SCALER6_EOLN),
+ VC4_REG32(SCALER6_DL_STATUS),
+ VC4_REG32(SCALER6_BFG_MISC),
+ VC4_REG32(SCALER6_QOS0),
+ VC4_REG32(SCALER6_PROF0),
+ VC4_REG32(SCALER6_QOS1),
+ VC4_REG32(SCALER6_PROF1),
+ VC4_REG32(SCALER6_QOS2),
+ VC4_REG32(SCALER6_PROF2),
+ VC4_REG32(SCALER6_PRI_MAP0),
+ VC4_REG32(SCALER6_PRI_MAP1),
+ VC4_REG32(SCALER6_HISTCTRL),
+ VC4_REG32(SCALER6_HISTBIN0),
+ VC4_REG32(SCALER6_HISTBIN1),
+ VC4_REG32(SCALER6_HISTBIN2),
+ VC4_REG32(SCALER6_HISTBIN3),
+ VC4_REG32(SCALER6_HISTBIN4),
+ VC4_REG32(SCALER6_HISTBIN5),
+ VC4_REG32(SCALER6_HISTBIN6),
+ VC4_REG32(SCALER6_HISTBIN7),
+ VC4_REG32(SCALER6_HDR_CFG_REMAP),
+ VC4_REG32(SCALER6_COL_SPACE),
+ VC4_REG32(SCALER6_HVS_ID),
+ VC4_REG32(SCALER6_CFC1),
+ VC4_REG32(SCALER6_DISP_UPM_ISO0),
+ VC4_REG32(SCALER6_DISP_UPM_ISO1),
+ VC4_REG32(SCALER6_DISP_UPM_ISO2),
+ VC4_REG32(SCALER6_DISP_LBM_ISO0),
+ VC4_REG32(SCALER6_DISP_LBM_ISO1),
+ VC4_REG32(SCALER6_DISP_LBM_ISO2),
+ VC4_REG32(SCALER6_DISP_COB_ISO0),
+ VC4_REG32(SCALER6_DISP_COB_ISO1),
+ VC4_REG32(SCALER6_DISP_COB_ISO2),
+ VC4_REG32(SCALER6_BAD_COB),
+ VC4_REG32(SCALER6_BAD_LBM),
+ VC4_REG32(SCALER6_BAD_UPM),
+ VC4_REG32(SCALER6_BAD_AXI),
+};
+
+static const struct debugfs_reg32 vc6_d_hvs_regs[] = {
+ VC4_REG32(SCALER6D_VERSION),
+ VC4_REG32(SCALER6D_CXM_SIZE),
+ VC4_REG32(SCALER6D_LBM_SIZE),
+ VC4_REG32(SCALER6D_UBM_SIZE),
+ VC4_REG32(SCALER6D_COBA_SIZE),
+ VC4_REG32(SCALER6D_COB_SIZE),
+ VC4_REG32(SCALER6D_CONTROL),
+ VC4_REG32(SCALER6D_FETCHER_STATUS),
+ VC4_REG32(SCALER6D_FETCH_STATUS),
+ VC4_REG32(SCALER6D_HANDLE_ERROR),
+ VC4_REG32(SCALER6D_DISP0_CTRL0),
+ VC4_REG32(SCALER6D_DISP0_CTRL1),
+ VC4_REG32(SCALER6D_DISP0_BGND0),
+ VC4_REG32(SCALER6D_DISP0_BGND1),
+ VC4_REG32(SCALER6D_DISP0_LPTRS),
+ VC4_REG32(SCALER6D_DISP0_COB),
+ VC4_REG32(SCALER6D_DISP0_STATUS),
+ VC4_REG32(SCALER6D_DISP0_DL),
+ VC4_REG32(SCALER6D_DISP0_RUN),
+ VC4_REG32(SCALER6D_DISP1_CTRL0),
+ VC4_REG32(SCALER6D_DISP1_CTRL1),
+ VC4_REG32(SCALER6D_DISP1_BGND0),
+ VC4_REG32(SCALER6D_DISP1_BGND1),
+ VC4_REG32(SCALER6D_DISP1_LPTRS),
+ VC4_REG32(SCALER6D_DISP1_COB),
+ VC4_REG32(SCALER6D_DISP1_STATUS),
+ VC4_REG32(SCALER6D_DISP1_DL),
+ VC4_REG32(SCALER6D_DISP1_RUN),
+ VC4_REG32(SCALER6D_DISP2_CTRL0),
+ VC4_REG32(SCALER6D_DISP2_CTRL1),
+ VC4_REG32(SCALER6D_DISP2_BGND0),
+ VC4_REG32(SCALER6D_DISP2_BGND1),
+ VC4_REG32(SCALER6D_DISP2_LPTRS),
+ VC4_REG32(SCALER6D_DISP2_COB),
+ VC4_REG32(SCALER6D_DISP2_STATUS),
+ VC4_REG32(SCALER6D_DISP2_DL),
+ VC4_REG32(SCALER6D_DISP2_RUN),
+ VC4_REG32(SCALER6D_EOLN),
+ VC4_REG32(SCALER6D_DL_STATUS),
+ VC4_REG32(SCALER6D_QOS0),
+ VC4_REG32(SCALER6D_PROF0),
+ VC4_REG32(SCALER6D_QOS1),
+ VC4_REG32(SCALER6D_PROF1),
+ VC4_REG32(SCALER6D_QOS2),
+ VC4_REG32(SCALER6D_PROF2),
+ VC4_REG32(SCALER6D_PRI_MAP0),
+ VC4_REG32(SCALER6D_PRI_MAP1),
+ VC4_REG32(SCALER6D_HISTCTRL),
+ VC4_REG32(SCALER6D_HISTBIN0),
+ VC4_REG32(SCALER6D_HISTBIN1),
+ VC4_REG32(SCALER6D_HISTBIN2),
+ VC4_REG32(SCALER6D_HISTBIN3),
+ VC4_REG32(SCALER6D_HISTBIN4),
+ VC4_REG32(SCALER6D_HISTBIN5),
+ VC4_REG32(SCALER6D_HISTBIN6),
+ VC4_REG32(SCALER6D_HISTBIN7),
+ VC4_REG32(SCALER6D_HVS_ID),
+};
+
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
struct drm_device *drm = &hvs->vc4->base;
@@ -145,6 +279,76 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
return 0;
}
+static int vc6_hvs_debugfs_dlist(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_printer p = drm_seq_file_printer(m);
+ unsigned int dlist_mem_size = hvs->dlist_mem_size;
+ unsigned int next_entry_start;
+ unsigned int i;
+
+ for (i = 0; i < SCALER_CHANNELS_COUNT; i++) {
+ unsigned int active_dlist, dispstat;
+ unsigned int j;
+
+ dispstat = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(i)),
+ SCALER6_DISPX_STATUS_MODE);
+ if (dispstat == SCALER6_DISPX_STATUS_MODE_DISABLED ||
+ dispstat == SCALER6_DISPX_STATUS_MODE_EOF) {
+ drm_printf(&p, "HVS chan %u disabled\n", i);
+ continue;
+ }
+
+ drm_printf(&p, "HVS chan %u:\n", i);
+
+ active_dlist = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_DL(i)),
+ SCALER6_DISPX_DL_LACT);
+ next_entry_start = 0;
+
+ for (j = active_dlist; j < dlist_mem_size; j++) {
+ u32 dlist_word;
+
+ dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
+ drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
+ dlist_word);
+ if (!next_entry_start ||
+ next_entry_start == j) {
+ if (dlist_word & SCALER_CTL0_END)
+ break;
+ next_entry_start = j +
+ VC4_GET_FIELD(dlist_word,
+ SCALER_CTL0_SIZE);
+ }
+ }
+ }
+
+ return 0;
+}
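The dlist walk above only interprets a word as a control word when it sits at an entry boundary; everything else is dumped verbatim. A hypothetical trace (made-up dlist contents, for illustration only):

    j = 40: control word, SCALER_CTL0_SIZE = 7  ->  next boundary at j = 47
    j = 41..46: payload words, printed as-is
    j = 47: control word; the walk stops here if SCALER_CTL0_END is set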
+
+static int vc6_hvs_debugfs_upm_allocs(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct vc4_upm_refcounts *refcount;
+ unsigned int i;
+
+ drm_printf(&p, "UPM Handles:\n");
+ for (i = 1; i <= VC4_NUM_UPM_HANDLES; i++) {
+ refcount = &hvs->upm_refcounts[i];
+ drm_printf(&p, "handle %u: refcount %u, size %zu [%08llx + %08llx]\n",
+ i, refcount_read(&refcount->refcount), refcount->size,
+ refcount->upm.start, refcount->upm.size);
+ }
+
+ return 0;
+}
+
/* The filter kernel is composed of dwords each containing 3 9-bit
* signed integers packed next to each other.
*/
@@ -215,12 +419,15 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
struct vc4_crtc *vc4_crtc)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
struct drm_crtc *crtc = &vc4_crtc->base;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
int idx;
u32 i;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -265,25 +472,56 @@ static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
u8 field = 0;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
if (!drm_dev_enter(drm, &idx))
return 0;
- switch (fifo) {
- case 0:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
- SCALER_DISPSTAT1_FRCNT0);
+ switch (vc4->gen) {
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ field = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(fifo)),
+ SCALER6_DISPX_STATUS_FRCNT);
break;
- case 1:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
- SCALER_DISPSTAT1_FRCNT1);
+ case VC4_GEN_5:
+ switch (fifo) {
+ case 0:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER5_DISPSTAT1_FRCNT0);
+ break;
+ case 1:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER5_DISPSTAT1_FRCNT1);
+ break;
+ case 2:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
+ SCALER5_DISPSTAT2_FRCNT2);
+ break;
+ }
+ break;
+ case VC4_GEN_4:
+ switch (fifo) {
+ case 0:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT0);
+ break;
+ case 1:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT1);
+ break;
+ case 2:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
+ SCALER_DISPSTAT2_FRCNT2);
+ break;
+ }
break;
- case 2:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
- SCALER_DISPSTAT2_FRCNT2);
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
break;
}
@@ -297,6 +535,8 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
u32 reg;
int ret;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
switch (vc4->gen) {
case VC4_GEN_4:
return output;
@@ -352,6 +592,24 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
return -EPIPE;
}
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ switch (output) {
+ case 0:
+ return 0;
+
+ case 2:
+ return 2;
+
+ case 1:
+ case 3:
+ case 4:
+ return 1;
+
+ default:
+ return -EPIPE;
+ }
+
default:
return -EPIPE;
}
@@ -370,6 +628,8 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
u32 dispctrl;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return -ENODEV;
@@ -420,11 +680,50 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
return 0;
}
-void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+static int vc6_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
+ struct drm_display_mode *mode, bool oneshot)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
+ struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
+ unsigned int chan = vc4_crtc_state->assigned_channel;
+ bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ u32 disp_ctrl1;
+ int idx;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan), SCALER6_DISPX_CTRL0_RESET);
+
+ disp_ctrl1 = HVS_READ(SCALER6_DISPX_CTRL1(chan));
+ disp_ctrl1 &= ~SCALER6_DISPX_CTRL1_INTLACE;
+ HVS_WRITE(SCALER6_DISPX_CTRL1(chan),
+ disp_ctrl1 | (interlace ? SCALER6_DISPX_CTRL1_INTLACE : 0));
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ SCALER6_DISPX_CTRL0_ENB |
+ VC4_SET_FIELD(mode->hdisplay - 1,
+ SCALER6_DISPX_CTRL0_FWIDTH) |
+ (oneshot ? SCALER6_DISPX_CTRL0_ONESHOT : 0) |
+ VC4_SET_FIELD(mode->vdisplay - 1,
+ SCALER6_DISPX_CTRL0_LINES));
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static void __vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -449,6 +748,44 @@ out:
drm_dev_exit(idx);
}
+static void __vc6_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
+ int idx;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ if (!(HVS_READ(SCALER6_DISPX_CTRL0(chan)) & SCALER6_DISPX_CTRL0_ENB))
+ goto out;
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ HVS_READ(SCALER6_DISPX_CTRL0(chan)) | SCALER6_DISPX_CTRL0_RESET);
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ HVS_READ(SCALER6_DISPX_CTRL0(chan)) & ~SCALER6_DISPX_CTRL0_ENB);
+
+ WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(chan)),
+ SCALER6_DISPX_STATUS_MODE) !=
+ SCALER6_DISPX_STATUS_MODE_DISABLED);
+
+out:
+ drm_dev_exit(idx);
+}
+
+void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ __vc6_hvs_stop_channel(hvs, chan);
+ else
+ __vc4_hvs_stop_channel(hvs, chan);
+}
+
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
@@ -505,8 +842,13 @@ static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
if (!drm_dev_enter(dev, &idx))
return;
- HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
- vc4_state->mm.start);
+ if (vc4->gen >= VC4_GEN_6_C)
+ HVS_WRITE(SCALER6_DISPX_LPTRS(vc4_state->assigned_channel),
+ VC4_SET_FIELD(vc4_state->mm.start,
+ SCALER6_DISPX_LPTRS_HEADE));
+ else
+ HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
+ vc4_state->mm.start);
drm_dev_exit(idx);
}
@@ -561,7 +903,11 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
vc4_hvs_install_dlist(crtc);
vc4_hvs_update_dlist(crtc);
- vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ vc6_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
+ else
+ vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
@@ -590,13 +936,15 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
struct drm_plane *plane;
struct vc4_plane_state *vc4_plane_state;
bool debug_dump_regs = false;
- bool enable_bg_fill = false;
+ bool enable_bg_fill = true;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
unsigned int zpos = 0;
bool found = false;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
if (!drm_dev_enter(dev, &idx)) {
vc4_crtc_send_vblank(crtc);
return;
@@ -645,13 +993,26 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- if (enable_bg_fill)
+ if (vc4->gen >= VC4_GEN_6_C) {
/* This sets a black background color fill, as is the case
* with other DRM drivers.
*/
+ if (enable_bg_fill)
+ HVS_WRITE(SCALER6_DISPX_CTRL1(channel),
+ HVS_READ(SCALER6_DISPX_CTRL1(channel)) |
+ SCALER6_DISPX_CTRL1_BGENB);
+ else
+ HVS_WRITE(SCALER6_DISPX_CTRL1(channel),
+ HVS_READ(SCALER6_DISPX_CTRL1(channel)) &
+ ~SCALER6_DISPX_CTRL1_BGENB);
+ } else {
+ /* We can actually run with a lower core clock when background
+ * fill is enabled on VC4_GEN_5, so leave it always enabled.
+ */
HVS_WRITE(SCALER_DISPBKGNDX(channel),
HVS_READ(SCALER_DISPBKGNDX(channel)) |
SCALER_DISPBKGND_FILL);
+ }
/* Only update DISPLIST if the CRTC was already running and is not
* being disabled.
@@ -668,6 +1029,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed) {
u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (crtc->state->gamma_lut) {
vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
dispbkgndx |= SCALER_DISPBKGND_GAMMA;
@@ -697,6 +1060,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
u32 dispctrl;
int idx;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -717,6 +1082,8 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
u32 dispctrl;
int idx;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -751,6 +1118,8 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
u32 status;
u32 dspeislur;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
/*
* NOTE: We don't need to protect the register access using
* drm_dev_enter() there because the interrupt handler lifetime
@@ -802,7 +1171,12 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
minor->debugfs_root,
&vc4->load_tracker_enabled);
- drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
+ if (vc4->gen >= VC4_GEN_6_C) {
+ drm_debugfs_add_file(drm, "hvs_dlists", vc6_hvs_debugfs_dlist, NULL);
+ drm_debugfs_add_file(drm, "hvs_upm", vc6_hvs_debugfs_upm_allocs, NULL);
+ } else {
+ drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
+ }
drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL);
@@ -817,6 +1191,10 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4,
{
struct drm_device *drm = &vc4->base;
struct vc4_hvs *hvs;
+ unsigned int dlist_start;
+ size_t dlist_size;
+ size_t lbm_size;
+ unsigned int i;
hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -828,27 +1206,94 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4,
spin_lock_init(&hvs->mm_lock);
- /* Set up the HVS display list memory manager. We never
- * overwrite the setup from the bootloader (just 128b out of
- * our 16K), since we don't want to scramble the screen when
- * transitioning from the firmware's boot setup to runtime.
- */
- hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END;
- drm_mm_init(&hvs->dlist_mm,
- HVS_BOOTLOADER_DLIST_END,
- hvs->dlist_mem_size);
+ switch (vc4->gen) {
+ case VC4_GEN_4:
+ case VC4_GEN_5:
+ /* Set up the HVS display list memory manager. We never
+ * overwrite the setup from the bootloader (just 128b
+ * out of our 16K), since we don't want to scramble the
+ * screen when transitioning from the firmware's boot
+ * setup to runtime.
+ */
+ dlist_start = HVS_BOOTLOADER_DLIST_END;
+ dlist_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END;
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ dlist_start = HVS_BOOTLOADER_DLIST_END;
+
+ /*
+ * If we are running a kunit test, we can't access the
+ * registers. Use a plausible size instead.
+ */
+ if (!kunit_get_current_test())
+ dlist_size = HVS_READ(SCALER6_CXM_SIZE);
+ else
+ dlist_size = 4096;
+
+ for (i = 0; i < VC4_NUM_UPM_HANDLES; i++) {
+ refcount_set(&hvs->upm_refcounts[i].refcount, 0);
+ hvs->upm_refcounts[i].hvs = hvs;
+ }
+
+ break;
+
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ return ERR_PTR(-ENODEV);
+ }
+
+ drm_mm_init(&hvs->dlist_mm, dlist_start, dlist_size);
+
+ hvs->dlist_mem_size = dlist_size;
/* Set up the HVS LBM memory manager. We could have some more
* complicated data structure that allowed reuse of LBM areas
* between planes when they don't overlap on the screen, but
* for now we just allocate globally.
*/
- if (vc4->gen == VC4_GEN_4)
+
+ switch (vc4->gen) {
+ case VC4_GEN_4:
/* 48k words of 2x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
- else
+ lbm_size = 48 * SZ_1K;
+ break;
+
+ case VC4_GEN_5:
/* 60k words of 4x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
+ lbm_size = 60 * SZ_1K;
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ /*
+ * If we are running a kunit test, we can't access the
+ * registers. Use a plausible size instead.
+ */
+ lbm_size = 1024;
+ break;
+
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ return ERR_PTR(-ENODEV);
+ }
+
+ drm_mm_init(&hvs->lbm_mm, 0, lbm_size);
+
+ if (vc4->gen >= VC4_GEN_6_C) {
+ ida_init(&hvs->upm_handles);
+
+ /*
+ * NOTE: On BCM2712, the size can also be read through the
+ * SCALER_UBM_SIZE register. That would require a register
+ * access, though, which we can't do under kunit, since kunit
+ * also uses this function to create its mock device.
+ */
+ drm_mm_init(&hvs->upm_mm, 0, 1024 * HVS_UBM_WORD_SIZE);
+ }
+
vc4->hvs = hvs;
@@ -945,10 +1390,150 @@ static int vc4_hvs_hw_init(struct vc4_hvs *hvs)
return 0;
}
+#define CFC1_N_NL_CSC_CTRL(x) (0xa000 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C00(x) (0xa008 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C01(x) (0xa00c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C02(x) (0xa010 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C03(x) (0xa014 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C04(x) (0xa018 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C10(x) (0xa01c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C11(x) (0xa020 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C12(x) (0xa024 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C13(x) (0xa028 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C14(x) (0xa02c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C20(x) (0xa030 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C21(x) (0xa034 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C22(x) (0xa038 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C23(x) (0xa03c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C24(x) (0xa040 + ((x) * 0x3000))
+
+#define SCALER_PI_CMP_CSC_RED0(x) (0x200 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_RED1(x) (0x204 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_RED_CLAMP(x) (0x208 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_CFG(x) (0x20c + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN0(x) (0x210 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN1(x) (0x214 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN_CLAMP(x) (0x218 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE0(x) (0x220 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE1(x) (0x224 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE_CLAMP(x) (0x228 + ((x) * 0x40))
+
+/* 4 S2.22 multiplication factors, and 1 S9.15 additive element for each of 3
+ * output components
+ */
+struct vc6_csc_coeff_entry {
+ u32 csc[3][5];
+};
+
+static const struct vc6_csc_coeff_entry csc_coeffs[2][3] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x0066254A, 0x0, 0xFF908A0D },
+ { 0x004A8542, 0xFFE6ED5D, 0xFFCBF856, 0x0, 0x0043C9A3 },
+ { 0x004A8542, 0x00811A54, 0x0, 0x0, 0xFF759502 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x0072BC44, 0x0, 0xFF83F312 },
+ { 0x004A8542, 0xFFF25A22, 0xFFDDE4D0, 0x0, 0x00267064 },
+ { 0x004A8542, 0x00873197, 0x0, 0x0, 0xFF6F7DC0 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x006B4A17, 0x0, 0xFF8B653F },
+ { 0x004A8542, 0xFFF402D9, 0xFFDDE4D0, 0x0, 0x0024C7AE },
+ { 0x004A8542, 0x008912CC, 0x0, 0x0, 0xFF6D9C8B }
+ }
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x0059BA5E, 0x0, 0xFFA645A1 },
+ { 0x00400000, 0xFFE9F9AC, 0xFFD24B97, 0x0, 0x0043BABB },
+ { 0x00400000, 0x00716872, 0x0, 0x0, 0xFF8E978D }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x0064C985, 0x0, 0xFF9B367A },
+ { 0x00400000, 0xFFF402E1, 0xFFE20A40, 0x0, 0x0029F2DE },
+ { 0x00400000, 0x0076C226, 0x0, 0x0, 0xFF893DD9 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x005E3F14, 0x0, 0xFFA1C0EB },
+ { 0x00400000, 0xFFF577F6, 0xFFDB580F, 0x0, 0x002F2FFA },
+ { 0x00400000, 0x007868DB, 0x0, 0x0, 0xFF879724 }
+ }
+ }
+ }
+};
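To sanity-check the fixed-point constants above, here is a small standalone user-space sketch (an editorial aid, not part of the patch) that decodes the S2.22 gains and S9.15 offsets; the first BT.601 limited-range row recovers the familiar 255/219 ~= 1.1644 luma gain, the 1.596 Cr gain, and the -222.9 offset of the classic limited-range YCbCr-to-R equation:

    #include <stdio.h>
    #include <stdint.h>

    /* Decode the fixed-point formats used by the CFC1 CSC table. */
    static double s2_22(uint32_t v) { return (double)(int32_t)v / (1 << 22); }
    static double s9_15(uint32_t v) { return (double)(int32_t)v / (1 << 15); }

    int main(void)
    {
            /* First row (R output) of the BT.601 limited-range entry. */
            printf("Y gain:  %f\n", s2_22(0x004A8542)); /* ~1.1644 */
            printf("Cr gain: %f\n", s2_22(0x0066254A)); /* ~1.5960 */
            printf("offset:  %f\n", s9_15(0xFF908A0D)); /* ~-222.9 */
            return 0;
    }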
+
+static int vc6_hvs_hw_init(struct vc4_hvs *hvs)
+{
+ const struct vc6_csc_coeff_entry *coeffs;
+ unsigned int i;
+
+ HVS_WRITE(SCALER6_CONTROL,
+ SCALER6_CONTROL_HVS_EN |
+ VC4_SET_FIELD(8, SCALER6_CONTROL_PF_LINES) |
+ VC4_SET_FIELD(15, SCALER6_CONTROL_MAX_REQS));
+
+ /* Set HVS arbiter priority to max */
+ HVS_WRITE(SCALER6(PRI_MAP0), 0xffffffff);
+ HVS_WRITE(SCALER6(PRI_MAP1), 0xffffffff);
+
+ if (hvs->vc4->gen == VC4_GEN_6_C) {
+ for (i = 0; i < 6; i++) {
+ coeffs = &csc_coeffs[i / 3][i % 3];
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C00(i), coeffs->csc[0][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C01(i), coeffs->csc[0][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C02(i), coeffs->csc[0][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C03(i), coeffs->csc[0][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C04(i), coeffs->csc[0][4]);
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C10(i), coeffs->csc[1][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C11(i), coeffs->csc[1][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C12(i), coeffs->csc[1][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C13(i), coeffs->csc[1][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C14(i), coeffs->csc[1][4]);
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C20(i), coeffs->csc[2][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C21(i), coeffs->csc[2][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C22(i), coeffs->csc[2][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C23(i), coeffs->csc[2][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C24(i), coeffs->csc[2][4]);
+
+ HVS_WRITE(CFC1_N_NL_CSC_CTRL(i), BIT(15));
+ }
+ } else {
+ for (i = 0; i < 8; i++) {
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED0(i), 0x1f002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED1(i), 0x3994);
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED_CLAMP(i), 0xfff00000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_CFG(i), 0x1);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN0(i), 0x18002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN1(i), 0xf927eee2);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN_CLAMP(i), 0xfff00000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE0(i), 0x18002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE1(i), 0x43d80000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE_CLAMP(i), 0xfff00000);
+ }
+ }
+
+ return 0;
+}
+
static int vc4_hvs_cob_init(struct vc4_hvs *hvs)
{
struct vc4_dev *vc4 = hvs->vc4;
- u32 reg, top;
+ u32 reg, top, base;
/*
* Recompute Composite Output Buffer (COB) allocations for the
@@ -1009,6 +1594,32 @@ static int vc4_hvs_cob_init(struct vc4_hvs *hvs)
HVS_WRITE(SCALER_DISPBASE0, reg);
break;
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ #define VC6_COB_LINE_WIDTH 3840
+ #define VC6_COB_NUM_LINES 4
+ base = 0;
+ top = 3840;
+
+ HVS_WRITE(SCALER6_DISPX_COB(2),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+
+ base = top + 16;
+ top += VC6_COB_LINE_WIDTH * VC6_COB_NUM_LINES;
+
+ HVS_WRITE(SCALER6_DISPX_COB(1),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+
+ base = top + 16;
+ top += VC6_COB_LINE_WIDTH * VC6_COB_NUM_LINES;
+
+ HVS_WRITE(SCALER6_DISPX_COB(0),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+ break;
+
default:
return -EINVAL;
}
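With VC6_COB_LINE_WIDTH = 3840 and VC6_COB_NUM_LINES = 4, the VC4_GEN_6 branch above carves the COB into three regions with a 16-word gap between them:

    channel 2: base = 0,     top = 3840
    channel 1: base = 3856,  top = 3840 + 15360 = 19200
    channel 0: base = 19216, top = 19200 + 15360 = 34560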
@@ -1034,10 +1645,23 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(hvs);
hvs->regset.base = hvs->regs;
- hvs->regset.regs = vc4_hvs_regs;
- hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs);
- if (vc4->gen == VC4_GEN_5) {
+ if (vc4->gen == VC4_GEN_6_C) {
+ hvs->regset.regs = vc6_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc6_hvs_regs);
+
+ if (VC4_GET_FIELD(HVS_READ(SCALER6_VERSION), SCALER6_VERSION) ==
+ SCALER6_VERSION_D0) {
+ vc4->gen = VC4_GEN_6_D;
+ hvs->regset.regs = vc6_d_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc6_d_hvs_regs);
+ }
+ } else {
+ hvs->regset.regs = vc4_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs);
+ }
+
+ if (vc4->gen >= VC4_GEN_5) {
struct rpi_firmware *firmware;
struct device_node *node;
unsigned int max_rate;
@@ -1051,12 +1675,20 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (!firmware)
return -EPROBE_DEFER;
- hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
+ hvs->core_clk = devm_clk_get(&pdev->dev,
+ (vc4->gen >= VC4_GEN_6_C) ? "core" : NULL);
if (IS_ERR(hvs->core_clk)) {
dev_err(&pdev->dev, "Couldn't get core clock\n");
return PTR_ERR(hvs->core_clk);
}
+ hvs->disp_clk = devm_clk_get(&pdev->dev,
+ (vc4->gen >= VC4_GEN_6_C) ? "disp" : NULL);
+ if (IS_ERR(hvs->disp_clk)) {
+ dev_err(&pdev->dev, "Couldn't get disp clock\n");
+ return PTR_ERR(hvs->disp_clk);
+ }
+
max_rate = rpi_firmware_clk_get_max_rate(firmware,
RPI_FIRMWARE_CORE_CLK_ID);
rpi_firmware_put(firmware);
@@ -1073,14 +1705,23 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
dev_err(&pdev->dev, "Couldn't enable the core clock\n");
return ret;
}
+
+ ret = clk_prepare_enable(hvs->disp_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the disp clock\n");
+ return ret;
+ }
}
- if (vc4->gen == VC4_GEN_4)
- hvs->dlist = hvs->regs + SCALER_DLIST_START;
- else
+ if (vc4->gen >= VC4_GEN_5)
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
+ else
+ hvs->dlist = hvs->regs + SCALER_DLIST_START;
- ret = vc4_hvs_hw_init(hvs);
+ if (vc4->gen >= VC4_GEN_6_C)
+ ret = vc6_hvs_hw_init(hvs);
+ else
+ ret = vc4_hvs_hw_init(hvs);
if (ret)
return ret;
@@ -1097,10 +1738,12 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
- vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
- if (ret)
- return ret;
+ if (vc4->gen < VC4_GEN_6_C) {
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1125,6 +1768,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
drm_mm_remove_node(node);
drm_mm_takedown(&vc4->hvs->lbm_mm);
+ clk_disable_unprepare(hvs->disp_clk);
clk_disable_unprepare(hvs->core_clk);
vc4->hvs = NULL;
@@ -1147,6 +1791,7 @@ static void vc4_hvs_dev_remove(struct platform_device *pdev)
static const struct of_device_id vc4_hvs_dt_match[] = {
{ .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2712-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 58bbb9efc2df..f5b167417428 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -138,6 +138,8 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
struct drm_color_ctm *ctm = ctm_state->ctm;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (ctm_state->fifo) {
HVS_WRITE(SCALER_OLEDCOEF2,
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
@@ -213,6 +215,8 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_crtc *crtc;
unsigned int i;
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_4);
+
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
@@ -256,6 +260,8 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
unsigned int i;
u32 reg;
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_5);
+
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
@@ -320,17 +326,62 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
}
}
+static void vc6_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+ struct drm_atomic_state *state)
+{
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+ struct vc4_encoder *vc4_encoder;
+ struct drm_encoder *encoder;
+ unsigned char mux;
+ u32 reg;
+
+ if (!vc4_state->update_muxing)
+ continue;
+
+ if (vc4_state->assigned_channel != 1)
+ continue;
+
+ encoder = vc4_get_crtc_encoder(crtc, crtc_state);
+ vc4_encoder = to_vc4_encoder(encoder);
+ switch (vc4_encoder->type) {
+ case VC4_ENCODER_TYPE_HDMI1:
+ mux = 0;
+ break;
+
+ case VC4_ENCODER_TYPE_TXP1:
+ mux = 2;
+ break;
+
+ default:
+ drm_err(&vc4->base, "Unhandled encoder type for PV muxing %d",
+ vc4_encoder->type);
+ mux = 0;
+ break;
+ }
+
+ reg = HVS_READ(SCALER6_CONTROL);
+ HVS_WRITE(SCALER6_CONTROL,
+ (reg & ~SCALER6_CONTROL_DSP1_TARGET_MASK) |
+ VC4_SET_FIELD(mux, SCALER6_CONTROL_DSP1_TARGET));
+ }
+}
+
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- struct drm_crtc_state *new_crtc_state;
struct vc4_hvs_state *new_hvs_state;
- struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
unsigned int channel;
- int i;
old_hvs_state = vc4_hvs_get_old_global_state(state);
if (WARN_ON(IS_ERR(old_hvs_state)))
@@ -340,14 +391,20 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (WARN_ON(IS_ERR(new_hvs_state)))
return;
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- struct vc4_crtc_state *vc4_crtc_state;
+ if (vc4->gen < VC4_GEN_6_C) {
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- if (!new_crtc_state->commit)
- continue;
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state;
+
+ if (!new_crtc_state->commit)
+ continue;
- vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
- vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
+ vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
+ vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
+ }
}
for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
@@ -382,16 +439,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* modeset.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
+ WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
}
drm_atomic_helper_commit_modeset_disables(dev, state);
- vc4_ctm_commit(vc4, state);
+ if (vc4->gen <= VC4_GEN_5)
+ vc4_ctm_commit(vc4, state);
- if (vc4->gen == VC4_GEN_5)
- vc5_hvs_pv_muxing_commit(vc4, state);
- else
+ switch (vc4->gen) {
+ case VC4_GEN_4:
vc4_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ case VC4_GEN_5:
+ vc5_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ vc6_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ default:
+ drm_err(dev, "Unknown VC4 generation: %d", vc4->gen);
+ break;
+ }
drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
@@ -418,6 +491,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* requirements.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
+ WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
clk_get_rate(hvs->core_clk));
@@ -1056,7 +1130,10 @@ int vc4_kms_load(struct drm_device *dev)
return ret;
}
- if (vc4->gen == VC4_GEN_5) {
+ if (vc4->gen >= VC4_GEN_6_C) {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ } else if (vc4->gen >= VC4_GEN_5) {
dev->mode_config.max_width = 7680;
dev->mode_config.max_height = 7680;
} else {
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index ba6e86d62a77..d608860d525f 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -278,7 +278,10 @@ static bool plane_enabled(struct drm_plane_state *state)
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ struct vc4_hvs *hvs = vc4->hvs;
struct vc4_plane_state *vc4_state;
+ unsigned int i;
if (WARN_ON(!plane->state))
return NULL;
@@ -288,6 +291,12 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return NULL;
memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ if (vc4_state->upm_handle[i])
+ refcount_inc(&hvs->upm_refcounts[vc4_state->upm_handle[i]].refcount);
+ }
+
vc4_state->dlist_initialized = 0;
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
@@ -306,18 +315,47 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return &vc4_state->base;
}
+static void vc4_plane_release_upm_ida(struct vc4_hvs *hvs, unsigned int upm_handle)
+{
+ struct vc4_upm_refcounts *refcount = &hvs->upm_refcounts[upm_handle];
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
+ drm_mm_remove_node(&refcount->upm);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ refcount->upm.start = 0;
+ refcount->upm.size = 0;
+ refcount->size = 0;
+
+ ida_free(&hvs->upm_handles, upm_handle);
+}
+
static void vc4_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ struct vc4_hvs *hvs = vc4->hvs;
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int i;
if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
- spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
drm_mm_remove_node(&vc4_state->lbm);
- spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ }
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ struct vc4_upm_refcounts *refcount;
+
+ if (!vc4_state->upm_handle[i])
+ continue;
+
+ refcount = &hvs->upm_refcounts[vc4_state->upm_handle[i]];
+
+ if (refcount_dec_and_test(&refcount->refcount))
+ vc4_plane_release_upm_ida(hvs, vc4_state->upm_handle[i]);
}
kfree(vc4_state->dlist);
@@ -330,7 +368,10 @@ static void vc4_plane_reset(struct drm_plane *plane)
{
struct vc4_plane_state *vc4_state;
- WARN_ON(plane->state);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
@@ -528,8 +569,11 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_state->base.plane->dev);
u32 scale, recip;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
scale = src / dst;
/* The specs note that while the reciprocal would be defined
@@ -538,6 +582,11 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
recip = ~0 / scale;
vc4_dlist_write(vc4_state,
+ /*
+ * The BCM2712 lacks BIT(31) compared to the
+ * previous generations, but we don't use it.
+ */
VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
vc4_dlist_write(vc4_state,
@@ -550,10 +599,13 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst,
u32 xy, int channel)
{
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_state->base.plane->dev);
u32 scale = src / dst;
s32 offset, offset2;
s32 phase;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
/*
* Start the phase at 1/2 pixel from the 1st pixel at src_x.
* 1/4 pixel for YUV.
@@ -598,10 +650,15 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst,
vc4_dlist_write(vc4_state,
SCALER_PPF_AGC |
VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
+ /*
+ * The documented register layout for setting up the
+ * phase is slightly different on the BCM2712, but the
+ * two appear equivalent.
+ */
VC4_SET_FIELD(phase, SCALER_PPF_IPHASE));
}
-static u32 vc4_lbm_size(struct drm_plane_state *state)
+static u32 __vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
@@ -649,11 +706,139 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
return lbm;
}
+static unsigned int vc4_lbm_words_per_component(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ switch (vc4_state->y_scaling[channel]) {
+ case VC4_SCALING_PPF:
+ return 4;
+
+ case VC4_SCALING_TPZ:
+ return 2;
+
+ default:
+ return 0;
+ }
+}
+
+static unsigned int vc4_lbm_components(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct drm_format_info *info = state->fb->format;
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ if (vc4_state->y_scaling[channel] == VC4_SCALING_NONE)
+ return 0;
+
+ if (info->is_yuv)
+ return channel ? 2 : 1;
+
+ if (info->has_alpha)
+ return 4;
+
+ return 3;
+}
+
+static unsigned int vc4_lbm_channel_size(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct drm_format_info *info = state->fb->format;
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int channels_scaled = 0;
+ unsigned int components, words, wpc;
+ unsigned int width, lines;
+ unsigned int i;
+
+ /* LBM is meant to use the smaller of source or dest width, but there
+ * is an issue with UV scaling in that the size required for the second
+ * channel is based on the source width only.
+ */
+ if (info->hsub > 1 && channel == 1)
+ width = state->src_w >> 16;
+ else
+ width = min(state->src_w >> 16, state->crtc_w);
+ width = round_up(width / info->hsub, 4);
+
+ wpc = vc4_lbm_words_per_component(state, channel);
+ if (!wpc)
+ return 0;
+
+ components = vc4_lbm_components(state, channel);
+ if (!components)
+ return 0;
+
+ if (state->alpha != DRM_BLEND_ALPHA_OPAQUE && info->has_alpha)
+ components -= 1;
+
+ words = width * wpc * components;
+
+ lines = DIV_ROUND_UP(words, 128 / info->hsub);
+
+ for (i = 0; i < 2; i++)
+ if (vc4_state->y_scaling[i] != VC4_SCALING_NONE)
+ channels_scaled++;
+
+ if (channels_scaled == 1)
+ lines = lines / 2;
+
+ return lines;
+}
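As a rough worked example of the sizing above (hypothetical plane: 1920-wide ARGB8888, opaque plane alpha, PPF vertical scaling on this channel only, hsub = 1 and crtc_w >= 1920):

    width      = round_up(min(1920, crtc_w), 4) = 1920
    wpc        = 4 (PPF), components = 4 (RGBA)
    words      = 1920 * 4 * 4 = 30720
    lines      = DIV_ROUND_UP(30720, 128 / 1) = 240
    one of the two channels scales -> lines = 240 / 2 = 120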
+
+static unsigned int __vc6_lbm_size(const struct drm_plane_state *state)
+{
+ const struct drm_format_info *info = state->fb->format;
+
+ if (info->hsub > 1)
+ return max(vc4_lbm_channel_size(state, 0),
+ vc4_lbm_channel_size(state, 1));
+ else
+ return vc4_lbm_channel_size(state, 0);
+}
+
+static u32 vc4_lbm_size(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+
+ /* LBM is not needed when there's no vertical scaling. */
+ if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+ return 0;
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ return __vc6_lbm_size(state);
+ else
+ return __vc4_lbm_size(state);
+}
+
+static size_t vc6_upm_size(const struct drm_plane_state *state,
+ unsigned int plane)
+{
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int stride = state->fb->pitches[plane];
+
+ /*
+ * TODO: This only works for raster formats, and is sub-optimal
+ * for buffers with a stride aligned on 32 bytes.
+ */
+ unsigned int words_per_line = (stride + 62) / 32;
+ unsigned int fetch_region_size = words_per_line * 32;
+ unsigned int buffer_lines = 2 << vc4_state->upm_buffer_lines;
+ unsigned int buffer_size = fetch_region_size * buffer_lines;
+
+ return ALIGN(buffer_size, HVS_UBM_WORD_SIZE);
+}
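To make the arithmetic concrete, a worked example with hypothetical values (and assuming SCALER6_PTR0_UPM_BUFF_SIZE_2_LINES encodes as 0, so 2 << 0 = 2 buffered lines):

    stride         = 1920 * 4 = 7680 bytes (1920-wide XRGB8888)
    words_per_line = (7680 + 62) / 32 = 241
    fetch_region   = 241 * 32 = 7712 bytes per line
    buffer_lines   = 2 << 0 = 2
    buffer_size    = ALIGN(7712 * 2, HVS_UBM_WORD_SIZE)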
+
static void vc4_write_scaling_parameters(struct drm_plane_state *state,
int channel)
{
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
/* Ch0 H-PPF Word 0: Scaling Parameters */
if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
vc4_write_ppf(vc4_state, vc4_state->src_w[channel],
@@ -750,6 +935,10 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
if (!lbm_size)
return 0;
+ /*
+ * NOTE: The size doesn't need to be aligned on BCM2712, since
+ * vc4_lbm_size() already returns it in words.
+ */
if (vc4->gen == VC4_GEN_5)
lbm_size = ALIGN(lbm_size, 64);
else if (vc4->gen == VC4_GEN_4)
@@ -787,6 +976,108 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
return 0;
}
+static int vc6_plane_allocate_upm(struct drm_plane_state *state)
+{
+ const struct drm_format_info *info = state->fb->format;
+ struct drm_device *drm = state->plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int i;
+ int ret;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ vc4_state->upm_buffer_lines = SCALER6_PTR0_UPM_BUFF_SIZE_2_LINES;
+
+ for (i = 0; i < info->num_planes; i++) {
+ struct vc4_upm_refcounts *refcount;
+ int upm_handle;
+ unsigned long irqflags;
+ size_t upm_size;
+
+ upm_size = vc6_upm_size(state, i);
+ if (!upm_size)
+ return -EINVAL;
+ upm_handle = vc4_state->upm_handle[i];
+
+ if (upm_handle &&
+ hvs->upm_refcounts[upm_handle].size == upm_size) {
+ /* Allocation is the same size as the previous user of
+ * the plane. Keep the allocation.
+ */
+ vc4_state->upm_handle[i] = upm_handle;
+ } else {
+ if (upm_handle &&
+ refcount_dec_and_test(&hvs->upm_refcounts[upm_handle].refcount)) {
+ vc4_plane_release_upm_ida(hvs, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ }
+
+ upm_handle = ida_alloc_range(&hvs->upm_handles, 1,
+ VC4_NUM_UPM_HANDLES,
+ GFP_KERNEL);
+ if (upm_handle < 0) {
+ drm_dbg(drm, "Out of upm_handles\n");
+ return upm_handle;
+ }
+ vc4_state->upm_handle[i] = upm_handle;
+
+ refcount = &hvs->upm_refcounts[upm_handle];
+ refcount_set(&refcount->refcount, 1);
+ refcount->size = upm_size;
+
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
+ ret = drm_mm_insert_node_generic(&hvs->upm_mm,
+ &refcount->upm,
+ upm_size, HVS_UBM_WORD_SIZE,
+ 0, 0);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ if (ret) {
+ drm_err(drm, "Failed to allocate UPM entry: %d\n", ret);
+ refcount_set(&refcount->refcount, 0);
+ ida_free(&hvs->upm_handles, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ return ret;
+ }
+ }
+
+ refcount = &hvs->upm_refcounts[upm_handle];
+ vc4_state->dlist[vc4_state->ptr0_offset[i]] |=
+ VC4_SET_FIELD(refcount->upm.start / HVS_UBM_WORD_SIZE,
+ SCALER6_PTR0_UPM_BASE) |
+ VC4_SET_FIELD(vc4_state->upm_handle[i] - 1,
+ SCALER6_PTR0_UPM_HANDLE) |
+ VC4_SET_FIELD(vc4_state->upm_buffer_lines,
+ SCALER6_PTR0_UPM_BUFF_SIZE);
+ }
+
+ return 0;
+}
+
+static void vc6_plane_free_upm(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_device *drm = state->plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ unsigned int i;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ unsigned int upm_handle;
+
+ upm_handle = vc4_state->upm_handle[i];
+ if (!upm_handle)
+ continue;
+
+ if (refcount_dec_and_test(&hvs->upm_refcounts[upm_handle].refcount))
+ vc4_plane_release_upm_ida(hvs, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ }
+}
+
/*
* The colorspace conversion matrices are held in 3 entries in the dlist.
* Create an array of them, with entries for each full and limited mode, and
@@ -834,6 +1125,11 @@ static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
{
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_4);
+
if (!state->fb->format->has_alpha)
return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE);
@@ -855,25 +1151,56 @@ static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
{
- if (!state->fb->format->has_alpha)
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
- SCALER5_CTL2_ALPHA_MODE);
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
- switch (state->pixel_blend_mode) {
- case DRM_MODE_BLEND_PIXEL_NONE:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
- SCALER5_CTL2_ALPHA_MODE);
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_5 && vc4->gen != VC4_GEN_6_C &&
+ vc4->gen != VC4_GEN_6_D);
+
+ switch (vc4->gen) {
default:
- case DRM_MODE_BLEND_PREMULTI:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
- SCALER5_CTL2_ALPHA_MODE) |
- SCALER5_CTL2_ALPHA_PREMULT;
- case DRM_MODE_BLEND_COVERAGE:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
- SCALER5_CTL2_ALPHA_MODE);
+ case VC4_GEN_5:
+ case VC4_GEN_6_C:
+ if (!state->fb->format->has_alpha)
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+
+ switch (state->pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+ default:
+ case DRM_MODE_BLEND_PREMULTI:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE) |
+ SCALER5_CTL2_ALPHA_PREMULT;
+ case DRM_MODE_BLEND_COVERAGE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE);
+ }
+ case VC4_GEN_6_D:
+ /* 2712-D configures fixed alpha mode in CTL0 */
+ return state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ?
+ SCALER5_CTL2_ALPHA_PREMULT : 0;
}
}
+static u32 vc4_hvs6_get_alpha_mask_mode(struct drm_plane_state *state)
+{
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);
+
+ if (vc4->gen == VC4_GEN_6_D &&
+ (!state->fb->format->has_alpha ||
+ state->pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE))
+ return VC4_SET_FIELD(SCALER6D_CTL0_ALPHA_MASK_FIXED,
+ SCALER6_CTL0_ALPHA_MASK);
+
+ return VC4_SET_FIELD(SCALER6_CTL0_ALPHA_MASK_NONE, SCALER6_CTL0_ALPHA_MASK);
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -906,6 +1233,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (ret)
return ret;
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h) {
+ /* 0 source size probably means the plane is offscreen */
+ vc4_state->dlist_initialized = 1;
+ return 0;
+ }
+
width = vc4_state->src_w[0] >> 16;
height = vc4_state->src_h[0] >> 16;
@@ -1363,6 +1697,427 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
return 0;
}
+static u32 vc6_plane_get_csc_mode(struct vc4_plane_state *vc4_state)
+{
+ struct drm_plane_state *state = &vc4_state->base;
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+ u32 ret = 0;
+
+ if (vc4_state->is_yuv) {
+ enum drm_color_encoding color_encoding = state->color_encoding;
+ enum drm_color_range color_range = state->color_range;
+
+ /* CSC pre-loaded with:
+ * 0 = BT601 limited range
+ * 1 = BT709 limited range
+ * 2 = BT2020 limited range
+ * 3 = BT601 full range
+ * 4 = BT709 full range
+ * 5 = BT2020 full range
+ */
+ if (color_encoding > DRM_COLOR_YCBCR_BT2020)
+ color_encoding = DRM_COLOR_YCBCR_BT601;
+ if (color_range > DRM_COLOR_YCBCR_FULL_RANGE)
+ color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+ if (vc4->gen == VC4_GEN_6_C) {
+ ret |= SCALER6C_CTL2_CSC_ENABLE;
+ ret |= VC4_SET_FIELD(color_encoding + (color_range * 3),
+ SCALER6C_CTL2_BRCM_CFC_CONTROL);
+ } else {
+ ret |= SCALER6D_CTL2_CSC_ENABLE;
+ ret |= VC4_SET_FIELD(color_encoding + (color_range * 3),
+ SCALER6D_CTL2_BRCM_CFC_CONTROL);
+ }
+ }
+
+ return ret;
+}
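The CFC control field is just the index of the CSC slot preloaded by vc6_hvs_hw_init(): slot = color_encoding + color_range * 3, which matches the csc_coeffs[range][encoding] table there (the init loop uses i / 3 for the range and i % 3 for the encoding). For example:

    BT601  limited range: 0 + 0 * 3 = 0
    BT709  full range:    1 + 1 * 3 = 4
    BT2020 full range:    2 + 1 * 3 = 5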
+
+static int vc6_plane_mode_set(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *drm = plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
+ const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+ u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
+ int num_planes = fb->format->num_planes;
+ u32 h_subsample = fb->format->hsub;
+ u32 v_subsample = fb->format->vsub;
+ bool mix_plane_alpha;
+ bool covers_screen;
+ u32 scl0, scl1, pitch0;
+ u32 tiling, src_x, src_y;
+ u32 width, height;
+ u32 hvs_format = format->hvs;
+ u32 offsets[3] = { 0 };
+ unsigned int rotation;
+ int ret, i;
+
+ if (vc4_state->dlist_initialized)
+ return 0;
+
+ ret = vc4_plane_setup_clipping_and_scaling(state);
+ if (ret)
+ return ret;
+
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h) {
+ /* 0 source size probably means the plane is offscreen.
+ * 0 destination size is a redundant plane.
+ */
+ vc4_state->dlist_initialized = 1;
+ return 0;
+ }
+
+ width = vc4_state->src_w[0] >> 16;
+ height = vc4_state->src_h[0] >> 16;
+
+ /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
+ * and 4:4:4, scl1 should be set to scl0 so both channels of
+ * the scaler do the same thing. For YUV, the Y plane needs
+ * to be put in channel 1 and Cb/Cr in channel 0, so we swap
+ * the scl fields here.
+ */
+ if (num_planes == 1) {
+ scl0 = vc4_get_scl_field(state, 0);
+ scl1 = scl0;
+ } else {
+ scl0 = vc4_get_scl_field(state, 1);
+ scl1 = vc4_get_scl_field(state, 0);
+ }
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ /* We must point to the last line when Y reflection is enabled. */
+ src_y = vc4_state->src_y >> 16;
+ if (rotation & DRM_MODE_REFLECT_Y)
+ src_y += height - 1;
+
+ src_x = vc4_state->src_x >> 16;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_LINEAR:
+ tiling = SCALER6_CTL0_ADDR_MODE_LINEAR;
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ */
+ for (i = 0; i < num_planes; i++) {
+ offsets[i] += src_y / (i ? v_subsample : 1) * fb->pitches[i];
+ offsets[i] += src_x / (i ? h_subsample : 1) * fb->format->cpp[i];
+ }
+
+ break;
+
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+ uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+ u32 components_per_word;
+ u32 starting_offset;
+ u32 fetch_count;
+
+ if (param > SCALER_TILE_HEIGHT_MASK) {
+ DRM_DEBUG_KMS("SAND height too large (%d)\n",
+ param);
+ return -EINVAL;
+ }
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
+ tiling = SCALER6_CTL0_ADDR_MODE_128B;
+ } else {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER6_CTL0_ADDR_MODE_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER6_CTL0_ADDR_MODE_256B;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ *
+ * For P030, y_ptr [31:4] is the 128bit word for the start pixel
+ * y_ptr [3:0] is the pixel (0-11) contained within that 128bit
+ * word that should be taken as the first pixel.
+ * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the
+ * element within the 128bit word, e.g. for pixel 3 the value
+ * should be 6.
+ */
+ for (i = 0; i < num_planes; i++) {
+ u32 tile_w, tile, x_off, pix_per_tile;
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ /*
+ * Spec says: bits [31:4] of the given address
+ * should point to the 128-bit word containing
+ * the desired starting pixel, and bits[3:0]
+ * should be between 0 and 11, indicating which
+ * of the 12-pixels in that 128-bit word is the
+ * first pixel to be used
+ */
+ u32 remaining_pixels = src_x % 96;
+ u32 aligned = remaining_pixels / 12;
+ u32 last_bits = remaining_pixels % 12;
+
+ x_off = aligned * 16 + last_bits;
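+ /*
+ * Worked example: src_x = 15 gives
+ * remaining_pixels = 15, aligned = 1 and
+ * last_bits = 3, so x_off = 19: 128-bit
+ * word 1 ([31:4]), pixel 3 ([3:0]).
+ */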
+ tile_w = 128;
+ pix_per_tile = 96;
+ } else {
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tile_w = 128;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tile_w = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pix_per_tile = tile_w / fb->format->cpp[0];
+ x_off = (src_x % pix_per_tile) /
+ (i ? h_subsample : 1) *
+ fb->format->cpp[i];
+ }
+
+ tile = src_x / pix_per_tile;
+
+ offsets[i] += param * tile_w * tile;
+ offsets[i] += src_y / (i ? v_subsample : 1) * tile_w;
+ offsets[i] += x_off & ~(i ? 1 : 0);
+ }
+
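+ /*
+ * fetch_count is a ceiling division giving the number of SAND
+ * words each line touches, e.g. width = 100 with
+ * starting_offset = 5 at 24 components per word (P030) needs
+ * (100 + 5 + 23) / 24 = 5 words.
+ */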
+ components_per_word = fb->format->format == DRM_FORMAT_P030 ? 24 : 32;
+ starting_offset = src_x % components_per_word;
+ fetch_count = (width + starting_offset + components_per_word - 1) /
+ components_per_word;
+
+ pitch0 = VC4_SET_FIELD(param, SCALER6_PTR2_PITCH) |
+ VC4_SET_FIELD(fetch_count - 1, SCALER6_PTR2_FETCH_COUNT);
+ break;
+ }
+
+ default:
+ DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
+ (long long)fb->modifier);
+ return -EINVAL;
+ }
+
+ /* fetch an extra pixel if we don't actually line up with the left edge. */
+ if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16))
+ width++;
+
+ /* same for the right side */
+ if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) &&
+ vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16))
+ width++;
+
+ /* now for the top */
+ if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16))
+ height++;
+
+ /* and the bottom */
+ if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) &&
+ vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16))
+ height++;
+
+ /* For YUV444 the hardware wants double the width; otherwise it
+ * doesn't fetch the full width of chroma.
+ */
+ if (format->drm == DRM_FORMAT_YUV444 || format->drm == DRM_FORMAT_YVU444)
+ width <<= 1;
+
+ /* Don't waste cycles mixing with plane alpha if the set alpha
+ * is opaque or there is no per-pixel alpha information.
+ * In any case we use the alpha property value as the fixed alpha.
+ */
+ mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
+ fb->format->has_alpha;
+
+ /* Control Word 0: Scaling Configuration & Element Validity */
+ vc4_dlist_write(vc4_state,
+ SCALER6_CTL0_VALID |
+ VC4_SET_FIELD(tiling, SCALER6_CTL0_ADDR_MODE) |
+ vc4_hvs6_get_alpha_mask_mode(state) |
+ (vc4_state->is_unity ? SCALER6_CTL0_UNITY : 0) |
+ VC4_SET_FIELD(format->pixel_order_hvs5, SCALER6_CTL0_ORDERRGBA) |
+ VC4_SET_FIELD(scl1, SCALER6_CTL0_SCL1_MODE) |
+ VC4_SET_FIELD(scl0, SCALER6_CTL0_SCL0_MODE) |
+ VC4_SET_FIELD(hvs_format, SCALER6_CTL0_PIXEL_FORMAT));
+
+ /* Position Word 0: Image Position */
+ vc4_state->pos0_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_y, SCALER6_POS0_START_Y) |
+ (rotation & DRM_MODE_REFLECT_X ? SCALER6_POS0_HFLIP : 0) |
+ VC4_SET_FIELD(vc4_state->crtc_x, SCALER6_POS0_START_X));
+
+ /* Control Word 2: Alpha Value & CSC */
+ vc4_dlist_write(vc4_state,
+ vc6_plane_get_csc_mode(vc4_state) |
+ vc4_hvs5_get_alpha_blend_mode(state) |
+ (mix_plane_alpha ? SCALER6_CTL2_ALPHA_MIX : 0) |
+ VC4_SET_FIELD(state->alpha >> 4, SCALER5_CTL2_ALPHA));
+
+ /* Position Word 1: Scaled Image Dimensions */
+ if (!vc4_state->is_unity)
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_h - 1,
+ SCALER6_POS1_SCL_LINES) |
+ VC4_SET_FIELD(vc4_state->crtc_w - 1,
+ SCALER6_POS1_SCL_WIDTH));
+
+ /* Position Word 2: Source Image Size */
+ vc4_state->pos2_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(height - 1,
+ SCALER6_POS2_SRC_LINES) |
+ VC4_SET_FIELD(width - 1,
+ SCALER6_POS2_SRC_WIDTH));
+
+ /* Position Word 3: Context */
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+
+ /*
+ * TODO: This only covers Raster Scan Order planes
+ */
+ for (i = 0; i < num_planes; i++) {
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, i);
+ dma_addr_t paddr = bo->dma_addr + fb->offsets[i] + offsets[i];
+
+ /* Pointer Word 0 */
+ vc4_state->ptr0_offset[i] = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ (rotation & DRM_MODE_REFLECT_Y ? SCALER6_PTR0_VFLIP : 0) |
+ /*
+ * The UPM buffer will be allocated in
+ * vc6_plane_allocate_upm().
+ */
+ VC4_SET_FIELD(upper_32_bits(paddr) & 0xff,
+ SCALER6_PTR0_UPPER_ADDR));
+
+ /* Pointer Word 1 */
+ vc4_dlist_write(vc4_state, lower_32_bits(paddr));
+
+ /* Pointer Word 2 */
+ if (base_format_mod != DRM_FORMAT_MOD_BROADCOM_SAND128 &&
+ base_format_mod != DRM_FORMAT_MOD_BROADCOM_SAND256) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i],
+ SCALER6_PTR2_PITCH));
+ } else {
+ vc4_dlist_write(vc4_state, pitch0);
+ }
+ }
+
+ /*
+ * Palette Word 0
+ * TODO: We're not using the palette mode
+ */
+
+ /*
+ * Trans Word 0
+ * TODO: It's only relevant if we set the trans_rgb bit in the
+ * control word 0, and we don't at the moment.
+ */
+
+ vc4_state->lbm_offset = 0;
+
+ if (!vc4_state->is_unity || fb->format->is_yuv) {
+ /*
+ * Reserve a slot for the LBM Base Address. The real value will
+ * be set when calling vc4_plane_allocate_lbm().
+ */
+ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
+
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ if (num_planes > 1)
+ /*
+ * Emit Cb/Cr as channel 0 and Y as channel
+ * 1. This matches how we set up scl0/scl1
+ * above.
+ */
+ vc4_write_scaling_parameters(state, 1);
+
+ vc4_write_scaling_parameters(state, 0);
+ }
+
+ /*
+ * If any PPF setup was done, then all the kernel
+ * pointers get uploaded.
+ */
+ if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
+ u32 kernel =
+ VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
+ SCALER_PPF_KERNEL_OFFSET);
+
+ /* HPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* HPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ }
+ }
+
+ vc4_dlist_write(vc4_state, SCALER6_CTL0_END);
+
+ vc4_state->dlist[0] |=
+ VC4_SET_FIELD(vc4_state->dlist_count, SCALER6_CTL0_NEXT);
+
+ /* crtc_* are already clipped coordinates. */
+ covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
+ vc4_state->crtc_w == state->crtc->mode.hdisplay &&
+ vc4_state->crtc_h == state->crtc->mode.vdisplay;
+
+ /*
+ * Background fill might be necessary when the plane has per-pixel
+ * alpha content or a non-opaque plane alpha and could blend from the
+ * background or does not cover the entire screen.
+ */
+ vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
+ state->alpha != DRM_BLEND_ALPHA_OPAQUE;
+
+ /*
+ * Flag the dlist as initialized to avoid checking it twice in case
+ * the async update check already called vc6_plane_mode_set() and
+ * decided to fall back to a sync update because an async update
+ * was not possible.
+ */
+ vc4_state->dlist_initialized = 1;
+
+ vc4_plane_calc_load(state);
+
+ drm_dbg_driver(drm, "[PLANE:%d:%s] Computed DLIST size: %u\n",
+ plane->base.id, plane->name, vc4_state->dlist_count);
+
+ return 0;
+}
+
/* If a modeset involves changing the setup of a plane, the atomic
* infrastructure will call this to validate a proposed plane setup.
* However, if a plane isn't getting updated, this (and the
@@ -1373,6 +2128,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
static int vc4_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
@@ -1380,17 +2136,38 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
vc4_state->dlist_count = 0;
- if (!plane_enabled(new_plane_state))
+ if (!plane_enabled(new_plane_state)) {
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+
+ if (vc4->gen >= VC4_GEN_6_C && old_plane_state &&
+ plane_enabled(old_plane_state)) {
+ vc6_plane_free_upm(new_plane_state);
+ }
return 0;
+ }
- ret = vc4_plane_mode_set(plane, new_plane_state);
+ if (vc4->gen >= VC4_GEN_6_C)
+ ret = vc6_plane_mode_set(plane, new_plane_state);
+ else
+ ret = vc4_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h)
+ return 0;
+
ret = vc4_plane_allocate_lbm(new_plane_state);
if (ret)
return ret;
+ if (vc4->gen >= VC4_GEN_6_C) {
+ ret = vc6_plane_allocate_upm(new_plane_state);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1439,7 +2216,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
- uint32_t addr;
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ dma_addr_t dma_addr = bo->dma_addr + fb->offsets[0];
int idx;
if (!drm_dev_enter(plane->dev, &idx))
@@ -1449,19 +2227,38 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
* because this is only called on the primary plane.
*/
WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
- addr = bo->dma_addr + fb->offsets[0];
- /* Write the new address into the hardware immediately. The
- * scanout will start from this address as soon as the FIFO
- * needs to refill with pixels.
- */
- writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+ if (vc4->gen == VC4_GEN_6_C) {
+ u32 value;
- /* Also update the CPU-side dlist copy, so that any later
- * atomic updates that don't do a new modeset on our plane
- * also use our updated address.
- */
- vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr;
+ value = vc4_state->dlist[vc4_state->ptr0_offset[0]] &
+ ~SCALER6_PTR0_UPPER_ADDR_MASK;
+ value |= VC4_SET_FIELD(upper_32_bits(dma_addr) & 0xff,
+ SCALER6_PTR0_UPPER_ADDR);
+
+ writel(value, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+ vc4_state->dlist[vc4_state->ptr0_offset[0]] = value;
+
+ value = lower_32_bits(dma_addr);
+ writel(value, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0] + 1]);
+ vc4_state->dlist[vc4_state->ptr0_offset[0] + 1] = value;
+ } else {
+ u32 addr;
+
+ addr = (u32)dma_addr;
+
+ /* Write the new address into the hardware immediately. The
+ * scanout will start from this address as soon as the FIFO
+ * needs to refill with pixels.
+ */
+ writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+
+ /* Also update the CPU-side dlist copy, so that any later
+ * atomic updates that don't do a new modeset on our plane
+ * also use our updated address.
+ */
+ vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr;
+ }
drm_dev_exit(idx);
}
@@ -1543,13 +2340,17 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *old_vc4_state, *new_vc4_state;
int ret;
u32 i;
- ret = vc4_plane_mode_set(plane, new_plane_state);
+ if (vc4->gen <= VC4_GEN_5)
+ ret = vc4_plane_mode_set(plane, new_plane_state);
+ else
+ ret = vc6_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
@@ -1723,7 +2524,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
};
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) {
+ if (!hvs_formats[i].hvs5_only || vc4->gen >= VC4_GEN_5) {
formats[num_formats] = hvs_formats[i].drm;
num_formats++;
}
@@ -1738,7 +2539,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
- if (vc4->gen == VC4_GEN_5)
+ if (vc4->gen >= VC4_GEN_5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
else
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index c55dec383929..27158be19952 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -19,6 +19,20 @@
#define VC4_GET_FIELD(word, field) FIELD_GET(field##_MASK, word)
+#define VC6_SET_FIELD(value, field) \
+ ({ \
+ WARN_ON(!FIELD_FIT(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, value));\
+ FIELD_PREP(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, value); \
+ })
+
+#define VC6_GET_FIELD(word, field) FIELD_GET(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, word)
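+/*
+ * Both helpers assume an hvs pointer is in scope and that the given
+ * FIELD is defined with both a SCALER6_<FIELD>_MASK and a
+ * SCALER6D_<FIELD>_MASK variant, so the C0 or D0 register layout is
+ * chosen at run time.
+ */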
+
#define V3D_IDENT0 0x00000
# define V3D_EXPECTED_IDENT0 \
((2 << 24) | \
@@ -155,6 +169,7 @@
# define PV_CONTROL_EN BIT(0)
#define PV_V_CONTROL 0x04
+# define PV_VCONTROL_ODD_TIMING BIT(29)
# define PV_VCONTROL_ODD_DELAY_MASK VC4_MASK(22, 6)
# define PV_VCONTROL_ODD_DELAY_SHIFT 6
# define PV_VCONTROL_ODD_FIRST BIT(5)
@@ -215,6 +230,11 @@
# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_SHIFT 2
# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP 8
+#define PV_PIPE_INIT_CTRL 0x94
+# define PV_PIPE_INIT_CTRL_PV_INIT_WIDTH_MASK VC4_MASK(11, 8)
+# define PV_PIPE_INIT_CTRL_PV_INIT_IDLE_MASK VC4_MASK(7, 4)
+# define PV_PIPE_INIT_CTRL_PV_INIT_EN BIT(0)
+
#define SCALER_CHANNELS_COUNT 3
#define SCALER_DISPCTRL 0x00000000
@@ -418,6 +438,10 @@
# define SCALER_DISPSTAT1_FRCNT0_SHIFT 18
# define SCALER_DISPSTAT1_FRCNT1_MASK VC4_MASK(17, 12)
# define SCALER_DISPSTAT1_FRCNT1_SHIFT 12
+# define SCALER5_DISPSTAT1_FRCNT0_MASK VC4_MASK(25, 20)
+# define SCALER5_DISPSTAT1_FRCNT0_SHIFT 20
+# define SCALER5_DISPSTAT1_FRCNT1_MASK VC4_MASK(19, 14)
+# define SCALER5_DISPSTAT1_FRCNT1_SHIFT 14
#define SCALER_DISPSTATX(x) (SCALER_DISPSTAT0 + \
(x) * (SCALER_DISPSTAT1 - \
@@ -436,6 +460,8 @@
#define SCALER_DISPSTAT2 0x00000068
# define SCALER_DISPSTAT2_FRCNT2_MASK VC4_MASK(17, 12)
# define SCALER_DISPSTAT2_FRCNT2_SHIFT 12
+# define SCALER5_DISPSTAT2_FRCNT2_MASK VC4_MASK(19, 14)
+# define SCALER5_DISPSTAT2_FRCNT2_SHIFT 14
#define SCALER_DISPBASE2 0x0000006c
#define SCALER_DISPALPHA2 0x00000070
@@ -514,6 +540,206 @@
#define SCALER5_DLIST_START 0x00004000
+#define SCALER6_VERSION 0x00000000
+# define SCALER6_VERSION_MASK VC4_MASK(7, 0)
+# define SCALER6_VERSION_C0 0x00000053
+# define SCALER6_VERSION_D0 0x00000054
+#define SCALER6_CXM_SIZE 0x00000004
+#define SCALER6_LBM_SIZE 0x00000008
+#define SCALER6_UBM_SIZE 0x0000000c
+#define SCALER6_COBA_SIZE 0x00000010
+#define SCALER6_COB_SIZE 0x00000014
+
+#define SCALER6_CONTROL 0x00000020
+# define SCALER6_CONTROL_HVS_EN BIT(31)
+# define SCALER6_CONTROL_PF_LINES_MASK VC4_MASK(22, 18)
+# define SCALER6_CONTROL_ABORT_ON_EMPTY BIT(16)
+# define SCALER6_CONTROL_DSP1_TARGET_MASK VC4_MASK(13, 12)
+# define SCALER6_CONTROL_MAX_REQS_MASK VC4_MASK(7, 4)
+
+#define SCALER6_FETCHER_STATUS 0x00000024
+#define SCALER6_FETCH_STATUS 0x00000028
+#define SCALER6_HANDLE_ERROR 0x0000002c
+
+#define SCALER6_DISP0_CTRL0 0x00000030
+#define SCALER6_DISPX_CTRL0(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_CTRL0 + ((x) * (SCALER6_DISP1_CTRL0 - SCALER6_DISP0_CTRL0))) : \
+ (SCALER6D_DISP0_CTRL0 + ((x) * (SCALER6D_DISP1_CTRL0 - SCALER6D_DISP0_CTRL0))))
+# define SCALER6_DISPX_CTRL0_ENB BIT(31)
+# define SCALER6_DISPX_CTRL0_RESET BIT(30)
+# define SCALER6_DISPX_CTRL0_FWIDTH_MASK VC4_MASK(28, 16)
+# define SCALER6_DISPX_CTRL0_ONESHOT BIT(15)
+# define SCALER6_DISPX_CTRL0_ONECTX_MASK VC4_MASK(14, 13)
+# define SCALER6_DISPX_CTRL0_LINES_MASK VC4_MASK(12, 0)
+
+#define SCALER6_DISP0_CTRL1 0x00000034
+#define SCALER6_DISPX_CTRL1(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_CTRL1 + ((x) * (SCALER6_DISP1_CTRL1 - SCALER6_DISP0_CTRL1))) : \
+ (SCALER6D_DISP0_CTRL1 + ((x) * (SCALER6D_DISP1_CTRL1 - SCALER6D_DISP0_CTRL1))))
+# define SCALER6_DISPX_CTRL1_BGENB BIT(8)
+# define SCALER6_DISPX_CTRL1_INTLACE BIT(0)
+
+#define SCALER6_DISP0_BGND 0x00000038
+#define SCALER6_DISPX_BGND(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_BGND + ((x) * (SCALER6_DISP1_BGND - SCALER6_DISP0_BGND))) : \
+ (SCALER6D_DISP0_BGND0 + ((x) * (SCALER6D_DISP1_BGND0 - SCALER6D_DISP0_BGND0))))
+
+#define SCALER6_DISP0_LPTRS 0x0000003c
+#define SCALER6_DISPX_LPTRS(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_LPTRS + ((x) * (SCALER6_DISP1_LPTRS - SCALER6_DISP0_LPTRS))) : \
+ (SCALER6D_DISP0_LPTRS + ((x) * (SCALER6D_DISP1_LPTRS - SCALER6D_DISP0_LPTRS))))
+# define SCALER6_DISPX_LPTRS_HEADE_MASK VC4_MASK(11, 0)
+
+#define SCALER6_DISP0_COB 0x00000040
+#define SCALER6_DISPX_COB(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_COB + ((x) * (SCALER6_DISP1_COB - SCALER6_DISP0_COB))) : \
+ (SCALER6D_DISP0_COB + ((x) * (SCALER6D_DISP1_COB - SCALER6D_DISP0_COB))))
+# define SCALER6_DISPX_COB_TOP_MASK VC4_MASK(31, 16)
+# define SCALER6_DISPX_COB_BASE_MASK VC4_MASK(15, 0)
+
+#define SCALER6_DISP0_STATUS 0x00000044
+#define SCALER6_DISPX_STATUS(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_STATUS + ((x) * (SCALER6_DISP1_STATUS - SCALER6_DISP0_STATUS))) : \
+ (SCALER6D_DISP0_STATUS + ((x) * (SCALER6D_DISP1_STATUS - SCALER6D_DISP0_STATUS))))
+# define SCALER6_DISPX_STATUS_EMPTY BIT(22)
+# define SCALER6_DISPX_STATUS_FRCNT_MASK VC4_MASK(21, 16)
+# define SCALER6_DISPX_STATUS_OFIELD BIT(15)
+# define SCALER6_DISPX_STATUS_MODE_MASK VC4_MASK(14, 13)
+# define SCALER6_DISPX_STATUS_MODE_DISABLED 0
+# define SCALER6_DISPX_STATUS_MODE_INIT 1
+# define SCALER6_DISPX_STATUS_MODE_RUN 2
+# define SCALER6_DISPX_STATUS_MODE_EOF 3
+# define SCALER6_DISPX_STATUS_YLINE_MASK VC4_MASK(12, 0)
+
+#define SCALER6_DISP0_DL 0x00000048
+
+#define SCALER6_DISPX_DL(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_DL + ((x) * (SCALER6_DISP1_DL - SCALER6_DISP0_DL))) : \
+ (SCALER6D_DISP0_DL + ((x) * (SCALER6D_DISP1_DL - SCALER6D_DISP0_DL))))
+# define SCALER6_DISPX_DL_LACT_MASK VC4_MASK(11, 0)
+
+#define SCALER6_DISP0_RUN 0x0000004c
+#define SCALER6_DISP1_CTRL0 0x00000050
+#define SCALER6_DISP1_CTRL1 0x00000054
+#define SCALER6_DISP1_BGND 0x00000058
+#define SCALER6_DISP1_LPTRS 0x0000005c
+#define SCALER6_DISP1_COB 0x00000060
+#define SCALER6_DISP1_STATUS 0x00000064
+#define SCALER6_DISP1_DL 0x00000068
+#define SCALER6_DISP1_RUN 0x0000006c
+#define SCALER6_DISP2_CTRL0 0x00000070
+#define SCALER6_DISP2_CTRL1 0x00000074
+#define SCALER6_DISP2_BGND 0x00000078
+#define SCALER6_DISP2_LPTRS 0x0000007c
+#define SCALER6_DISP2_COB 0x00000080
+#define SCALER6_DISP2_STATUS 0x00000084
+#define SCALER6_DISP2_DL 0x00000088
+#define SCALER6_DISP2_RUN 0x0000008c
+#define SCALER6_EOLN 0x00000090
+#define SCALER6_DL_STATUS 0x00000094
+#define SCALER6_BFG_MISC 0x0000009c
+#define SCALER6_QOS0 0x000000a0
+#define SCALER6_PROF0 0x000000a4
+#define SCALER6_QOS1 0x000000a8
+#define SCALER6_PROF1 0x000000ac
+#define SCALER6_QOS2 0x000000b0
+#define SCALER6_PROF2 0x000000b4
+#define SCALER6_PRI_MAP0 0x000000b8
+#define SCALER6_PRI_MAP1 0x000000bc
+#define SCALER6_HISTCTRL 0x000000c0
+#define SCALER6_HISTBIN0 0x000000c4
+#define SCALER6_HISTBIN1 0x000000c8
+#define SCALER6_HISTBIN2 0x000000cc
+#define SCALER6_HISTBIN3 0x000000d0
+#define SCALER6_HISTBIN4 0x000000d4
+#define SCALER6_HISTBIN5 0x000000d8
+#define SCALER6_HISTBIN6 0x000000dc
+#define SCALER6_HISTBIN7 0x000000e0
+#define SCALER6_HDR_CFG_REMAP 0x000000f4
+#define SCALER6_COL_SPACE 0x000000f8
+#define SCALER6_HVS_ID 0x000000fc
+#define SCALER6_CFC1 0x00000100
+#define SCALER6_DISP_UPM_ISO0 0x00000200
+#define SCALER6_DISP_UPM_ISO1 0x00000204
+#define SCALER6_DISP_UPM_ISO2 0x00000208
+#define SCALER6_DISP_LBM_ISO0 0x0000020c
+#define SCALER6_DISP_LBM_ISO1 0x00000210
+#define SCALER6_DISP_LBM_ISO2 0x00000214
+#define SCALER6_DISP_COB_ISO0 0x00000218
+#define SCALER6_DISP_COB_ISO1 0x0000021c
+#define SCALER6_DISP_COB_ISO2 0x00000220
+#define SCALER6_BAD_COB 0x00000224
+#define SCALER6_BAD_LBM 0x00000228
+#define SCALER6_BAD_UPM 0x0000022c
+#define SCALER6_BAD_AXI 0x00000230
+
+#define SCALER6D_VERSION 0x00000000
+#define SCALER6D_CXM_SIZE 0x00000004
+#define SCALER6D_LBM_SIZE 0x00000008
+#define SCALER6D_UBM_SIZE 0x0000000c
+#define SCALER6D_COBA_SIZE 0x00000010
+#define SCALER6D_COB_SIZE 0x00000014
+#define SCALER6D_CONTROL 0x00000020
+#define SCALER6D_FETCHER_STATUS 0x00000024
+#define SCALER6D_FETCH_STATUS 0x00000028
+#define SCALER6D_HANDLE_ERROR 0x0000002c
+#define SCALER6D_EOLN 0x00000030
+#define SCALER6D_DL_STATUS 0x00000034
+#define SCALER6D_PRI_MAP0 0x00000038
+#define SCALER6D_PRI_MAP1 0x0000003c
+#define SCALER6D_HISTCTRL 0x000000d0
+#define SCALER6D_HISTBIN0 0x000000d4
+#define SCALER6D_HISTBIN1 0x000000d8
+#define SCALER6D_HISTBIN2 0x000000dc
+#define SCALER6D_HISTBIN3 0x000000e0
+#define SCALER6D_HISTBIN4 0x000000e4
+#define SCALER6D_HISTBIN5 0x000000e8
+#define SCALER6D_HISTBIN6 0x000000ec
+#define SCALER6D_HISTBIN7 0x000000f0
+#define SCALER6D_HVS_ID 0x000000fc
+
+#define SCALER6D_DISP0_CTRL0 0x00000100
+#define SCALER6D_DISP0_CTRL1 0x00000104
+#define SCALER6D_DISP0_BGND0 0x00000108
+#define SCALER6D_DISP0_BGND1 0x0000010c
+#define SCALER6D_DISP0_LPTRS 0x00000110
+#define SCALER6D_DISP0_COB 0x00000114
+#define SCALER6D_DISP0_STATUS 0x00000118
+#define SCALER6D_DISP0_DL 0x0000011c
+#define SCALER6D_DISP0_RUN 0x00000120
+#define SCALER6D_QOS0 0x00000124
+#define SCALER6D_PROF0 0x00000128
+#define SCALER6D_DISP1_CTRL0 0x00000140
+#define SCALER6D_DISP1_CTRL1 0x00000144
+#define SCALER6D_DISP1_BGND0 0x00000148
+#define SCALER6D_DISP1_BGND1 0x0000014c
+#define SCALER6D_DISP1_LPTRS 0x00000150
+#define SCALER6D_DISP1_COB 0x00000154
+#define SCALER6D_DISP1_STATUS 0x00000158
+#define SCALER6D_DISP1_DL 0x0000015c
+#define SCALER6D_DISP1_RUN 0x00000160
+#define SCALER6D_QOS1 0x00000164
+#define SCALER6D_PROF1 0x00000168
+#define SCALER6D_DISP2_CTRL0 0x00000180
+#define SCALER6D_DISP2_CTRL1 0x00000184
+#define SCALER6D_DISP2_BGND0 0x00000188
+#define SCALER6D_DISP2_BGND1 0x0000018c
+#define SCALER6D_DISP2_LPTRS 0x00000190
+#define SCALER6D_DISP2_COB 0x00000194
+#define SCALER6D_DISP2_STATUS 0x00000198
+#define SCALER6D_DISP2_DL 0x0000019c
+#define SCALER6D_DISP2_RUN 0x000001a0
+#define SCALER6D_QOS2 0x000001a4
+#define SCALER6D_PROF2 0x000001a8
+
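+/*
+ * Example: SCALER6(CONTROL) resolves to SCALER6_CONTROL or
+ * SCALER6D_CONTROL (both 0x20) depending on the HVS step; registers
+ * that moved between steps, such as DISP0_CTRL0 (0x30 on C0 vs 0x100
+ * on D0), are handled the same way.
+ */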
+#define SCALER6(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? SCALER6_ ## x : SCALER6D_ ## x)
+
# define VC4_HDMI_SW_RESET_FORMAT_DETECT BIT(1)
# define VC4_HDMI_SW_RESET_HDMI BIT(0)
@@ -761,6 +987,15 @@ enum {
# define VC4_HD_MAI_THR_DREQLOW_MASK VC4_MASK(5, 0)
# define VC4_HD_MAI_THR_DREQLOW_SHIFT 0
+# define VC6_D_HD_MAI_THR_PANICHIGH_MASK VC4_MASK(29, 23)
+# define VC6_D_HD_MAI_THR_PANICHIGH_SHIFT 23
+# define VC6_D_HD_MAI_THR_PANICLOW_MASK VC4_MASK(21, 15)
+# define VC6_D_HD_MAI_THR_PANICLOW_SHIFT 15
+# define VC6_D_HD_MAI_THR_DREQHIGH_MASK VC4_MASK(13, 7)
+# define VC6_D_HD_MAI_THR_DREQHIGH_SHIFT 7
+# define VC6_D_HD_MAI_THR_DREQLOW_MASK VC4_MASK(6, 0)
+# define VC6_D_HD_MAI_THR_DREQLOW_SHIFT 0
+
/* Divider from HDMI HSM clock to MAI serial clock. Sampling period
* converges to N / (M + 1) cycles.
*/
@@ -968,6 +1203,9 @@ enum hvs_pixel_format {
#define SCALER5_CTL2_ALPHA_MASK VC4_MASK(15, 4)
#define SCALER5_CTL2_ALPHA_SHIFT 4
+#define SCALER6D_CTL2_CSC_ENABLE BIT(19)
+#define SCALER6D_CTL2_BRCM_CFC_CONTROL_MASK VC4_MASK(22, 20)
+
#define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16)
#define SCALER_POS1_SCL_HEIGHT_SHIFT 16
@@ -1109,4 +1347,63 @@ enum hvs_pixel_format {
#define SCALER_PITCH0_TILE_WIDTH_R_MASK VC4_MASK(6, 0)
#define SCALER_PITCH0_TILE_WIDTH_R_SHIFT 0
+#define SCALER6_CTL0_END BIT(31)
+#define SCALER6_CTL0_VALID BIT(30)
+#define SCALER6_CTL0_NEXT_MASK VC4_MASK(29, 24)
+#define SCALER6_CTL0_RGB_TRANS BIT(23)
+#define SCALER6_CTL0_ADDR_MODE_MASK VC4_MASK(22, 20)
+#define SCALER6_CTL0_ADDR_MODE_LINEAR 0
+#define SCALER6_CTL0_ADDR_MODE_128B 1
+#define SCALER6_CTL0_ADDR_MODE_256B 2
+#define SCALER6_CTL0_ADDR_MODE_MAP8 3
+#define SCALER6_CTL0_ADDR_MODE_UIF 4
+
+#define SCALER6_CTL0_ALPHA_MASK_MASK VC4_MASK(19, 18)
+#define SCALER6_CTL0_ALPHA_MASK_NONE 0
+#define SCALER6D_CTL0_ALPHA_MASK_FIXED 3
+#define SCALER6_CTL0_UNITY BIT(15)
+#define SCALER6_CTL0_ORDERRGBA_MASK VC4_MASK(14, 13)
+#define SCALER6_CTL0_SCL1_MODE_MASK VC4_MASK(10, 8)
+#define SCALER6_CTL0_SCL0_MODE_MASK VC4_MASK(7, 5)
+#define SCALER6_CTL0_PIXEL_FORMAT_MASK VC4_MASK(4, 0)
+
+#define SCALER6_POS0_START_Y_MASK VC4_MASK(28, 16)
+#define SCALER6_POS0_HFLIP BIT(15)
+#define SCALER6_POS0_START_X_MASK VC4_MASK(12, 0)
+
+#define SCALER6_CTL2_ALPHA_MODE_MASK VC4_MASK(31, 30)
+#define SCALER6_CTL2_ALPHA_PREMULT BIT(29)
+#define SCALER6_CTL2_ALPHA_MIX BIT(28)
+#define SCALER6_CTL2_BFG BIT(26)
+#define SCALER6C_CTL2_CSC_ENABLE BIT(25)
+#define SCALER6C_CTL2_BRCM_CFC_CONTROL_MASK VC4_MASK(18, 16)
+#define SCALER6_CTL2_ALPHA_MASK VC4_MASK(15, 4)
+
+#define SCALER6_POS1_SCL_LINES_MASK VC4_MASK(28, 16)
+#define SCALER6_POS1_SCL_WIDTH_MASK VC4_MASK(12, 0)
+
+#define SCALER6_POS2_SRC_LINES_MASK VC4_MASK(28, 16)
+#define SCALER6_POS2_SRC_WIDTH_MASK VC4_MASK(12, 0)
+
+#define SCALER6_PTR0_VFLIP BIT(31)
+#define SCALER6_PTR0_UPM_BASE_MASK VC4_MASK(28, 16)
+#define SCALER6_PTR0_UPM_HANDLE_MASK VC4_MASK(14, 10)
+#define SCALER6_PTR0_UPM_BUFF_SIZE_MASK VC4_MASK(9, 8)
+#define SCALER6_PTR0_UPM_BUFF_SIZE_16_LINES 3
+#define SCALER6_PTR0_UPM_BUFF_SIZE_8_LINES 2
+#define SCALER6_PTR0_UPM_BUFF_SIZE_4_LINES 1
+#define SCALER6_PTR0_UPM_BUFF_SIZE_2_LINES 0
+#define SCALER6_PTR0_UPPER_ADDR_MASK VC4_MASK(7, 0)
+
+#define SCALER6_PTR2_ALPHA_BPP_MASK VC4_MASK(31, 31)
+#define SCALER6_PTR2_ALPHA_BPP_1BPP 1
+#define SCALER6_PTR2_ALPHA_BPP_8BPP 0
+#define SCALER6_PTR2_ALPHA_ORDER_MASK VC4_MASK(30, 30)
+#define SCALER6_PTR2_ALPHA_ORDER_MSB_TO_LSB 1
+#define SCALER6_PTR2_ALPHA_ORDER_LSB_TO_MSB 0
+#define SCALER6_PTR2_ALPHA_OFFS_MASK VC4_MASK(29, 27)
+#define SCALER6_PTR2_LSKIP_MASK VC4_MASK(26, 24)
+#define SCALER6_PTR2_PITCH_MASK VC4_MASK(16, 0)
+#define SCALER6_PTR2_FETCH_COUNT_MASK VC4_MASK(26, 16)
+
#endif /* VC4_REGS_H */
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 3e38a1d2d55e..4eab069cda75 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -145,6 +145,9 @@
/* Number of lines received and committed to memory. */
#define TXP_PROGRESS 0x10
+#define TXP_DST_PTR_HIGH_MOPLET 0x1c
+#define TXP_DST_PTR_HIGH_MOP 0x24
+
#define TXP_READ(offset) \
({ \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
@@ -159,6 +162,7 @@
struct vc4_txp {
struct vc4_crtc base;
+ const struct vc4_txp_data *data;
struct platform_device *pdev;
@@ -286,9 +290,13 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
+ const struct vc4_txp_data *txp_data = txp->data;
struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
+ unsigned int hdisplay;
+ unsigned int vdisplay;
+ dma_addr_t addr;
u32 ctrl;
int idx;
int i;
@@ -308,9 +316,11 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
return;
ctrl = TXP_GO | TXP_EI |
- VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
+ if (txp_data->has_byte_enable)
+ ctrl |= VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE);
+
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
else
@@ -324,11 +334,25 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
return;
gem = drm_fb_dma_get_gem_obj(fb, 0);
- TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
+ addr = gem->dma_addr + fb->offsets[0];
+
+ TXP_WRITE(TXP_DST_PTR, lower_32_bits(addr));
+
+ if (txp_data->supports_40bit_addresses)
+ TXP_WRITE(txp_data->high_addr_ptr_reg, upper_32_bits(addr) & 0xff);
+
TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
+
+ hdisplay = mode->hdisplay ?: 1;
+ vdisplay = mode->vdisplay ?: 1;
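+
+ /*
+ * The BCM2712 MOP and MOPLET expect the dimensions programmed as
+ * size minus one, while the older TXP takes the raw size.
+ */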
+ if (txp_data->size_minus_one) {
+ hdisplay -= 1;
+ vdisplay -= 1;
+ }
+
TXP_WRITE(TXP_DIM,
- VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
- VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
+ VC4_SET_FIELD(hdisplay, TXP_WIDTH) |
+ VC4_SET_FIELD(vdisplay, TXP_HEIGHT));
TXP_WRITE(TXP_DST_CTRL, ctrl);
@@ -362,6 +386,7 @@ static const struct drm_connector_funcs vc4_txp_connector_funcs = {
static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
int idx;
@@ -380,7 +405,8 @@ static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
}
- TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+ if (vc4->gen < VC4_GEN_6_C)
+ TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
drm_dev_exit(idx);
}
@@ -484,17 +510,49 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-const struct vc4_crtc_data vc4_txp_crtc_data = {
- .name = "txp",
- .debugfs_name = "txp_regs",
- .hvs_available_channels = BIT(2),
- .hvs_output = 2,
+static const struct vc4_txp_data bcm2712_mop_data = {
+ .base = {
+ .name = "mop",
+ .debugfs_name = "mop_regs",
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP0,
+ .high_addr_ptr_reg = TXP_DST_PTR_HIGH_MOP,
+ .has_byte_enable = true,
+ .size_minus_one = true,
+ .supports_40bit_addresses = true,
+};
+
+static const struct vc4_txp_data bcm2712_moplet_data = {
+ .base = {
+ .name = "moplet",
+ .debugfs_name = "moplet_regs",
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 4,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP1,
+ .high_addr_ptr_reg = TXP_DST_PTR_HIGH_MOPLET,
+ .size_minus_one = true,
+ .supports_40bit_addresses = true,
+};
+
+const struct vc4_txp_data bcm2835_txp_data = {
+ .base = {
+ .name = "txp",
+ .debugfs_name = "txp_regs",
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP0,
+ .has_byte_enable = true,
};
static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
+ const struct vc4_txp_data *txp_data;
struct vc4_encoder *vc4_encoder;
struct drm_encoder *encoder;
struct vc4_crtc *vc4_crtc;
@@ -509,6 +567,11 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
if (!txp)
return -ENOMEM;
+ txp_data = of_device_get_match_data(dev);
+ if (!txp_data)
+ return -ENODEV;
+
+ txp->data = txp_data;
txp->pdev = pdev;
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
@@ -519,13 +582,13 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
vc4_crtc->regset.regs = txp_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
- ret = vc4_crtc_init(drm, pdev, vc4_crtc, &vc4_txp_crtc_data,
+ ret = vc4_crtc_init(drm, pdev, vc4_crtc, &txp_data->base,
&vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs, true);
if (ret)
return ret;
vc4_encoder = &txp->encoder;
- txp->encoder.type = VC4_ENCODER_TYPE_TXP;
+ txp->encoder.type = txp_data->encoder_type;
encoder = &vc4_encoder->base;
encoder->possible_crtcs = drm_crtc_mask(&vc4_crtc->base);
@@ -579,7 +642,9 @@ static void vc4_txp_remove(struct platform_device *pdev)
}
static const struct of_device_id vc4_txp_dt_match[] = {
- { .compatible = "brcm,bcm2835-txp" },
+ { .compatible = "brcm,bcm2712-mop", .data = &bcm2712_mop_data },
+ { .compatible = "brcm,bcm2712-moplet", .data = &bcm2712_moplet_data },
+ { .compatible = "brcm,bcm2835-txp", .data = &bcm2835_txp_data },
{ /* sentinel */ },
};
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c5e3e5457737..2752ab4f1c97 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -47,7 +47,6 @@
#define DRIVER_NAME "vgem"
#define DRIVER_DESC "Virtual GEM provider"
-#define DRIVER_DATE "20120112"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -121,7 +120,6 @@ static const struct drm_driver vgem_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ffca6e2e1c9a..6a67c6297d58 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -32,9 +32,9 @@
#include <linux/poll.h>
#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
@@ -184,7 +184,6 @@ static const struct drm_driver driver = {
.postclose = virtio_gpu_driver_postclose,
.dumb_create = virtio_gpu_mode_dumb_create,
- .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
DRM_FBDEV_SHMEM_DRIVER_OPS,
@@ -202,7 +201,6 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 64c236169db8..f42ca9d8ed10 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -45,7 +45,6 @@
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
-#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -89,9 +88,11 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
+ struct sg_table *sgt;
uint32_t hw_res_handle;
bool dumb;
bool created;
+ bool attached;
bool host3d_blob, guest_blob;
uint32_t blob_mem, blob_flags;
@@ -194,6 +195,13 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
+struct virtio_gpu_plane_state {
+ struct drm_plane_state base;
+ struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+ container_of(x, struct virtio_gpu_plane_state, base)
+
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -301,9 +309,6 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
@@ -349,6 +354,10 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
struct virtio_gpu_mem_entry *ents,
unsigned int nents);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence);
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
@@ -468,6 +477,10 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach);
/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 7db48d17ee3a..5aab588fc400 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -99,21 +99,6 @@ fail:
return ret;
}
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- struct drm_gem_object *gobj;
-
- BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
- *offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
- drm_gem_object_put(gobj);
- return 0;
-}
-
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file)
{
@@ -127,15 +112,17 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
/* the context might still be missing when the first ioctl is
* DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
*/
- virtio_gpu_create_context(obj->dev, file);
+ if (!vgdev->has_context_init)
+ virtio_gpu_create_context(obj->dev, file);
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
virtio_gpu_array_add_obj(objs, obj);
- virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- objs);
+ if (vfpriv->ctx_id)
+ virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+
out_notify:
virtio_gpu_notify(vgdev);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index e4f76f315550..c33c057365f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -80,9 +80,9 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
- virtio_gpu_map->handle,
- &virtio_gpu_map->offset);
+ return drm_gem_dumb_map_offset(file, vgdev->ddev,
+ virtio_gpu_map->handle,
+ &virtio_gpu_map->offset);
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index c7e74cf13022..5517cff8715c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -80,6 +80,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
drm_gem_free_mmap_offset(&vram->base.base.base);
drm_gem_object_release(&vram->base.base.base);
kfree(vram);
+ } else {
+ drm_gem_object_release(&bo->base.base);
+ kfree(bo);
}
}
@@ -97,6 +100,27 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
virtio_gpu_cleanup_object(bo);
}
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_fence *fence;
+
+ if (!bo->attached)
+ return 0;
+
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+ if (!fence)
+ return -ENOMEM;
+
+ virtio_gpu_object_detach(vgdev, bo, fence);
+ virtio_gpu_notify(vgdev);
+
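+ /*
+ * Wait for the host to process the detach before the caller can
+ * safely release the backing pages.
+ */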
+ dma_fence_wait(&fence->f, false);
+ dma_fence_put(&fence->f);
+
+ return 0;
+}
+
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a72a2dbda031..42aa554eca9f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
#include "virtgpu_drv.h"
@@ -66,11 +68,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
+static struct drm_plane_state *
+virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct virtio_gpu_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
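+ /*
+ * new->fence starts out NULL via kzalloc(); a fence is only
+ * attached later, in the plane's prepare_fb hook.
+ */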
+ return &new->base;
+}
+
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
@@ -138,11 +157,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- if (vgfb->fence) {
+ if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
objs = virtio_gpu_array_alloc(1);
@@ -151,13 +172,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
virtio_gpu_notify(vgdev);
-
- dma_fence_wait_timeout(&vgfb->fence->f, true,
+ dma_fence_wait_timeout(&vgplane_st->fence->f, true,
msecs_to_jiffies(50));
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
} else {
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
width, height, NULL, NULL);
@@ -241,45 +260,113 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y2 - rect.y1);
}
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+ struct drm_plane_state *new_state,
+ struct drm_gem_object *obj)
+{
+ struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret) {
+ dma_resv_unlock(resv);
+ return ret;
+ }
+
+ if (!bo->sgt) {
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+ bo, attach);
+ if (ret)
+ goto err;
+
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
+
+ dma_resv_unlock(resv);
+ return 0;
+
+err:
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+ return ret;
+}
+
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
+ struct drm_gem_object *obj;
+ int ret;
if (!new_state->fb)
return 0;
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
return 0;
- if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ obj = new_state->fb->obj[0];
+ if (obj->import_attach) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ return ret;
+ }
+
+ if (bo->dumb || obj->import_attach) {
+ vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+ vgdev->fence_drv.context,
0);
- if (!vgfb->fence)
+ if (!vgplane_st->fence)
return -ENOMEM;
}
return 0;
}
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
+{
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+
+ dma_resv_lock(resv, NULL);
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+}
+
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
+ struct drm_gem_object *obj;
if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(state->fb);
- if (vgfb->fence) {
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ vgplane_st = to_virtio_gpu_plane_state(state);
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
}
+
+ obj = state->fb->obj[0];
+ if (obj->import_attach)
+ virtio_gpu_cleanup_imported_obj(obj);
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -291,6 +378,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
@@ -303,6 +391,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
@@ -322,11 +411,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, 0,
plane->state->crtc_w,
plane->state->crtc_h,
- 0, 0, objs, vgfb->fence);
+ 0, 0, objs, vgplane_st->fence);
virtio_gpu_notify(vgdev);
- dma_fence_wait(&vgfb->fence->f, true);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ dma_fence_wait(&vgplane_st->fence->f, true);
}
if (plane->state->fb != old_state->fb) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 44425f20d91a..b3664c12843d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -27,6 +27,8 @@
#include "virtgpu_drv.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
uuid_t *uuid)
{
@@ -142,10 +144,159 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
return buf;
}
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct scatterlist *sl;
+ struct sg_table *sgt;
+ long i, ret;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return ret < 0 ? ret : -ETIMEDOUT;
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ *ents = kvmalloc_array(sgt->nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+
+ *nents = sgt->nents;
+ for_each_sgtable_dma_sg(sgt, sl, i) {
+ (*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+ (*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+ (*ents)[i].padding = 0;
+ }
+
+ bo->sgt = sgt;
+ return 0;
+}
+
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
+
+ if (attach) {
+ struct dma_resv *resv = attach->dmabuf->resv;
+
+ dma_resv_lock(resv, NULL);
+
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ dma_resv_unlock(resv);
+
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(attach->dmabuf);
+ }
+
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object_params params = { 0 };
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret) {
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+ }
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_pin;
+
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+ if (ret)
+ goto err_import;
+
+ params.blob = true;
+ params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ params.size = attach->dmabuf->size;
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+ ents, nents);
+ bo->guest_blob = true;
+ bo->attached = true;
+
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+
+ return 0;
+
+err_import:
+ dma_buf_unpin(attach);
+err_pin:
+ dma_resv_unlock(resv);
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+}
+
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+ .free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
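+ /*
+ * Skip objects whose GEM refcount has already dropped to zero;
+ * virtgpu_dma_buf_free_obj tears the attachment down itself.
+ */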
+ if (bo->created && kref_read(&obj->refcount)) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ bo->sgt = NULL;
+ }
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
+ .move_notify = virtgpu_dma_buf_move_notify
+};
+
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct dma_buf_attachment *attach;
+ struct virtio_gpu_object *bo;
struct drm_gem_object *obj;
+ int ret;
if (buf->ops == &virtgpu_dmabuf_ops.ops) {
obj = buf->priv;
@@ -159,7 +310,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, buf);
+ if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+ return drm_gem_prime_import(dev, buf);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &bo->base.base;
+ obj->funcs = &virtgpu_gem_dma_buf_funcs;
+ drm_gem_private_object_init(dev, obj, buf->size);
+
+ attach = dma_buf_dynamic_attach(buf, dev->dev,
+ &virtgpu_dma_buf_attach_ops, obj);
+ if (IS_ERR(attach)) {
+ kfree(bo);
+ return ERR_CAST(attach);
+ }
+
+ obj->import_attach = attach;
+ get_dma_buf(buf);
+
+ ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return obj;
}
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 0d3d0d09f39b..ad91624df42d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -645,6 +645,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -1103,8 +1120,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry *ents,
unsigned int nents)
{
+ if (obj->attached)
+ return;
+
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
+
+ obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence)
+{
+ if (!obj->attached)
+ return;
+
+ virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+ fence);
+
+ obj->attached = false;
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 3f0977d746be..b20ac1705726 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -24,64 +24,33 @@ static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
/**
* pre_mul_alpha_blend - alpha blending equation
- * @frame_info: Source framebuffer's metadata
* @stage_buffer: The line with the pixels from src_plane
* @output_buffer: A line buffer that receives all the blends output
+ * @x_start: The start offset
+ * @pixel_count: The number of pixels to blend
*
- * Using the information from the `frame_info`, this blends only the
- * necessary pixels from the `stage_buffer` to the `output_buffer`
- * using premultiplied blend formula.
+ * The pixels [@x_start;@x_start+@pixel_count) in stage_buffer are blended at
+ * [@x_start;@x_start+@pixel_count) in output_buffer.
*
* The current DRM assumption is that pixel color values have been already
* pre-multiplied with the alpha channel values. See more
* drm_plane_create_blend_mode_property(). Also, this formula assumes a
* completely opaque background.
*/
-static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
- struct line_buffer *stage_buffer,
- struct line_buffer *output_buffer)
+static void pre_mul_alpha_blend(const struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer, int x_start, int pixel_count)
{
- int x_dst = frame_info->dst.x1;
- struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
- struct pixel_argb_u16 *in = stage_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- stage_buffer->n_pixels);
-
- for (int x = 0; x < x_limit; x++) {
- out[x].a = (u16)0xffff;
- out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
- out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
- out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
+ struct pixel_argb_u16 *out = &output_buffer->pixels[x_start];
+ const struct pixel_argb_u16 *in = &stage_buffer->pixels[x_start];
+
+ for (int i = 0; i < pixel_count; i++) {
+ out[i].a = (u16)0xffff;
+ out[i].r = pre_mul_blend_channel(in[i].r, out[i].r, in[i].a);
+ out[i].g = pre_mul_blend_channel(in[i].g, out[i].g, in[i].a);
+ out[i].b = pre_mul_blend_channel(in[i].b, out[i].b, in[i].a);
}
}
-static int get_y_pos(struct vkms_frame_info *frame_info, int y)
-{
- if (frame_info->rotation & DRM_MODE_REFLECT_Y)
- return drm_rect_height(&frame_info->rotated) - y - 1;
-
- switch (frame_info->rotation & DRM_MODE_ROTATE_MASK) {
- case DRM_MODE_ROTATE_90:
- return frame_info->rotated.x2 - y - 1;
- case DRM_MODE_ROTATE_270:
- return y + frame_info->rotated.x1;
- default:
- return y;
- }
-}
-
-static bool check_limit(struct vkms_frame_info *frame_info, int pos)
-{
- if (drm_rotation_90_or_270(frame_info->rotation)) {
- if (pos >= 0 && pos < drm_rect_width(&frame_info->rotated))
- return true;
- } else {
- if (pos >= frame_info->rotated.y1 && pos < frame_info->rotated.y2)
- return true;
- }
-
- return false;
-}
static void fill_background(const struct pixel_argb_u16 *background_color,
struct line_buffer *output_buffer)
@@ -96,7 +65,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
s64 a_fp = drm_int2fixp(a);
s64 b_fp = drm_int2fixp(b);
- s64 delta = drm_fixp_mul(b_fp - a_fp, t);
+ s64 delta = drm_fixp_mul(b_fp - a_fp, t);
return drm_fixp2int(a_fp + delta);
}
@@ -164,6 +133,226 @@ static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buff
}
/**
+ * direction_for_rotation() - Get the correct reading direction for a given rotation
+ *
+ * @rotation: Rotation to analyze. It corresponds to the field @frame_info.rotation.
+ *
+ * This function uses the @rotation setting of a source plane to compute the reading
+ * direction in this plane which corresponds to a "left to right" write order in the CRTC.
+ * For example, if the buffer is reflected on the X axis, the pixels must be read from right
+ * to left to be written from left to right on the CRTC.
+ */
+static enum pixel_read_direction direction_for_rotation(unsigned int rotation)
+{
+ struct drm_rect tmp_a, tmp_b;
+ int x, y;
+
+ /*
+ * Points A and B are depicted as zero-size rectangles on the CRTC.
+ * The CRTC writing direction is from A to B, so the plane reading direction
+ * is found by inverse-transforming the vector AB (top-left to top-right)
+ * in a 1x1 square.
+ */
+
+ tmp_a = DRM_RECT_INIT(0, 0, 0, 0);
+ tmp_b = DRM_RECT_INIT(1, 0, 0, 0);
+ drm_rect_rotate_inv(&tmp_a, 1, 1, rotation);
+ drm_rect_rotate_inv(&tmp_b, 1, 1, rotation);
+
+ x = tmp_b.x1 - tmp_a.x1;
+ y = tmp_b.y1 - tmp_a.y1;
+
+ if (x == 1 && y == 0)
+ return READ_LEFT_TO_RIGHT;
+ else if (x == -1 && y == 0)
+ return READ_RIGHT_TO_LEFT;
+ else if (y == 1 && x == 0)
+ return READ_TOP_TO_BOTTOM;
+ else if (y == -1 && x == 0)
+ return READ_BOTTOM_TO_TOP;
+
+ WARN_ONCE(true, "The inverse of the rotation gives an incorrect direction.");
+ return READ_LEFT_TO_RIGHT;
+}
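As a worked example of the 1x1-square trick: the inverse of an X reflection maps a point's x to 1 - x, so A=(0,0) and B=(1,0) swap, the AB vector becomes (-1,0), and the result is READ_RIGHT_TO_LEFT. A hypothetical stand-alone model of just that case (not using the drm_rect helpers):

#include <stdio.h>

struct point { int x, y; };

/* Inverse of DRM_MODE_REFLECT_X inside a 1x1 square: x -> 1 - x. */
static struct point reflect_x_inv(struct point p)
{
	return (struct point){ 1 - p.x, p.y };
}

int main(void)
{
	struct point a = reflect_x_inv((struct point){ 0, 0 });
	struct point b = reflect_x_inv((struct point){ 1, 0 });

	printf("AB = (%d, %d)\n", b.x - a.x, b.y - a.y); /* (-1, 0) */
	return 0;
}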
+
+/**
+ * clamp_line_coordinates() - Compute and clamp the coordinates to read and write during the
+ * blend process.
+ *
+ * @direction: direction of the reading
+ * @current_plane: current plane blended
+ * @src_line: source line to read. Only the top-left coordinate is used. This rectangle
+ * must be rotated and have a shape of 1*pixel_count if @direction is vertical and a shape of
+ * pixel_count*1 if @direction is horizontal.
+ * @src_x_start: x start coordinate for the line reading
+ * @src_y_start: y start coordinate for the line reading
+ * @dst_x_start: x coordinate to blend the read line
+ * @pixel_count: number of pixels to blend
+ *
+ * This function is mainly a safety net to avoid reading outside the source buffer. As
+ * userspace should never ask to read outside the source plane, all the cases covered here
+ * should be dead code.
+ */
+static void clamp_line_coordinates(enum pixel_read_direction direction,
+ const struct vkms_plane_state *current_plane,
+ const struct drm_rect *src_line, int *src_x_start,
+ int *src_y_start, int *dst_x_start, int *pixel_count)
+{
+ /* By default the start points are correct */
+ *src_x_start = src_line->x1;
+ *src_y_start = src_line->y1;
+ *dst_x_start = current_plane->frame_info->dst.x1;
+
+ /* Get the correct number of pixels to blend; it depends on the direction */
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ case READ_RIGHT_TO_LEFT:
+ *pixel_count = drm_rect_width(src_line);
+ break;
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ *pixel_count = drm_rect_height(src_line);
+ break;
+ }
+
+ /*
+ * Clamp the coordinates to avoid reading outside the buffer
+ *
+ * This is mainly a safety check: userspace should never request to read
+ * outside the source buffer.
+ */
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ case READ_RIGHT_TO_LEFT:
+ if (*src_x_start < 0) {
+ *pixel_count += *src_x_start;
+ *dst_x_start -= *src_x_start;
+ *src_x_start = 0;
+ }
+ if (*src_x_start + *pixel_count > current_plane->frame_info->fb->width)
+ *pixel_count = max(0, (int)current_plane->frame_info->fb->width -
+ *src_x_start);
+ break;
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ if (*src_y_start < 0) {
+ *pixel_count += *src_y_start;
+ *dst_x_start -= *src_y_start;
+ *src_y_start = 0;
+ }
+ if (*src_y_start + *pixel_count > current_plane->frame_info->fb->height)
+ *pixel_count = max(0, (int)current_plane->frame_info->fb->height -
+ *src_y_start);
+ break;
+ }
+}
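The horizontal branch above, re-stated as a self-contained sketch (hypothetical values; the vertical branch is identical with y and the framebuffer height substituted):

#include <stdio.h>

static void clamp_h(int fb_width, int *src_x, int *dst_x, int *count)
{
	if (*src_x < 0) {
		*count += *src_x;	/* src_x is negative: shrink the run */
		*dst_x -= *src_x;	/* and shift the destination right */
		*src_x = 0;
	}
	if (*src_x + *count > fb_width)
		*count = fb_width - *src_x > 0 ? fb_width - *src_x : 0;
}

int main(void)
{
	int src_x = -3, dst_x = 10, count = 20;

	clamp_h(64, &src_x, &dst_x, &count);
	printf("src_x=%d dst_x=%d count=%d\n", src_x, dst_x, count); /* 0 13 17 */
	return 0;
}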
+
+/**
+ * blend_line() - Blend a line from a plane to the output buffer
+ *
+ * @current_plane: current plane to work on
+ * @y: line to write in the output buffer
+ * @crtc_x_limit: width of the output buffer
+ * @stage_buffer: temporary buffer to convert the pixel line from the source buffer
+ * @output_buffer: buffer to blend the read line into.
+ */
+static void blend_line(struct vkms_plane_state *current_plane, int y,
+ int crtc_x_limit, struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer)
+{
+ int src_x_start, src_y_start, dst_x_start, pixel_count;
+ struct drm_rect dst_line, tmp_src, src_line;
+
+ /* Skip lines outside the plane's destination rectangle */
+ if (y < current_plane->frame_info->dst.y1 ||
+ y >= current_plane->frame_info->dst.y2)
+ return;
+
+ /*
+ * dst_line is the line to copy. The initial coordinates are inside the
+ * destination framebuffer, and then drm_rect_* helpers are used to
+ * compute the correct position into the source framebuffer.
+ */
+ dst_line = DRM_RECT_INIT(current_plane->frame_info->dst.x1, y,
+ drm_rect_width(&current_plane->frame_info->dst),
+ 1);
+
+ drm_rect_fp_to_int(&tmp_src, &current_plane->frame_info->src);
+
+ /*
+ * [1]: Clamp dst_line to crtc_x_limit to avoid writing outside of
+ * the destination buffer
+ */
+ dst_line.x1 = max_t(int, dst_line.x1, 0);
+ dst_line.x2 = min_t(int, dst_line.x2, crtc_x_limit);
+ /* The destination is completely outside of the crtc. */
+ if (dst_line.x2 <= dst_line.x1)
+ return;
+
+ src_line = dst_line;
+
+ /*
+ * Transform the x/y coordinates from the crtc into coordinates for
+ * the src buffer.
+ *
+ * - Cancel the offset of the dst buffer.
+ * - Invert the rotation. This assumes that
+ * dst = drm_rect_rotate(src, rotation) (dst and src have the
+ * same size, but can be rotated).
+ * - Apply the offset of the source rectangle to the coordinate.
+ */
+ drm_rect_translate(&src_line, -current_plane->frame_info->dst.x1,
+ -current_plane->frame_info->dst.y1);
+ drm_rect_rotate_inv(&src_line, drm_rect_width(&tmp_src),
+ drm_rect_height(&tmp_src),
+ current_plane->frame_info->rotation);
+ drm_rect_translate(&src_line, tmp_src.x1, tmp_src.y1);
+
+ /* Get the correct reading direction in the source buffer. */
+
+ enum pixel_read_direction direction =
+ direction_for_rotation(current_plane->frame_info->rotation);
+
+ /* [2]: Compute and clamp the number of pixels to read */
+ clamp_line_coordinates(direction, current_plane, &src_line, &src_x_start, &src_y_start,
+ &dst_x_start, &pixel_count);
+
+ if (pixel_count <= 0) {
+ /* Nothing to read; skip the read and blend calls */
+ return;
+ }
+
+ /*
+ * Modify the starting point to take into account the rotation
+ *
+ * src_line is the top-left corner, so when reading READ_RIGHT_TO_LEFT or
+ * READ_BOTTOM_TO_TOP, it must be changed to the top-right/bottom-left
+ * corner.
+ */
+ if (direction == READ_RIGHT_TO_LEFT) {
+ // src_x_start is now the right point
+ src_x_start += pixel_count - 1;
+ } else if (direction == READ_BOTTOM_TO_TOP) {
+ // src_y_start is now the bottom point
+ src_y_start += pixel_count - 1;
+ }
+
+ /*
+ * Perform the conversion and the blending
+ *
+ * Here we know that the read line (x_start, y_start, pixel_count) is
+ * inside the source buffer [2] and we don't write outside the stage
+ * buffer [1].
+ */
+ current_plane->pixel_read_line(current_plane, src_x_start, src_y_start, direction,
+ pixel_count, &stage_buffer->pixels[dst_x_start]);
+
+ pre_mul_alpha_blend(stage_buffer, output_buffer,
+ dst_x_start, pixel_count);
+}
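For the no-rotation case the transform chain above reduces to two translations. A tiny sketch with made-up numbers (plane destination at (100, 50), integer source origin at (0, 0)):

#include <stdio.h>

int main(void)
{
	int dst_x1 = 100, dst_y1 = 50;	/* frame_info->dst, hypothetical */
	int src_x1 = 0, src_y1 = 0;	/* integer part of frame_info->src */
	int x = 100, y = 60;		/* start of the CRTC line to compose */

	/* Cancel the CRTC offset, then (identity rotation) add the source offset. */
	int read_x = x - dst_x1 + src_x1;
	int read_y = y - dst_y1 + src_y1;

	printf("read from (%d, %d) in the source\n", read_x, read_y); /* (0, 10) */
	return 0;
}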
+
+/**
* blend - blend the pixels from all planes and compute crc
* @wb: The writeback frame buffer metadata
* @crtc_state: The crtc state
@@ -183,32 +372,25 @@ static void blend(struct vkms_writeback_job *wb,
{
struct vkms_plane_state **plane = crtc_state->active_planes;
u32 n_active_planes = crtc_state->num_active_planes;
- int y_pos;
const struct pixel_argb_u16 background_color = { .a = 0xffff };
- size_t crtc_y_limit = crtc_state->base.mode.vdisplay;
+ int crtc_y_limit = crtc_state->base.mode.vdisplay;
+ int crtc_x_limit = crtc_state->base.mode.hdisplay;
/*
* The planes are composed line-by-line to avoid heavy memory usage. It is a necessary
* complexity to avoid poor blending performance.
*
- * The function vkms_compose_row() is used to read a line, pixel-by-pixel, into the staging
- * buffer.
+ * The pixel_read_line callback is used to read a line, using an efficient
+ * algorithm for a specific format, into the staging buffer.
*/
- for (size_t y = 0; y < crtc_y_limit; y++) {
+ for (int y = 0; y < crtc_y_limit; y++) {
fill_background(&background_color, output_buffer);
/* The active planes are composed associatively in z-order. */
for (size_t i = 0; i < n_active_planes; i++) {
- y_pos = get_y_pos(plane[i]->frame_info, y);
-
- if (!check_limit(plane[i]->frame_info, y_pos))
- continue;
-
- vkms_compose_row(stage_buffer, plane[i], y_pos);
- pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
- output_buffer);
+ blend_line(plane[i], y, crtc_x_limit, stage_buffer, output_buffer);
}
apply_lut(crtc_state, output_buffer);
@@ -216,7 +398,7 @@ static void blend(struct vkms_writeback_job *wb,
*crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
if (wb)
- vkms_writeback_row(wb, output_buffer, y_pos);
+ vkms_writeback_row(wb, output_buffer, y);
}
}
@@ -227,7 +409,7 @@ static int check_format_funcs(struct vkms_crtc_state *crtc_state,
u32 n_active_planes = crtc_state->num_active_planes;
for (size_t i = 0; i < n_active_planes; i++)
- if (!planes[i]->pixel_read)
+ if (!planes[i]->pixel_read_line)
return -1;
if (active_wb && !active_wb->pixel_write)
@@ -309,8 +491,8 @@ free_stage_buffer:
void vkms_composer_worker(struct work_struct *work)
{
struct vkms_crtc_state *crtc_state = container_of(work,
- struct vkms_crtc_state,
- composer_work);
+ struct vkms_crtc_state,
+ composer_work);
struct drm_crtc *crtc = crtc_state->base.crtc;
struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
@@ -335,7 +517,7 @@ void vkms_composer_worker(struct work_struct *work)
crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
crtc_state->gamma_lut.lut_length =
crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
- max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
+ max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
u16_max_fp);
@@ -374,7 +556,7 @@ void vkms_composer_worker(struct work_struct *work)
drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}
-static const char * const pipe_crc_sources[] = {"auto"};
+static const char *const pipe_crc_sources[] = { "auto" };
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count)
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index bbf080d32d2c..28a57ae109fc 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -186,8 +186,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
- plane);
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
WARN_ON(!plane_state);
if (!plane_state->visible)
@@ -203,8 +202,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i = 0;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
- plane);
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
if (!plane_state->visible)
continue;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 2d1e95cb66e5..e0409aba9349 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -13,10 +13,10 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
@@ -34,7 +34,6 @@
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
-#define DRIVER_DATE "20180514"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -82,8 +81,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_wait_for_flip_done(dev, old_state);
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- struct vkms_crtc_state *vkms_state =
- to_vkms_crtc_state(old_crtc_state);
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(old_crtc_state);
flush_work(&vkms_state->composer_work);
}
@@ -117,7 +115,6 @@ static const struct drm_driver vkms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -174,7 +171,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
- return vkms_output_init(vkmsdev, 0);
+ return vkms_output_init(vkmsdev);
}
static int vkms_create(struct vkms_config *config)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 672fe191e239..00541eff3d1b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -39,12 +39,8 @@
struct vkms_frame_info {
struct drm_framebuffer *fb;
struct drm_rect src, dst;
- struct drm_rect rotated;
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
unsigned int rotation;
- unsigned int offset;
- unsigned int pitch;
- unsigned int cpp;
};
struct pixel_argb_u16 {
@@ -56,23 +52,65 @@ struct line_buffer {
struct pixel_argb_u16 *pixels;
};
+/**
+ * typedef pixel_write_t - These functions are used to read a pixel from a
+ * &struct pixel_argb_u16, convert it to a specific format and write it to the
+ * @out_pixel buffer.
+ *
+ * @out_pixel: destination address to write the pixel
+ * @in_pixel: pixel to write
+ */
+typedef void (*pixel_write_t)(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel);
+
struct vkms_writeback_job {
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct vkms_frame_info wb_frame_info;
- void (*pixel_write)(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel);
+ pixel_write_t pixel_write;
};
/**
+ * enum pixel_read_direction - Enum used internally by VKMS to represent a reading direction in a
+ * plane.
+ */
+enum pixel_read_direction {
+ READ_BOTTOM_TO_TOP,
+ READ_TOP_TO_BOTTOM,
+ READ_RIGHT_TO_LEFT,
+ READ_LEFT_TO_RIGHT
+};
+
+struct vkms_plane_state;
+
+/**
+ * typedef pixel_read_line_t - These functions are used to read a pixel line in the source frame,
+ * convert it to &struct pixel_argb_u16 and write it to @out_pixel.
+ *
+ * @plane: plane used as source for the pixel value
+ * @x_start: X (width) coordinate of the first pixel to copy. The caller must ensure that x_start
+ * is non-negative and smaller than @plane->frame_info->fb->width.
+ * @y_start: Y (height) coordinate of the first pixel to copy. The caller must ensure that y_start
+ * is non-negative and smaller than @plane->frame_info->fb->height.
+ * @direction: direction to use for the copy, starting at @x_start/@y_start
+ * @count: number of pixels to copy
+ * @out_pixel: pointer to write the pixel values to. They will be written from @out_pixel[0]
+ * (included) to @out_pixel[@count] (excluded). The caller must ensure that @out_pixel has a
+ * length of at least @count.
+ */
+typedef void (*pixel_read_line_t)(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[]);
+
+/**
* struct vkms_plane_state - Driver specific plane state
* @base: base plane state
* @frame_info: data required for composing computation
- * @pixel_read: function to read a pixel in this plane. The creator of a struct vkms_plane_state
- * must ensure that this pointer is valid
+ * @pixel_read_line: function to read a pixel line in this plane. The creator of a
+ * struct vkms_plane_state must ensure that this pointer is valid
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
struct vkms_frame_info *frame_info;
- void (*pixel_read)(u8 *src_buffer, struct pixel_argb_u16 *out_pixel);
+ pixel_read_line_t pixel_read_line;
};
struct vkms_plane {
@@ -212,21 +250,17 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
* vkms_output_init() - Initialize all sub-components needed for a VKMS device.
*
* @vkmsdev: VKMS device to initialize
- * @index: CRTC which can be attached to the planes. The caller must ensure that
- * @index is positive and less or equals to 31.
*/
-int vkms_output_init(struct vkms_device *vkmsdev, int index);
+int vkms_output_init(struct vkms_device *vkmsdev);
/**
* vkms_plane_init() - Initialize a plane
*
* @vkmsdev: VKMS device containing the plane
* @type: type of plane to initialize
- * @index: CRTC which can be attached to the plane. The caller must ensure that
- * @index is positive and less or equals to 31.
*/
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index);
+ enum drm_plane_type type);
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
@@ -238,7 +272,6 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
/* Composer Support */
void vkms_composer_worker(struct work_struct *work);
void vkms_set_composer(struct vkms_output *out, bool enabled);
-void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
/* Writeback */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index e8a5cc235ebb..39b1d7c97d45 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -10,21 +10,46 @@
#include "vkms_formats.h"
/**
- * pixel_offset() - Get the offset of the pixel at coordinates x/y in the first plane
+ * packed_pixels_offset() - Get the offset of the block containing the pixel at coordinates x/y
*
* @frame_info: Buffer metadata
* @x: The x coordinate of the wanted pixel in the buffer
* @y: The y coordinate of the wanted pixel in the buffer
+ * @plane_index: The index of the plane to use
+ * @offset: The returned offset inside the buffer of the block
+ * @rem_x: The returned X coordinate of the requested pixel in the block
+ * @rem_y: The returned Y coordinate of the requested pixel in the block
*
- * The caller must ensure that the framebuffer associated with this request uses a pixel format
- * where block_h == block_w == 1.
- * If this requirement is not fulfilled, the resulting offset can point to an other pixel or
- * outside of the buffer.
+ * As some pixel formats store multiple pixels in a block (DRM_FORMAT_R* for example), some
+ * pixels are not individually addressable. This function returns 3 values: the offset of the
+ * whole block, and the coordinates of the requested pixel inside this block.
+ * For example, if the format is DRM_FORMAT_R1 and the requested coordinate is 13,5, the offset
+ * will point to the byte 5*pitches + 13/8 (second byte of the 5th line), and the rem_x/rem_y
+ * coordinates will be (13 % 8, 5 % 1) = (5, 0)
+ *
+ * With this function, the caller just has to extract the correct pixel from the block.
*/
-static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
+static void packed_pixels_offset(const struct vkms_frame_info *frame_info, int x, int y,
+ int plane_index, int *offset, int *rem_x, int *rem_y)
{
- return frame_info->offset + (y * frame_info->pitch)
- + (x * frame_info->cpp);
+ struct drm_framebuffer *fb = frame_info->fb;
+ const struct drm_format_info *format = frame_info->fb->format;
+ /* Directly multiplying x and y by the pitch and format->cpp is not sufficient
+ * because in some formats a block can represent multiple pixels.
+ *
+ * Dividing x and y by the block size makes it possible to extract the correct
+ * offset of the block containing the pixel.
+ */
+
+ int block_x = x / drm_format_info_block_width(format, plane_index);
+ int block_y = y / drm_format_info_block_height(format, plane_index);
+ int block_pitch = fb->pitches[plane_index] * drm_format_info_block_height(format,
+ plane_index);
+ *rem_x = x % drm_format_info_block_width(format, plane_index);
+ *rem_y = y % drm_format_info_block_height(format, plane_index);
+ *offset = fb->offsets[plane_index] +
+ block_y * block_pitch +
+ block_x * format->char_per_block[plane_index];
}
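Checking the DRM_FORMAT_R1 example from the comment above, with a made-up pitch of 16 bytes (block_w = 8, block_h = 1, one byte per block):

#include <stdio.h>

int main(void)
{
	int block_w = 8, block_h = 1, char_per_block = 1;
	int pitch = 16, base = 0;	/* hypothetical pitches[0] / offsets[0] */
	int x = 13, y = 5;

	int block_x = x / block_w;		/* 1 */
	int block_y = y / block_h;		/* 5 */
	int block_pitch = pitch * block_h;	/* 16 */
	int offset = base + block_y * block_pitch + block_x * char_per_block;

	printf("offset=%d rem_x=%d rem_y=%d\n", offset, x % block_w, y % block_h);
	/* offset=81 (5*16 + 1), i.e. the second byte of the 5th line; rem=(5, 0) */
	return 0;
}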
/**
@@ -34,145 +59,266 @@ static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int
* @frame_info: Buffer metadata
* @x: The x (width) coordinate inside the plane
* @y: The y (height) coordinate inside the plane
+ * @plane_index: The index of the plane
+ * @addr: The returned pointer
+ * @rem_x: The returned X coordinate of the requested pixel in the block
+ * @rem_y: The returned Y coordinate of the requested pixel in the block
*
- * Takes the information stored in the frame_info, a pair of coordinates, and
- * returns the address of the first color channel.
- * This function assumes the channels are packed together, i.e. a color channel
- * comes immediately after another in the memory. And therefore, this function
- * doesn't work for YUV with chroma subsampling (e.g. YUV420 and NV21).
+ * Takes the information stored in the frame_info, a pair of coordinates, and returns the address
+ * of the block containing this pixel and the pixel position inside this block.
*
- * The caller must ensure that the framebuffer associated with this request uses a pixel format
- * where block_h == block_w == 1, otherwise the returned pointer can be outside the buffer.
+ * See @packed_pixels_offset for details about rem_x/rem_y behavior.
*/
-static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
- int x, int y)
+static void packed_pixels_addr(const struct vkms_frame_info *frame_info,
+ int x, int y, int plane_index, u8 **addr, int *rem_x,
+ int *rem_y)
{
- size_t offset = pixel_offset(frame_info, x, y);
+ int offset;
- return (u8 *)frame_info->map[0].vaddr + offset;
+ packed_pixels_offset(frame_info, x, y, plane_index, &offset, rem_x, rem_y);
+ *addr = (u8 *)frame_info->map[0].vaddr + offset;
}
-static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y)
+/**
+ * get_block_step_bytes() - Common helper to compute the byte step between two consecutive
+ * pixel blocks in a given reading direction.
+ *
+ * @fb: Framebuffer to iterate on
+ * @direction: Direction of the reading
+ * @plane_index: Plane to get the step from
+ *
+ * As the returned value is the number of bytes between two consecutive blocks in a direction,
+ * the caller may have to read multiple pixels before applying the step (for example, when
+ * reading from left to right in a DRM_FORMAT_R1 plane, each block contains 8 pixels, so the
+ * step must be applied only every 8 pixels).
+ */
+static int get_block_step_bytes(struct drm_framebuffer *fb, enum pixel_read_direction direction,
+ int plane_index)
{
- int x_src = frame_info->src.x1 >> 16;
- int y_src = y - frame_info->rotated.y1 + (frame_info->src.y1 >> 16);
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ return fb->format->char_per_block[plane_index];
+ case READ_RIGHT_TO_LEFT:
+ return -fb->format->char_per_block[plane_index];
+ case READ_TOP_TO_BOTTOM:
+ return (int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format,
+ plane_index);
+ case READ_BOTTOM_TO_TOP:
+ return -(int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format,
+ plane_index);
+ }
- return packed_pixels_addr(frame_info, x_src, y_src);
+ return 0;
}
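Plugging XRGB8888 into the switch above (1x1 blocks of 4 bytes, hypothetical pitch of 256) gives the steps below; for a block format such as DRM_FORMAT_R1 the left-to-right step is 1 byte, applied only once every 8 pixels as the comment notes:

#include <stdio.h>

int main(void)
{
	int pitch = 256, cpb = 4, block_w = 1;	/* hypothetical XRGB8888 fb */

	printf("left-to-right: %+d bytes\n", cpb);		/* +4 */
	printf("right-to-left: %+d bytes\n", -cpb);		/* -4 */
	printf("top-to-bottom: %+d bytes\n", pitch * block_w);	/* +256 */
	printf("bottom-to-top: %+d bytes\n", -pitch * block_w);	/* -256 */
	return 0;
}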
-static int get_x_position(const struct vkms_frame_info *frame_info, int limit, int x)
+/**
+ * packed_pixels_addr_1x1() - Get the pointer to the block containing the pixel at the given
+ * coordinates
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x (width) coordinate inside the plane
+ * @y: The y (height) coordinate inside the plane
+ * @plane_index: The index of the plane
+ * @addr: The returned pointer
+ *
+ * This function can only be used with formats where block_h == block_w == 1.
+ */
+static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info,
+ int x, int y, int plane_index, u8 **addr)
{
- if (frame_info->rotation & (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_270))
- return limit - x - 1;
- return x;
+ int offset, rem_x, rem_y;
+
+ WARN_ONCE(drm_format_info_block_width(frame_info->fb->format,
+ plane_index) != 1,
+ "%s() only support formats with block_w == 1", __func__);
+ WARN_ONCE(drm_format_info_block_height(frame_info->fb->format,
+ plane_index) != 1,
+ "%s() only support formats with block_h == 1", __func__);
+
+ packed_pixels_offset(frame_info, x, y, plane_index, &offset, &rem_x,
+ &rem_y);
+ *addr = (u8 *)frame_info->map[0].vaddr + offset;
}
/*
- * The following functions take pixel data from the buffer and convert them to the format
- * ARGB16161616 in @out_pixel.
+ * The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to
+ * &struct pixel_argb_u16.
*
- * They are used in the vkms_compose_row() function to handle multiple formats.
+ * They are used in the read_line functions to avoid duplicate work for some pixel formats.
*/
-static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_u8888(u8 a, u8 r, u8 g, u8 b)
{
+ struct pixel_argb_u16 out_pixel;
/*
* The 257 is the "conversion ratio". This number is obtained by the
* (2^16 - 1) / (2^8 - 1) division. Which, in this case, tries to get
* the best color value in a pixel format with more possibilities.
* A similar idea applies to others RGB color conversions.
*/
- out_pixel->a = (u16)src_pixels[3] * 257;
- out_pixel->r = (u16)src_pixels[2] * 257;
- out_pixel->g = (u16)src_pixels[1] * 257;
- out_pixel->b = (u16)src_pixels[0] * 257;
-}
+ out_pixel.a = (u16)a * 257;
+ out_pixel.r = (u16)r * 257;
+ out_pixel.g = (u16)g * 257;
+ out_pixel.b = (u16)b * 257;
-static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
-{
- out_pixel->a = (u16)0xffff;
- out_pixel->r = (u16)src_pixels[2] * 257;
- out_pixel->g = (u16)src_pixels[1] * 257;
- out_pixel->b = (u16)src_pixels[0] * 257;
+ return out_pixel;
}
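The ratio is exact: 255 * 257 = 65535, and multiplying by 257 simply replicates the byte into both halves of the u16 (0xab becomes 0xabab). A quick check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (unsigned int v = 0; v <= 255; v += 85)
		printf("0x%02x -> 0x%04x\n", v, (uint16_t)(v * 257));
	/* 0x00 -> 0x0000, 0x55 -> 0x5555, 0xaa -> 0xaaaa, 0xff -> 0xffff */
	return 0;
}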
-static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_u16161616(u16 a, u16 r, u16 g, u16 b)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
+ struct pixel_argb_u16 out_pixel;
- out_pixel->a = le16_to_cpu(pixels[3]);
- out_pixel->r = le16_to_cpu(pixels[2]);
- out_pixel->g = le16_to_cpu(pixels[1]);
- out_pixel->b = le16_to_cpu(pixels[0]);
+ out_pixel.a = a;
+ out_pixel.r = r;
+ out_pixel.g = g;
+ out_pixel.b = b;
+
+ return out_pixel;
}
-static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_le16161616(__le16 a, __le16 r, __le16 g, __le16 b)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
-
- out_pixel->a = (u16)0xffff;
- out_pixel->r = le16_to_cpu(pixels[2]);
- out_pixel->g = le16_to_cpu(pixels[1]);
- out_pixel->b = le16_to_cpu(pixels[0]);
+ return argb_u16_from_u16161616(le16_to_cpu(a), le16_to_cpu(r), le16_to_cpu(g),
+ le16_to_cpu(b));
}
-static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
+ struct pixel_argb_u16 out_pixel;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
- u16 rgb_565 = le16_to_cpu(*pixels);
+ u16 rgb_565 = le16_to_cpu(*pixel);
s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
- out_pixel->a = (u16)0xffff;
- out_pixel->r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
- out_pixel->g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
- out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
+ out_pixel.a = (u16)0xffff;
+ out_pixel.r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
+ out_pixel.g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
+ out_pixel.b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
+
+ return out_pixel;
}
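The two fixed-point ratios scale the 5- and 6-bit fields onto the full 16-bit range. An integer model with round-to-nearest (a sketch of the drm_fixp math above, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* Scale an n-bit channel value to 16 bits, rounding to nearest. */
static uint16_t scale_to_u16(uint16_t v, unsigned int bits)
{
	uint32_t max = (1u << bits) - 1;

	return (uint16_t)((v * 65535u + max / 2) / max);
}

int main(void)
{
	uint16_t rgb565 = 0xf81f;	/* magenta: r=0x1f, g=0x00, b=0x1f */

	printf("r=0x%04x g=0x%04x b=0x%04x\n",
	       scale_to_u16((rgb565 >> 11) & 0x1f, 5),
	       scale_to_u16((rgb565 >> 5) & 0x3f, 6),
	       scale_to_u16(rgb565 & 0x1f, 5));	/* 0xffff 0x0000 0xffff */
	return 0;
}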
-/**
- * vkms_compose_row - compose a single row of a plane
- * @stage_buffer: output line with the composed pixels
- * @plane: state of the plane that is being composed
- * @y: y coordinate of the row
+/*
+ * The following functions are the read_line implementations for each pixel format supported by VKMS.
+ *
+ * They read a line starting at the point @x_start,@y_start following the @direction. The result
+ * is stored in @out_pixel in the ARGB16161616 format.
+ *
+ * These functions are very repetitive, but the innermost pixel loops must be kept inside these
+ * functions for performance reasons. Some benchmarking was done in [1] where having the innermost
+ * loop factored out of these functions showed a slowdown by a factor of three.
*
- * This function composes a single row of a plane. It gets the source pixels
- * through the y coordinate (see get_packed_src_addr()) and goes linearly
- * through the source pixel, reading the pixels and converting it to
- * ARGB16161616 (see the pixel_read() callback). For rotate-90 and rotate-270,
- * the source pixels are not traversed linearly. The source pixels are queried
- * on each iteration in order to traverse the pixels vertically.
+ * [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/
*/
-void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
+
+static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
{
- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
- struct vkms_frame_info *frame_info = plane->frame_info;
- u8 *src_pixels = get_packed_src_addr(frame_info, y);
- int limit = min_t(size_t, drm_rect_width(&frame_info->dst), stage_buffer->n_pixels);
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
- for (size_t x = 0; x < limit; x++, src_pixels += frame_info->cpp) {
- int x_pos = get_x_position(frame_info, limit, x);
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
- if (drm_rotation_90_or_270(frame_info->rotation))
- src_pixels = get_packed_src_addr(frame_info, x + frame_info->rotated.y1)
- + frame_info->cpp * y;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- plane->pixel_read(src_pixels, &out_pixels[x_pos]);
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u16 *px = (u16 *)src_pixels;
+ *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ __le16 *px = (__le16 *)src_pixels;
+ *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ __le16 *px = (__le16 *)src_pixels;
+
+ *out_pixel = argb_u16_from_RGB565(px);
+ out_pixel += 1;
+ src_pixels += step;
}
}
/*
* The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
- * The result is stored in @dst_pixels.
+ * The result is stored in @out_pixel.
*
* They are used in vkms_writeback_row() to convert and store a pixel from the src_buffer to
* the writeback buffer.
*/
-static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_ARGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
/*
* This sequence below is important because the format's byte order is
@@ -184,43 +330,43 @@ static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel
* | Addr + 2 | = Red channel
* | Addr + 3 | = Alpha channel
*/
- dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_XRGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- dst_pixels[3] = 0xff;
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[3] = 0xff;
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_ARGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
- pixels[3] = cpu_to_le16(in_pixel->a);
- pixels[2] = cpu_to_le16(in_pixel->r);
- pixels[1] = cpu_to_le16(in_pixel->g);
- pixels[0] = cpu_to_le16(in_pixel->b);
+ pixel[3] = cpu_to_le16(in_pixel->a);
+ pixel[2] = cpu_to_le16(in_pixel->r);
+ pixel[1] = cpu_to_le16(in_pixel->g);
+ pixel[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_XRGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
- pixels[3] = cpu_to_le16(0xffff);
- pixels[2] = cpu_to_le16(in_pixel->r);
- pixels[1] = cpu_to_le16(in_pixel->g);
- pixels[0] = cpu_to_le16(in_pixel->b);
+ pixel[3] = cpu_to_le16(0xffff);
+ pixel[2] = cpu_to_le16(in_pixel->r);
+ pixel[1] = cpu_to_le16(in_pixel->g);
+ pixel[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_RGB565(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
@@ -233,7 +379,7 @@ static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
- *pixels = cpu_to_le16(r << 11 | g << 5 | b);
+ *pixel = cpu_to_le16(r << 11 | g << 5 | b);
}
/**
@@ -249,36 +395,47 @@ void vkms_writeback_row(struct vkms_writeback_job *wb,
{
struct vkms_frame_info *frame_info = &wb->wb_frame_info;
int x_dst = frame_info->dst.x1;
- u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ u8 *dst_pixels;
+ int rem_x, rem_y;
+
+ packed_pixels_addr(frame_info, x_dst, y, 0, &dst_pixels, &rem_x, &rem_y);
struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels);
- for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->cpp)
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->fb->format->cpp[0])
wb->pixel_write(dst_pixels, &in_pixels[x]);
}
/**
- * get_pixel_conversion_function() - Retrieve the correct read_pixel function for a specific
+ * get_pixel_read_line_function() - Retrieve the correct read_line function for a specific
format. The returned pointer is always valid: for unsupported pixel formats this function
hits a BUG(), as the atomic check must reject such configurations beforehand.
*
* @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
*/
-void *get_pixel_conversion_function(u32 format)
+pixel_read_line_t get_pixel_read_line_function(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
- return &ARGB8888_to_argb_u16;
+ return &ARGB8888_read_line;
case DRM_FORMAT_XRGB8888:
- return &XRGB8888_to_argb_u16;
+ return &XRGB8888_read_line;
case DRM_FORMAT_ARGB16161616:
- return &ARGB16161616_to_argb_u16;
+ return &ARGB16161616_read_line;
case DRM_FORMAT_XRGB16161616:
- return &XRGB16161616_to_argb_u16;
+ return &XRGB16161616_read_line;
case DRM_FORMAT_RGB565:
- return &RGB565_to_argb_u16;
+ return &RGB565_read_line;
default:
- return NULL;
+ /*
+ * This is a bug in vkms_plane_atomic_check(). All the supported
+ * formats must:
+ * - Be listed in vkms_formats in vkms_plane.c
+ * - Have a read_line function defined here
+ */
+ pr_err("Pixel format %p4cc is not supported by VKMS planes. This is a kernel bug, atomic check must forbid this configuration.\n",
+ &format);
+ BUG();
}
}
@@ -289,7 +446,7 @@ void *get_pixel_conversion_function(u32 format)
*
* @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
*/
-void *get_pixel_write_function(u32 format)
+pixel_write_t get_pixel_write_function(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
@@ -303,6 +460,14 @@ void *get_pixel_write_function(u32 format)
case DRM_FORMAT_RGB565:
return &argb_u16_to_RGB565;
default:
- return NULL;
+ /*
+ * This is a bug in vkms_writeback_atomic_check(). All the supported
+ * formats must:
+ * - Be listed in vkms_wb_formats in vkms_writeback.c
+ * - Have a pixel_write callback defined here
+ */
+ pr_err("Pixel format %p4cc is not supported by VKMS writeback. This is a kernel bug, atomic check must forbid this configuration.\n",
+ &format);
+ BUG();
}
}
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index cf59c2ed8e9a..8d2bef95ff79 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -5,8 +5,8 @@
#include "vkms_drv.h"
-void *get_pixel_conversion_function(u32 format);
+pixel_read_line_t get_pixel_read_line_function(u32 format);
-void *get_pixel_write_function(u32 format);
+pixel_write_t get_pixel_write_function(u32 format);
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 25a99fde126c..8f4bd5aef087 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -32,29 +32,14 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
.get_modes = vkms_conn_get_modes,
};
-static int vkms_add_overlay_plane(struct vkms_device *vkmsdev, int index,
- struct drm_crtc *crtc)
-{
- struct vkms_plane *overlay;
-
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
- if (IS_ERR(overlay))
- return PTR_ERR(overlay);
-
- if (!overlay->base.possible_crtcs)
- overlay->base.possible_crtcs = drm_crtc_mask(crtc);
-
- return 0;
-}
-
-int vkms_output_init(struct vkms_device *vkmsdev, int index)
+int vkms_output_init(struct vkms_device *vkmsdev)
{
struct vkms_output *output = &vkmsdev->output;
struct drm_device *dev = &vkmsdev->drm;
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
struct drm_crtc *crtc = &output->crtc;
- struct vkms_plane *primary, *cursor = NULL;
+ struct vkms_plane *primary, *overlay, *cursor = NULL;
int ret;
int writeback;
unsigned int n;
@@ -65,29 +50,31 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
* The overlay and cursor planes are not mandatory, but can be used to perform complex
* composition.
*/
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
+ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(primary))
return PTR_ERR(primary);
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- ret = vkms_add_overlay_plane(vkmsdev, index, crtc);
- if (ret)
- return ret;
- }
- }
-
if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
+ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
if (IS_ERR(cursor))
return PTR_ERR(cursor);
}
- /* [1]: Allocation of a CRTC, its index will be BIT(0) = 1 */
ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base);
if (ret)
return ret;
+ if (vkmsdev->config->overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(overlay)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(overlay);
+ }
+ overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+ }
+ }
+
ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
@@ -103,11 +90,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
DRM_ERROR("Failed to init encoder\n");
goto err_encoder;
}
- /*
- * This is a hardcoded value to select crtc for the encoder.
- * BIT(0) here designate the first registered CRTC, the one allocated in [1]
- */
- encoder->possible_crtcs = BIT(0);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e5c625ab8e3e..e2fce471870f 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -112,23 +112,12 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
frame_info = vkms_plane_state->frame_info;
memcpy(&frame_info->src, &new_state->src, sizeof(struct drm_rect));
memcpy(&frame_info->dst, &new_state->dst, sizeof(struct drm_rect));
- memcpy(&frame_info->rotated, &new_state->dst, sizeof(struct drm_rect));
frame_info->fb = fb;
memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
drm_framebuffer_get(frame_info->fb);
- frame_info->rotation = drm_rotation_simplify(new_state->rotation, DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_270 |
- DRM_MODE_REFLECT_X |
- DRM_MODE_REFLECT_Y);
-
- drm_rect_rotate(&frame_info->rotated, drm_rect_width(&frame_info->rotated),
- drm_rect_height(&frame_info->rotated), frame_info->rotation);
-
- frame_info->offset = fb->offsets[0];
- frame_info->pitch = fb->pitches[0];
- frame_info->cpp = fb->format->cpp[0];
- vkms_plane_state->pixel_read = get_pixel_conversion_function(fmt);
+ frame_info->rotation = new_state->rotation;
+
+ vkms_plane_state->pixel_read_line = get_pixel_read_line_function(fmt);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -198,12 +187,12 @@ static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = {
};
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index)
+ enum drm_plane_type type)
{
struct drm_device *dev = &vkmsdev->drm;
struct vkms_plane *plane;
- plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 1 << index,
+ plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 0,
&vkms_plane_funcs,
vkms_formats, ARRAY_SIZE(vkms_formats),
NULL, type, NULL);
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 999d5c01ea81..79918b44fedd 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -149,11 +149,6 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
crtc_state->active_writeback = active_wb;
crtc_state->wb_pending = true;
spin_unlock_irq(&output->composer_lock);
-
- wb_frame_info->offset = fb->offsets[0];
- wb_frame_info->pitch = fb->pitches[0];
- wb_frame_info->cpp = fb->format->cpp[0];
-
drm_writeback_queue_job(wb_conn, connector_state);
active_wb->pixel_write = get_pixel_write_function(wb_format);
drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a0e433fbcba6..9b5b8c1f063b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -228,7 +228,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
- buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -443,7 +442,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
- ttm_bo_unreserve(&vmw_bo->tbo);
+ if (!params->keep_resv)
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 43b5439ec9f7..11e330c7c7f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -56,8 +56,9 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
- size_t size;
bool pin;
+ bool keep_resv;
+ size_t size;
struct dma_resv *resv;
struct sg_table *sg;
};
@@ -83,7 +84,6 @@ struct vmw_bo {
struct ttm_placement placement;
struct ttm_place places[5];
- struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2c46897876dd..0f32471c8533 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -35,7 +35,7 @@
#include "vmwgfx_vkms.h"
#include "ttm_object.h"
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -403,7 +403,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
/*
@@ -415,10 +416,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
- vmw_bo_pin_reserved(vbo, true);
-
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
@@ -1634,7 +1631,6 @@ static const struct drm_driver driver = {
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
- .date = VMWGFX_DRIVER_DATE,
.major = VMWGFX_DRIVER_MAJOR,
.minor = VMWGFX_DRIVER_MINOR,
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b21831ef214a..5275ef632d4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -57,7 +57,6 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index b9857f37ca1a..ed5015ced392 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
+ .keep_resv = true,
.resv = attach->dmabuf->resv,
.sg = table,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8db38927729b..800a79e035ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
struct vmw_bo *old_bo = NULL;
struct vmw_bo *new_bo = NULL;
+ struct ww_acquire_ctx ctx;
s32 hotspot_x, hotspot_y;
int ret;
@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
if (!vmw_user_object_is_null(&old_vps->uo)) {
old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret != 0)
return;
}
@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (!vmw_user_object_is_null(&vps->uo)) {
new_bo = vmw_user_object_buffer(&vps->uo);
if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
- if (ret != 0)
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ if (old_bo) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ }
return;
+ }
} else {
new_bo = NULL;
}
@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
if (new_bo)
ttm_bo_unreserve(&new_bo->tbo);
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 39949e0a493f..f0b429525467 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -479,7 +479,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 0f4bfd98480a..32029d80b72b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -868,7 +868,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a01ca3226d0a..7fb1c88bcc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
if (!vmw_shader_id_ok(user_key, shader_type))
@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
- if (unlikely(ret != 0))
- goto no_reserve;
-
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 82d18b88f4a7..114a75069e1c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1593,7 +1593,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, false);
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 621d98b376bb..5553892d7c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 2de0de41b8dd..0d749ed44878 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -66,7 +66,7 @@ config DRM_XE_DEBUG_MEM
bool "Enable passing SYS/VRAM addresses to user space"
default n
help
- Pass object location trough uapi. Intended for extended
+ Pass object location through uapi. Intended for extended
testing and development only.
Recommended for driver developers only.
@@ -104,5 +104,5 @@ config DRM_XE_USERPTR_INVAL_INJECT
Choose this option when debugging error paths that
are hit during checks for userptr invalidations.
- Recomended for driver developers only.
+ Recommended for driver developers only.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index bc7a04ce69fd..5c97ad6ed738 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -101,6 +101,7 @@ xe-y += xe_bb.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
+ xe_trace_lrc.o \
xe_ttm_sys_mgr.o \
xe_ttm_stolen_mgr.o \
xe_ttm_vram_mgr.o \
@@ -110,6 +111,7 @@ xe-y += xe_bb.o \
xe_vm.o \
xe_vram.o \
xe_vram_freq.o \
+ xe_vsec.o \
xe_wait_user_fence.o \
xe_wa.o \
xe_wopcm.o
@@ -124,7 +126,8 @@ xe-y += \
xe_gt_sriov_vf.o \
xe_guc_relay.o \
xe_memirq.o \
- xe_sriov.o
+ xe_sriov.o \
+ xe_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
@@ -206,6 +209,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
+ i915-display/intel_display_conversion.o \
i915-display/intel_display_device.o \
i915-display/intel_display_driver.o \
i915-display/intel_display_irq.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index b54fe40fc5a9..fee385532fb0 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -134,6 +134,8 @@ enum xe_guc_action {
XE_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
XE_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
XE_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+ XE_GUC_ACTION_REGISTER_G2G = 0x4507,
+ XE_GUC_ACTION_DEREGISTER_G2G = 0x4508,
XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
@@ -218,4 +220,22 @@ enum xe_guc_tlb_inval_mode {
XE_GUC_TLB_INVAL_MODE_LITE = 0x1,
};
+/*
+ * GuC to GuC communication (de-)registration fields:
+ */
+enum xe_guc_g2g_type {
+ XE_G2G_TYPE_IN = 0x0,
+ XE_G2G_TYPE_OUT,
+ XE_G2G_TYPE_LIMIT,
+};
+
+#define XE_G2G_REGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_REGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_REGISTER_TYPE REG_GENMASK(11, 8)
+#define XE_G2G_REGISTER_SIZE REG_GENMASK(7, 0)
+
+#define XE_G2G_DEREGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+
#endif
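
A minimal sketch of how the new G2G registration fields above might be packed, assuming the GuC expects them combined into one descriptor dword; the helper name, the size units, and the surrounding H2G message layout are illustrative, not taken from the patch:

	static u32 g2g_register_dword(bool far_device, u8 far_tile, u32 type, u32 size)
	{
		/* type is XE_G2G_TYPE_IN or XE_G2G_TYPE_OUT */
		return REG_FIELD_PREP(XE_G2G_REGISTER_DEVICE, far_device) |
		       REG_FIELD_PREP(XE_G2G_REGISTER_TILE, far_tile) |
		       REG_FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
		       REG_FIELD_PREP(XE_G2G_REGISTER_SIZE, size);
	}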
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
index b6a1852749dd..0b28659d94e9 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -502,6 +502,44 @@
#define VF2GUC_VF_RESET_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
/**
+ * DOC: VF2GUC_NOTIFY_RESFIX_DONE
+ *
+ * This action is used by VF to notify the GuC that the VF KMD has completed
+ * post-migration recovery steps.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE` = 0x5508 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE 0x5508u
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_LEN GUC_HXG_REQUEST_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+
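Per the table above, the request is a single HXG dword. A hedged sketch of composing it, assuming the generic GUC_HXG_* field names from the GuC messages ABI header:

	u32 request[VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
	};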
+/**
* DOC: VF2GUC_QUERY_SINGLE_KLV
*
* This action is used by VF to query value of the single KLV data.
diff --git a/drivers/gpu/drm/xe/abi/guc_capture_abi.h b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
index e7898edc6236..dd4117553739 100644
--- a/drivers/gpu/drm/xe/abi/guc_capture_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
@@ -25,7 +25,7 @@ enum guc_state_capture_type {
#define GUC_STATE_CAPTURE_TYPE_MAX (GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE + 1)
-/* Class indecies for capture_class and capture_instance arrays */
+/* Class indices for capture_class and capture_instance arrays */
enum guc_capture_list_class_type {
GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 37606cf8cc5e..d633f1c739e4 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -132,7 +132,7 @@ enum {
* _`GUC_KLV_VGT_POLICY_SCHED_IF_IDLE` : 0x8001
* This config sets whether strict scheduling is enabled whereby any VF
* that doesn’t have work to submit is still allocated a fixed execution
- * time-slice to ensure active VFs execution is always consitent even
+ * time-slice to ensure active VFs execution is always consistent even
 * during other VF reprovisioning / rebooting events. Changing this KLV
* impacts all VFs and takes effect on the next VF-Switch event.
*
@@ -207,7 +207,7 @@ enum {
* of and this will never be perfectly-exact (accumulated nano-second
* granularity) since the GPUs clock time runs off a different crystal
* from the CPUs clock. Changing this KLV on a VF that is currently
- * running a context wont take effect until a new context is scheduled in.
+ * running a context won't take effect until a new context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
* infinitely long compute or shader kernel. In such a scenario, the
@@ -227,7 +227,7 @@ enum {
* HW is capable and this will never be perfectly-exact (accumulated
* nano-second granularity) since the GPUs clock time runs off a
* different crystal from the CPUs clock. Changing this KLV on a VF
- * that is currently running a context wont take effect until a new
+ * that is currently running a context won't take effect until a new
* context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
@@ -291,6 +291,14 @@ enum {
*
* :0: (default)
* :1-65535: number of contexts (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_SCHED_PRIORITY` : 0x8A0C
+ * This config controls VF’s scheduling priority.
+ *
+ * :0: LOW = schedule VF only if it has active work (default)
+ * :1: NORMAL = schedule VF always, irrespective of whether it has work or not
+ * :2: HIGH = schedule VF in the next time-slice after current active
+ * time-slice completes if it has active work
*/
#define GUC_KLV_VF_CFG_GGTT_START_KEY 0x0001
@@ -343,6 +351,12 @@ enum {
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY 0x8a0c
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN 1u
+#define GUC_SCHED_PRIORITY_LOW 0u
+#define GUC_SCHED_PRIORITY_NORMAL 1u
+#define GUC_SCHED_PRIORITY_HIGH 2u
+
/*
* Workaround keys:
*/
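
For illustration, a PF-side KLV for the new scheduling-priority key is two dwords, header plus value. A hedged sketch assuming the generic GUC_KLV_0_KEY/GUC_KLV_0_LEN header fields defined elsewhere in this header:

	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY) |
		FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN),
		GUC_SCHED_PRIORITY_HIGH,	/* value dword */
	};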
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 0382beb4035b..4fc3e535de91 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,6 +10,11 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
+static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
+{
+ return &to_xe_device(drm)->uncore;
+}
+
static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
@@ -117,10 +122,19 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
unsigned int slow_timeout_ms, u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+ bool atomic;
+
+ /*
+ * Replicate the behavior from i915 here, in which sleep is not
+ * performed if slow_timeout_ms == 0. This is necessary because
+ * of some paths in display code where waits are done in atomic
+ * context.
+ */
+ atomic = !slow_timeout_ms && fast_timeout_us > 0;
return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
- out_value, false);
+ out_value, atomic);
}
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
index d429d421ac70..d429d421ac70 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
index a7dbc6554d69..ac4cda2d81c7 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -53,18 +53,7 @@ void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
bool intel_irqs_enabled(struct xe_device *xe)
{
- /*
- * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
- * lock its transitions. Because of that, the irq.enabled sometimes
- * is not read with the irq.lock in place.
- * However, the most critical cases like vblank and page flips are
- * properly using the locks.
- * We cannot take the lock in here or run any kind of assert because
- * of i915 inconsistency.
- * But at this point the xe irq is better protected against races,
- * although the full solution would be protecting the i915 side.
- */
- return xe->irq.enabled;
+ return atomic_read(&xe->irq.enabled);
}
void intel_synchronize_irq(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 9f54fad0f1c0..b463f5bd4eed 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -40,31 +40,8 @@ int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
- struct ttm_bo_kmap_obj map;
- void *src;
- bool is_iomem;
- int ret;
- ret = xe_bo_lock(bo, true);
- if (ret)
- return ret;
-
- ret = ttm_bo_kmap(&bo->ttm, offset >> PAGE_SHIFT, 1, &map);
- if (ret)
- goto out_unlock;
-
- offset &= ~PAGE_MASK;
- src = ttm_kmap_obj_virtual(&map, &is_iomem);
- src += offset;
- if (is_iomem)
- memcpy_fromio(dst, (void __iomem *)src, size);
- else
- memcpy(dst, src, size);
-
- ttm_bo_kunmap(&map);
-out_unlock:
- xe_bo_unlock(bo);
- return ret;
+ return xe_bo_read(bo, offset, dst, size);
}
struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index b5502f335f53..b3921dbc52ff 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -22,6 +22,7 @@
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
@@ -103,11 +104,12 @@ int xe_display_create(struct xe_device *xe)
static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_power_domains_cleanup(xe);
+ intel_power_domains_cleanup(display);
}
int xe_display_init_nommio(struct xe_device *xe)
@@ -132,7 +134,7 @@ static void xe_display_fini_noirq(void *arg)
if (!xe->info.probe_display)
return;
- intel_display_driver_remove_noirq(xe);
+ intel_display_driver_remove_noirq(display);
intel_opregion_cleanup(display);
}
@@ -144,7 +146,7 @@ int xe_display_init_noirq(struct xe_device *xe)
if (!xe->info.probe_display)
return 0;
- intel_display_driver_early_probe(xe);
+ intel_display_driver_early_probe(display);
/* Early display init.. */
intel_opregion_setup(display);
@@ -157,9 +159,9 @@ int xe_display_init_noirq(struct xe_device *xe)
intel_bw_init_hw(xe);
- intel_display_device_info_runtime_init(xe);
+ intel_display_device_info_runtime_init(display);
- err = intel_display_driver_probe_noirq(xe);
+ err = intel_display_driver_probe_noirq(display);
if (err) {
intel_opregion_cleanup(display);
return err;
@@ -171,21 +173,23 @@ int xe_display_init_noirq(struct xe_device *xe)
static void xe_display_fini_noaccel(void *arg)
{
struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_display_driver_remove_nogem(xe);
+ intel_display_driver_remove_nogem(display);
}
int xe_display_init_noaccel(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
int err;
if (!xe->info.probe_display)
return 0;
- err = intel_display_driver_probe_nogem(xe);
+ err = intel_display_driver_probe_nogem(display);
if (err)
return err;
@@ -194,10 +198,12 @@ int xe_display_init_noaccel(struct xe_device *xe)
int xe_display_init(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return 0;
- return intel_display_driver_probe(xe);
+ return intel_display_driver_probe(display);
}
void xe_display_fini(struct xe_device *xe)
@@ -215,30 +221,36 @@ void xe_display_fini(struct xe_device *xe)
void xe_display_register(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_driver_register(xe);
+ intel_display_driver_register(display);
+ intel_power_domains_enable(display);
intel_register_dsm_handler();
- intel_power_domains_enable(xe);
}
void xe_display_unregister(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
intel_unregister_dsm_handler();
- intel_power_domains_disable(xe);
- intel_display_driver_unregister(xe);
+ intel_power_domains_disable(display);
+ intel_display_driver_unregister(display);
}
void xe_display_driver_remove(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_driver_remove(xe);
+ intel_display_driver_remove(display);
}
/* IRQ-related functions */
@@ -322,25 +334,22 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
 * We do a lot of poking in a lot of registers; make sure they work
* properly.
*/
- intel_power_domains_disable(xe);
+ intel_power_domains_disable(display);
if (!runtime)
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
if (!runtime && has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- intel_display_driver_disable_user_access(xe);
- intel_display_driver_suspend(xe);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
}
xe_display_flush_cleanup_work(xe);
- if (!runtime)
- intel_dp_mst_suspend(xe);
-
intel_hpd_cancel_work(xe);
if (!runtime && has_display(xe)) {
- intel_display_driver_suspend_access(xe);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&xe->display);
}
@@ -364,20 +373,20 @@ void xe_display_pm_shutdown(struct xe_device *xe)
if (!xe->info.probe_display)
return;
- intel_power_domains_disable(xe);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- intel_display_driver_disable_user_access(xe);
- intel_display_driver_suspend(xe);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
}
xe_display_flush_cleanup_work(xe);
- intel_dp_mst_suspend(xe);
+ intel_dp_mst_suspend(display);
intel_hpd_cancel_work(xe);
if (has_display(xe))
- intel_display_driver_suspend_access(xe);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
intel_encoder_shutdown_all(display);
@@ -402,17 +411,37 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
void xe_display_pm_suspend_late(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_power_suspend_late(display, s2idle);
+}
+
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_power_domains_suspend(xe, s2idle);
+ if (xe->d3cold.allowed)
+ xe_display_pm_suspend_late(xe);
- intel_display_power_suspend_late(xe);
+ /*
+ * If xe_display_pm_suspend_late() is not called, it is likely
+	 * that we will be in dynamic DC states with DMC wakelock enabled. We
+ * need to flush the release work in that case.
+ */
+ intel_dmc_wl_flush_release_work(display);
}
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -421,17 +450,17 @@ void xe_display_pm_shutdown_late(struct xe_device *xe)
* for now leaving all display power wells in the INIT power domain
* enabled.
*/
- intel_power_domains_driver_remove(xe);
+ intel_power_domains_driver_remove(display);
}
void xe_display_pm_resume_early(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_power_resume_early(xe);
-
- intel_power_domains_resume(xe);
+ intel_display_power_resume_early(display);
}
static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
@@ -446,20 +475,17 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
if (has_display(xe))
drm_mode_config_reset(&xe->drm);
- intel_display_driver_init_hw(xe);
- intel_hpd_init(xe);
+ intel_display_driver_init_hw(display);
if (!runtime && has_display(xe))
- intel_display_driver_resume_access(xe);
+ intel_display_driver_resume_access(display);
- /* MST sideband requires HPD interrupts enabled */
- if (!runtime)
- intel_dp_mst_resume(xe);
+ intel_hpd_init(xe);
if (!runtime && has_display(xe)) {
- intel_display_driver_resume(xe);
+ intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
- intel_display_driver_enable_user_access(xe);
+ intel_display_driver_enable_user_access(display);
}
if (has_display(xe))
@@ -470,7 +496,7 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
if (!runtime)
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
- intel_power_domains_enable(xe);
+ intel_power_domains_enable(display);
}
void xe_display_pm_resume(struct xe_device *xe)
@@ -495,21 +521,23 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
static void display_device_remove(struct drm_device *dev, void *arg)
{
- struct xe_device *xe = arg;
+ struct intel_display *display = arg;
- intel_display_device_remove(xe);
+ intel_display_device_remove(display);
}
int xe_display_probe(struct xe_device *xe)
{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct intel_display *display;
int err;
if (!xe->info.probe_display)
goto no_display;
- intel_display_device_probe(xe);
+ display = intel_display_device_probe(pdev);
- err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
+ err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 17afa537aee5..233f81a26c25 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -41,6 +41,7 @@ void xe_display_pm_shutdown_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
void xe_display_pm_resume(struct xe_device *xe);
void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe);
void xe_display_pm_runtime_resume(struct xe_device *xe);
#else
@@ -74,6 +75,7 @@ static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
static inline void xe_display_pm_resume(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
#endif /* CONFIG_DRM_XE_DISPLAY */
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 761510ae0690..9fa51b84737c 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -161,7 +161,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
}
vma->dpt = dpt;
- vma->node = dpt->ggtt_node;
+ vma->node = dpt->ggtt_node[tile0->id];
return 0;
}
@@ -213,8 +213,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node;
+ if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[ggtt->tile->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x, size = bo->ttm.base.size;
@@ -345,10 +345,12 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
+ u8 tile_id = vma->node->ggtt->tile->id;
+
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
- else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
- vma->bo->ggtt_node->base.start != vma->node->base.start)
+ else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
+ vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
xe_ggtt_node_remove(vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 8c113463a3d5..2eb9633f163a 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -275,12 +275,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
}
}
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
{
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
@@ -294,7 +294,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+ display->funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -302,7 +302,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
*/
intel_find_initial_plane_obj(crtc, plane_configs);
- if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 7c78496e6213..d86219dedde2 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -83,6 +83,8 @@
#define RING_IMR(base) XE_REG((base) + 0xa8)
#define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac)
+#define CS_INT_VEC(base) XE_REG((base) + 0x1b8)
+
#define RING_EIR(base) XE_REG((base) + 0xb0)
#define RING_EMR(base) XE_REG((base) + 0xb4)
#define RING_ESR(base) XE_REG((base) + 0xb8)
@@ -138,6 +140,7 @@
#define RING_MODE(base) XE_REG((base) + 0x29c)
#define GFX_DISABLE_LEGACY_MODE REG_BIT(3)
+#define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13)
#define RING_TIMESTAMP(base) XE_REG((base) + 0x358)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 0c9e4b2fafab..162f18e975da 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -445,6 +445,8 @@
#define SAMPLER_MODE XE_REG_MCR(0xe18c, XE_REG_OPTION_MASKED)
#define ENABLE_SMALLPL REG_BIT(15)
+#define SMP_WAIT_FETCH_MERGING_COUNTER REG_GENMASK(11, 10)
+#define SMP_FORCE_128B_OVERFETCH REG_FIELD_PREP(SMP_WAIT_FETCH_MERGING_COUNTER, 1)
#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
#define INDIRECT_STATE_BASE_ADDR_OVERRIDE REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 045dfd09db99..57944f90bbf6 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -25,6 +25,9 @@
#define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3)
#define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4)
+#define CTX_CS_INT_VEC_REG 0x5a
+#define CTX_CS_INT_VEC_DATA (CTX_CS_INT_VEC_REG + 1)
+
#define INDIRECT_CTX_RING_HEAD (0x02 + 1)
#define INDIRECT_CTX_RING_TAIL (0x04 + 1)
#define INDIRECT_CTX_RING_START (0x06 + 1)
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a9b0091cb7ee..a49561e9f3c3 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -41,14 +41,6 @@
#define OAG_OABUFFER XE_REG(0xdb08)
#define OABUFFER_SIZE_MASK REG_GENMASK(5, 3)
-#define OABUFFER_SIZE_128K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 0)
-#define OABUFFER_SIZE_256K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 1)
-#define OABUFFER_SIZE_512K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 2)
-#define OABUFFER_SIZE_1M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 3)
-#define OABUFFER_SIZE_2M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 4)
-#define OABUFFER_SIZE_4M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 5)
-#define OABUFFER_SIZE_8M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 6)
-#define OABUFFER_SIZE_16M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 7)
#define OAG_OABUFFER_MEMORY_SELECT REG_BIT(0) /* 0: PPGTT, 1: GGTT */
#define OAG_OACONTROL XE_REG(0xdaf4)
@@ -63,6 +55,7 @@
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
#define OAG_OA_DEBUG_START_TRIGGER_SCOPE_CONTROL REG_BIT(13)
+#define OAG_OA_DEBUG_BUF_SIZE_SELECT REG_BIT(12)
#define OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL REG_BIT(8)
#define OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL REG_BIT(7)
#define OAG_OA_DEBUG_INCLUDE_CLK_RATIO REG_BIT(6)
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
new file mode 100644
index 000000000000..f45abcd96ba8
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_PMT_H_
+#define _XE_PMT_H_
+
+#define SOC_BASE 0x280000
+
+#define BMG_PMT_BASE_OFFSET 0xDB000
+#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+
+#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
+#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
+
+#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
+#define SG_REMAP_BITS REG_GENMASK(31, 24)
+
+#endif
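
A hedged usage sketch (not part of the patch): before reading BMG telemetry through the PMT window, the remap index register would be programmed via read-modify-write; the xe_mmio_rmw32() call and the mmio handle are assumptions about the eventual user of these defines:

	xe_mmio_rmw32(mmio, SG_REMAP_INDEX1, SG_REMAP_BITS,
		      REG_FIELD_PREP(SG_REMAP_BITS, index));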
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index 51fd40ffafcb..0eedd6c26b1b 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -13,7 +13,7 @@
/**
* struct xe_reg - Register definition
*
- * Register defintion to be used by the individual register. Although the same
+ * Register definition to be used by the individual register. Although the same
* definition is used for xe_reg and xe_reg_mcr, they use different internal
* APIs for accesses.
*/
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 3293172b0128..6cf282618836 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -44,12 +44,16 @@
#define MTL_RP_STATE_CAP XE_REG(0x138000)
+#define MTL_GT_RPA_FREQUENCY XE_REG(0x138008)
#define MTL_GT_RPE_FREQUENCY XE_REG(0x13800c)
#define MTL_MEDIAP_STATE_CAP XE_REG(0x138020)
#define MTL_RPN_CAP_MASK REG_GENMASK(24, 16)
#define MTL_RP0_CAP_MASK REG_GENMASK(8, 0)
+#define MTL_MPA_FREQUENCY XE_REG(0x138028)
+#define MTL_RPA_MASK REG_GENMASK(8, 0)
+
#define MTL_MPE_FREQUENCY XE_REG(0x13802c)
#define MTL_RPE_MASK REG_GENMASK(8, 0)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3e0ae40ebbd2..6795d1d916e4 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -49,6 +49,13 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
KUNIT_FAIL(test, "Failed to submit bo clear.\n");
return PTR_ERR(fence);
}
+
+ if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) {
+ dma_fence_put(fence);
+ KUNIT_FAIL(test, "Timeout while clearing bo.\n");
+ return -ETIME;
+ }
+
dma_fence_put(fence);
}
@@ -257,10 +264,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
@@ -599,8 +605,6 @@ static void xe_bo_shrink_kunit(struct kunit *test)
static struct kunit_case xe_bo_tests[] = {
KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
- KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
- {.speed = KUNIT_SPEED_SLOW}),
{}
};
@@ -611,3 +615,17 @@ struct kunit_suite xe_bo_test_suite = {
.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
+
+static struct kunit_case xe_bo_shrink_test[] = {
+ KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
+ {.speed = KUNIT_SPEED_SLOW}),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_shrink_test_suite = {
+ .name = "xe_bo_shrink",
+ .test_cases = xe_bo_shrink_test,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_shrink_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index 0d36ab864ec0..81277c77016d 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -6,11 +6,13 @@
#include <kunit/test.h>
extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 3bbdb362d6f0..d5fe0ea889ad 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -83,7 +83,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
@@ -642,7 +643,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -666,7 +669,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -690,7 +694,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 6f9b7a266b41..ef1e5256c56a 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -58,7 +58,7 @@ static void read_l3cc_table(struct xe_gt *gt,
mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
} else {
- /* Just re-use value read on previous iteration */
+ /* Just reuse value read on previous iteration */
reg_val >>= 16;
}
@@ -162,8 +162,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 04d6b95c6d87..68fe70ce2be3 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -14,7 +14,7 @@
#include "xe_step.h"
/**
- * DOC: Xe ASSERTs
+ * DOC: Xe Asserts
*
 * While the Xe driver aims to be simpler than the legacy i915 driver, it is still
* complex enough that some changes introduced while adding new functionality
@@ -103,7 +103,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_assert(xe, condition) xe_assert_msg((xe), condition, "")
#define xe_assert_msg(xe, condition, msg, arg...) ({ \
@@ -138,7 +138,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_tile_assert(tile, condition) xe_tile_assert_msg((tile), condition, "")
#define xe_tile_assert_msg(tile, condition, msg, arg...) ({ \
@@ -162,7 +162,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_gt_assert(gt, condition) xe_gt_assert_msg((gt), condition, "")
#define xe_gt_assert_msg(gt, condition, msg, arg...) ({ \
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index ef777dbdf4ec..9570672fce33 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -41,7 +41,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
/*
* We need to allocate space for the requested number of dwords,
* one additional MI_BATCH_BUFFER_END dword, and additional buffer
- * space to accomodate the platform-specific hardware prefetch
+ * space to accommodate the platform-specific hardware prefetch
* requirements.
*/
bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index f61a8ef38094..3f5391d416d4 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -162,6 +162,15 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
}
}
+static bool force_contiguous(u32 bo_flags)
+{
+ /*
+	 * For eviction / restore on suspend / resume, objects pinned in VRAM
+	 * must be contiguous; also, only contiguous BOs support xe_bo_vmap.
+ */
+ return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+}
+
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
@@ -175,12 +184,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
xe_assert(xe, vram && vram->usable_size);
io_size = vram->io_size;
- /*
- * For eviction / restore on suspend / resume objects
- * pinned in VRAM must be contiguous
- */
- if (bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT))
+ if (force_contiguous(bo_flags))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (io_size < vram->usable_size) {
@@ -212,8 +216,7 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
- .flags = bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT) ?
+ .flags = force_contiguous(bo_flags) ?
TTM_PL_FLAG_CONTIGUOUS : 0,
};
*c += 1;
@@ -442,6 +445,14 @@ static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
kfree(tt);
}
+static bool xe_ttm_resource_visible(struct ttm_resource *mem)
+{
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(mem);
+
+ return vres->used_visible_size == mem->size;
+}
+
static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
@@ -453,11 +464,9 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
return 0;
case XE_PL_VRAM0:
case XE_PL_VRAM1: {
- struct xe_ttm_vram_mgr_resource *vres =
- to_xe_ttm_vram_mgr_resource(mem);
struct xe_mem_region *vram = res_to_mem_region(mem);
- if (vres->used_visible_size < mem->size)
+ if (!xe_ttm_resource_visible(mem))
return -EINVAL;
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -777,7 +786,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
* / resume, some of the pinned memory is required for the
* device to resume / use the GPU to move other evicted memory
* (user memory) around. This likely could be optimized a bit
- * futher where we find the minimum set of pinned memory
+ * further where we find the minimum set of pinned memory
 * required for resume, but for simplicity we do a memcpy for all
* pinned memory.
*/
@@ -866,7 +875,7 @@ out:
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
- * On successful completion, the object memory will be moved to sytem memory.
+ * On successful completion, the object memory will be moved to system memory.
*
 * This is needed for special handling of pinned VRAM objects during
* suspend-resume.
@@ -884,6 +893,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
};
struct ttm_operation_ctx ctx = {
.interruptible = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_resource *new_mem;
int ret;
@@ -945,6 +955,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
+ .gfp_retry_mayfail = false,
};
struct ttm_resource *new_mem;
struct ttm_place *place = &bo->placements[0];
@@ -1114,7 +1125,8 @@ static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operati
static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
{
struct ttm_operation_ctx ctx = {
- .interruptible = false
+ .interruptible = false,
+ .gfp_retry_mayfail = false,
};
if (ttm_bo->ttm) {
@@ -1126,6 +1138,52 @@ static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
}
}
+static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct iosys_map vmap;
+ struct xe_res_cursor cursor;
+ struct xe_mem_region *vram;
+ int bytes_left = len;
+
+ xe_bo_assert_held(bo);
+ xe_device_assert_mem_access(xe);
+
+ if (!mem_type_is_vram(ttm_bo->resource->mem_type))
+ return -EIO;
+
+ /* FIXME: Use GPU for non-visible VRAM */
+ if (!xe_ttm_resource_visible(ttm_bo->resource))
+ return -EIO;
+
+ vram = res_to_mem_region(ttm_bo->resource);
+ xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
+ bo->size - (offset & PAGE_MASK), &cursor);
+
+ do {
+ unsigned long page_offset = (offset & ~PAGE_MASK);
+ int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
+
+ iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
+ cursor.start);
+ if (write)
+ xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
+ else
+ xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
+
+ buf += byte_count;
+ offset += byte_count;
+ bytes_left -= byte_count;
+ if (bytes_left)
+ xe_res_next(&cursor, PAGE_SIZE);
+ } while (bytes_left);
+
+ return len;
+}
+
const struct ttm_device_funcs xe_ttm_funcs = {
.ttm_tt_create = xe_ttm_tt_create,
.ttm_tt_populate = xe_ttm_tt_populate,
@@ -1135,6 +1193,7 @@ const struct ttm_device_funcs xe_ttm_funcs = {
.move = xe_bo_move,
.io_mem_reserve = xe_ttm_io_mem_reserve,
.io_mem_pfn = xe_ttm_io_mem_pfn,
+ .access_memory = xe_ttm_access_memory,
.release_notify = xe_ttm_bo_release_notify,
.eviction_valuable = ttm_bo_eviction_valuable,
.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
@@ -1145,6 +1204,8 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct xe_tile *tile;
+ u8 id;
if (bo->ttm.base.import_attach)
drm_prime_gem_destroy(&bo->ttm.base, NULL);
@@ -1152,8 +1213,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
- if (bo->ggtt_node && bo->ggtt_node->base.size)
- xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
+ for_each_tile(tile, xe, id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
+ xe_ggtt_remove_bo(tile->mem.ggtt, bo);
#ifdef CONFIG_PROC_FS
if (bo->client)
@@ -1251,11 +1313,50 @@ out:
return ret;
}
+static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = ttm_bo_vm_access(vma, addr, buf, len, write);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+/**
+ * xe_bo_read() - Read from an xe_bo
+ * @bo: The buffer object to read from.
+ * @offset: The byte offset to start reading from.
+ * @dst: Location to store the read.
+ * @size: Size in bytes for the read.
+ *
+ * Read @size bytes from the @bo, starting from @offset, storing into @dst.
+ *
+ * Return: Zero on success, or negative error.
+ */
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
+{
+ int ret;
+
+ ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
+ if (ret >= 0 && ret != size)
+ ret = -EIO;
+ else if (ret == size)
+ ret = 0;
+
+ return ret;
+}
+
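A short hedged usage sketch: reading the head of a kernel BO with the new helper, which routes through ttm_bo_access() and, for CPU-visible VRAM, the xe_ttm_access_memory() callback added above:

	u32 header;
	int err = xe_bo_read(bo, 0, &header, sizeof(header));

	if (err)
		return err;	/* a short read is reported as -EIO */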
static const struct vm_operations_struct xe_gem_vm_ops = {
.fault = xe_gem_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
+ .access = xe_bo_vm_access,
};
static const struct drm_gem_object_funcs xe_gem_object_funcs = {
@@ -1269,7 +1370,7 @@ static const struct drm_gem_object_funcs xe_gem_object_funcs = {
/**
* xe_bo_alloc - Allocate storage for a struct xe_bo
*
- * This funcition is intended to allocate storage to be used for input
+ * This function is intended to allocate storage to be used for input
* to __xe_bo_create_locked(), in the case a pointer to the bo to be
* created is needed before the call to __xe_bo_create_locked().
 * If __xe_bo_create_locked ends up never being called, then the
@@ -1309,6 +1410,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement *placement;
uint32_t alignment;
@@ -1323,6 +1425,10 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
+ /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
+ if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
+ return ERR_PTR(-EINVAL);
+
if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
@@ -1513,19 +1619,29 @@ __xe_bo_create_locked(struct xe_device *xe,
bo->vm = vm;
if (bo->flags & XE_BO_FLAG_GGTT) {
- if (!tile && flags & XE_BO_FLAG_STOLEN)
- tile = xe_device_get_root_tile(xe);
+ struct xe_tile *t;
+ u8 id;
- xe_assert(xe, tile);
+ if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
+ if (!tile && flags & XE_BO_FLAG_STOLEN)
+ tile = xe_device_get_root_tile(xe);
- if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
- err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
- start + bo->size, U64_MAX);
- } else {
- err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
+ xe_assert(xe, tile);
+ }
+
+ for_each_tile(t, xe, id) {
+ if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
+ continue;
+
+ if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
+ err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
+ start + bo->size, U64_MAX);
+ } else {
+ err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+ }
+ if (err)
+ goto err_unlock_put_bo;
}
- if (err)
- goto err_unlock_put_bo;
}
return bo;
@@ -1908,6 +2024,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
if (vm) {
@@ -1918,6 +2035,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
+ trace_xe_bo_validate(bo);
return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
}
@@ -1969,13 +2087,15 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
int xe_bo_vmap(struct xe_bo *bo)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
void *virtual;
bool is_iomem;
int ret;
xe_bo_assert_held(bo);
- if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
+ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
+ !force_contiguous(bo->flags)))
return -EINVAL;
if (!iosys_map_is_null(&bo->vmap))
@@ -2251,6 +2371,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
struct ttm_place requested;
@@ -2291,7 +2412,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
* @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
- * placement. Ths function blocks until the object has been fully moved.
+ * placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
@@ -2301,6 +2422,7 @@ int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
.interruptible = false,
.no_wait_gpu = false,
.force_alloc = force_alloc,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
int ret;
@@ -2380,14 +2502,18 @@ void xe_bo_put_commit(struct llist_head *deferred)
void xe_bo_put(struct xe_bo *bo)
{
+ struct xe_tile *tile;
+ u8 id;
+
might_sleep();
if (bo) {
#ifdef CONFIG_PROC_FS
if (bo->client)
might_lock(&bo->client->bos_lock);
#endif
- if (bo->ggtt_node && bo->ggtt_node->ggtt)
- might_lock(&bo->ggtt_node->ggtt->lock);
+ for_each_tile(tile, xe_bo_device(bo), id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
+ might_lock(&bo->ggtt_node[id]->ggtt->lock);
drm_gem_object_put(&bo->ttm.base);
}
}
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 7fa44a0138b0..d9386ab03140 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,10 +39,22 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
+#define XE_BO_FLAG_GGTT0 BIT(18)
+#define XE_BO_FLAG_GGTT1 BIT(19)
+#define XE_BO_FLAG_GGTT2 BIT(20)
+#define XE_BO_FLAG_GGTT3 BIT(21)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTTx(tile) \
+ (XE_BO_FLAG_GGTT0 << (tile)->id)
+
#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
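
A hedged usage sketch of the new per-tile GGTT flags: a BO that must be mapped into every tile's GGTT combines XE_BO_FLAG_GGTT with the matching XE_BO_FLAG_GGTTx() bits (creation call omitted; flag composition only):

	u32 flags = XE_BO_FLAG_GGTT;
	struct xe_tile *tile;
	u8 id;

	for_each_tile(tile, xe, id)
		flags |= XE_BO_FLAG_GGTTx(tile);

Per the validation added in xe_bo.c, GGTTx bits without XE_BO_FLAG_GGTT are rejected with -EINVAL.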
@@ -194,18 +206,29 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
}
static inline u32
-xe_bo_ggtt_addr(struct xe_bo *bo)
+__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];
+
+ if (XE_WARN_ON(!ggtt_node))
return 0;
- XE_WARN_ON(bo->ggtt_node->base.size > bo->size);
- XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32));
- return bo->ggtt_node->base.start;
+ XE_WARN_ON(ggtt_node->base.size > bo->size);
+ XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
+ return ggtt_node->base.start;
+}
+
+static inline u32
+xe_bo_ggtt_addr(struct xe_bo *bo)
+{
+ xe_assert(xe_bo_device(bo), bo->tile);
+
+ return __xe_bo_ggtt_addr(bo, bo->tile->id);
}
int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);
bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h
index f57d440cc95a..25a884c64bf1 100644
--- a/drivers/gpu/drm/xe/xe_bo_doc.h
+++ b/drivers/gpu/drm/xe/xe_bo_doc.h
@@ -41,7 +41,7 @@
* created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
* access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
* user BOs are evictable and user BOs are never pinned by XE. The allocation of
- * the backing store can be defered from creation time until first use which is
+ * the backing store can be deferred from creation time until first use which is
* either mmap, bind, or pagefault.
*
* Private BOs
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 8fb2be061003..6a40eedd9db1 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -152,11 +152,17 @@ int xe_bo_restore_kernel(struct xe_device *xe)
}
if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile = bo->tile;
+ struct xe_tile *tile;
+ u8 id;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
+ for_each_tile(tile, xe, id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
+
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
}
/*
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 13c6d8a69e91..46dc9e4e3e46 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -10,9 +10,9 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
+#include "xe_device_types.h"
#include "xe_ggtt_types.h"
struct xe_device;
@@ -39,8 +39,8 @@ struct xe_bo {
struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
/** @placement: current placement for this BO */
struct ttm_placement placement;
- /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
- struct xe_ggtt_node *ggtt_node;
+ /** @ggtt_node: Array of GGTT nodes if this BO is mapped in the GGTTs */
+ struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
/** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 21a50d539426..81dc7795c065 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -30,30 +30,39 @@
/**
* DOC: Xe device coredump
*
- * Devices overview:
* Xe uses dev_coredump infrastructure for exposing the crash errors in a
- * standardized way.
- * devcoredump exposes a temporary device under /sys/class/devcoredump/
- * which is linked with our card device directly.
- * The core dump can be accessed either from
- * /sys/class/drm/card<n>/device/devcoredump/ or from
- * /sys/class/devcoredump/devcd<m> where
- * /sys/class/devcoredump/devcd<m>/failing_device is a link to
- * /sys/class/drm/card<n>/device/.
+ * standardized way. Once a crash occurs, devcoredump exposes a temporary
+ * node under ``/sys/class/devcoredump/devcd<m>/``. The same node is also
+ * accessible in ``/sys/class/drm/card<n>/device/devcoredump/``. The
+ * ``failing_device`` symlink points to the device that crashed and created the
+ * coredump.
*
- * Snapshot at hang:
- * The 'data' file is printed with a drm_printer pointer at devcoredump read
- * time. For this reason, we need to take snapshots from when the hang has
- * happened, and not only when the user is reading the file. Otherwise the
- * information is outdated since the resets might have happened in between.
+ * The following characteristics are observed by xe when creating a device
+ * coredump:
*
- * 'First' failure snapshot:
- * In general, the first hang is the most critical one since the following hangs
- * can be a consequence of the initial hang. For this reason we only take the
- * snapshot of the 'first' failure and ignore subsequent calls of this function,
- * at least while the coredump device is alive. Dev_coredump has a delayed work
- * queue that will eventually delete the device and free all the dump
- * information.
+ * **Snapshot at hang**:
+ * The 'data' file contains a snapshot of the HW and driver states at the time
+ * the hang happened. Due to the driver recovering from resets/crashes, it may
+ * not correspond to the state of the system when the file is read by
+ * userspace.
+ *
+ * **Coredump release**:
+ * After a coredump is generated, it stays in kernel memory until released by
+ * userspace by writing anything to it, or after an internal timer expires. The
+ * exact timeout may vary and should not be relied upon. Example to release
+ * a coredump:
+ *
+ * .. code-block:: shell
+ *
+ * $ > /sys/class/drm/card0/device/devcoredump/data
+ *
+ * **First failure only**:
+ * In general, the first hang is the most critical one since the following
+ * hangs can be a consequence of the initial hang. For this reason a snapshot
+ * is taken only for the first failure. Until the devcoredump is released by
+ * userspace or kernel, all subsequent hangs do not override the snapshot nor
+ * create new ones. Devcoredump has a delayed work queue that will eventually
+ * delete the file node and free all the dump information.
*/
#ifdef CONFIG_DEV_COREDUMP
@@ -91,6 +100,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
p = drm_coredump_printer(&iter);
drm_puts(&p, "**** Xe Device Coredump ****\n");
+ drm_printf(&p, "Reason: %s\n", ss->reason);
drm_puts(&p, "kernel: " UTS_RELEASE "\n");
drm_puts(&p, "module: " KBUILD_MODNAME "\n");
@@ -98,7 +108,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
ts = ktime_to_timespec64(ss->boot_time);
drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
- drm_printf(&p, "Process: %s\n", ss->process_name);
+ drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
xe_device_snapshot_print(xe, &p);
drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
@@ -134,6 +144,9 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
{
int i;
+ kfree(ss->reason);
+ ss->reason = NULL;
+
xe_guc_log_snapshot_free(ss->guc.log);
ss->guc.log = NULL;
@@ -174,16 +187,24 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
- if (!ss->read.buffer)
+ mutex_lock(&coredump->lock);
+
+ if (!ss->read.buffer) {
+ mutex_unlock(&coredump->lock);
return -ENODEV;
+ }
- if (offset >= ss->read.size)
+ if (offset >= ss->read.size) {
+ mutex_unlock(&coredump->lock);
return 0;
+ }
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ mutex_unlock(&coredump->lock);
+
return byte_copied;
}
@@ -197,15 +218,18 @@ static void xe_devcoredump_free(void *data)
cancel_work_sync(&coredump->snapshot.work);
+ mutex_lock(&coredump->lock);
+
xe_devcoredump_snapshot_free(&coredump->snapshot);
kvfree(coredump->snapshot.read.buffer);
/* To prevent stale data on next snapshot, clear everything */
memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
coredump->captured = false;
- coredump->job = NULL;
drm_info(&coredump_to_xe(coredump)->drm,
"Xe device coredump has been deleted.\n");
+
+ mutex_unlock(&coredump->lock);
}
static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
@@ -248,10 +272,10 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ struct xe_exec_queue *q,
struct xe_sched_job *job)
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
- struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
@@ -264,12 +288,14 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
- if (q->vm && q->vm->xef)
+ if (q->vm && q->vm->xef) {
process_name = q->vm->xef->process_name;
+ ss->pid = q->vm->xef->pid;
+ }
+
strscpy(ss->process_name, process_name);
ss->gt = q->gt;
- coredump->job = job;
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
cookie = dma_fence_begin_signalling();
@@ -288,10 +314,11 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
ss->ge = xe_guc_exec_queue_snapshot_capture(q);
- ss->job = xe_sched_job_snapshot_capture(job);
+ if (job)
+ ss->job = xe_sched_job_snapshot_capture(job);
ss->vm = xe_vm_snapshot_capture(q->vm);
- xe_engine_snapshot_capture_for_job(job);
+ xe_engine_snapshot_capture_for_queue(q);
queue_work(system_unbound_wq, &ss->work);
@@ -301,28 +328,42 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
/**
* xe_devcoredump - Take the required snapshots and initialize coredump device.
+ * @q: The faulty xe_exec_queue, where the issue was detected.
* @job: The faulty xe_sched_job, where the issue was detected.
+ * @fmt: Printf format + args to describe the reason for the core dump
*
* This function should be called at the crash time within the serialized
* gt_reset. It is skipped if the core dump device is still available,
* holding the information from the 'first' snapshot.
*/
-void xe_devcoredump(struct xe_sched_job *job)
+__printf(3, 4)
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...)
{
- struct xe_device *xe = gt_to_xe(job->q->gt);
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
+ va_list varg;
+
+ mutex_lock(&coredump->lock);
if (coredump->captured) {
drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
+ mutex_unlock(&coredump->lock);
return;
}
coredump->captured = true;
- devcoredump_snapshot(coredump, job);
+
+ va_start(varg, fmt);
+ coredump->snapshot.reason = kvasprintf(GFP_ATOMIC, fmt, varg);
+ va_end(varg);
+
+ devcoredump_snapshot(coredump, q, job);
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
xe->drm.primary->index);
+
+ mutex_unlock(&coredump->lock);
}
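The variadic reason string is captured with kvasprintf() under GFP_ATOMIC. A
minimal, runnable userspace analogue of that capture pattern, with GNU
vasprintf() standing in for kvasprintf() (capture_reason() and the sample
message are hypothetical):

.. code-block:: c

   #define _GNU_SOURCE	/* for vasprintf() */
   #include <stdarg.h>
   #include <stdio.h>
   #include <stdlib.h>

   /* Userspace stand-in for the kvasprintf() capture in xe_devcoredump() */
   static char *capture_reason(const char *fmt, ...)
   {
   	va_list varg;
   	char *reason;

   	va_start(varg, fmt);
   	if (vasprintf(&reason, fmt, varg) < 0)
   		reason = NULL;	/* allocation failed, record no reason */
   	va_end(varg);

   	return reason;
   }

   int main(void)
   {
   	char *reason = capture_reason("Timedout job: guc_id=%d", 3);

   	printf("Reason: %s\n", reason ? reason : "(unknown)");
   	free(reason);
   	return 0;
   }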
static void xe_driver_devcoredump_fini(void *arg)
@@ -334,6 +375,18 @@ static void xe_driver_devcoredump_fini(void *arg)
int xe_devcoredump_init(struct xe_device *xe)
{
+ int err;
+
+ err = drmm_mutex_init(&xe->drm, &xe->devcoredump.lock);
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xe->devcoredump.lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index a4eebc285fc8..6a17e6d60102 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -10,13 +10,16 @@
struct drm_printer;
struct xe_device;
+struct xe_exec_queue;
struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_sched_job *job);
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...);
int xe_devcoredump_init(struct xe_device *xe);
#else
-static inline void xe_devcoredump(struct xe_sched_job *job)
+static inline void xe_devcoredump(struct xe_exec_queue *q,
+ struct xe_sched_job *job,
+ const char *fmt, ...)
{
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 3703ddea1252..1a1d16a96b2d 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -28,6 +28,10 @@ struct xe_devcoredump_snapshot {
ktime_t boot_time;
/** @process_name: Name of process that triggered this gpu hang */
char process_name[TASK_COMM_LEN];
+ /** @pid: Process id of process that triggered this gpu hang */
+ pid_t pid;
+ /** @reason: The reason the coredump was triggered */
+ char *reason;
/** @gt: Affected GT, used by forcewake for delayed capture */
struct xe_gt *gt;
@@ -76,12 +80,12 @@ struct xe_devcoredump_snapshot {
* for reading the information.
*/
struct xe_devcoredump {
- /** @captured: The snapshot of the first hang has already been taken. */
+ /** @lock: protects access to entire structure */
+ struct mutex lock;
+ /** @captured: The snapshot of the first hang has already been taken */
bool captured;
/** @snapshot: Snapshot is captured at time of the first crash */
struct xe_devcoredump_snapshot snapshot;
- /** @job: Point to the faulting job */
- struct xe_sched_job *job;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 06d6db8b50f9..4de26470a4ae 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -44,6 +44,7 @@
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
#include "xe_pcode.h"
@@ -55,6 +56,7 @@
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vram.h"
+#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
@@ -269,7 +271,6 @@ static struct drm_driver driver = {
.fops = &xe_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -324,7 +325,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
- spin_lock_init(&xe->irq.lock);
+ err = xe_irq_init(xe);
+ if (err)
+ goto err;
init_waitqueue_head(&xe->ufence_wq);
@@ -366,6 +369,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
goto err;
}
+ err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
+ if (err)
+ goto err;
+
err = xe_display_create(xe);
if (WARN_ON(err))
goto err;
@@ -599,7 +606,7 @@ static int probe_has_flat_ccs(struct xe_device *xe)
u32 reg;
/* Always enabled/disabled, no runtime check to do */
- if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
+ if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
return 0;
gt = xe_root_mmio_gt(xe);
@@ -760,6 +767,8 @@ int xe_device_probe(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
+ xe_vsec_init(xe);
+
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_fini_display:
@@ -990,7 +999,7 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
*
- * This is a final state that can only be cleared with a mudule
+ * This is a final state that can only be cleared with a module
* re-probe (unbind + bind).
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index f1fbfe916867..fc3c2af3fb7f 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -157,8 +157,7 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
static inline bool xe_device_has_msix(struct xe_device *xe)
{
- /* TODO: change this when MSI-X support is fully integrated */
- return false;
+ return xe->irq.msix.nvec > 0;
}
static inline bool xe_device_has_memirq(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index b9ea455d6f59..8a7b15972413 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -16,7 +16,7 @@
#include "xe_heci_gsc.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_platform_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
@@ -42,8 +42,6 @@ struct xe_pat_ops;
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
-#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
-#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi)
#define XE_VRAM_FLAGS_NEED64K BIT(0)
@@ -296,14 +294,24 @@ struct xe_device {
/** @info.va_bits: Maximum bits of a virtual address */
u8 va_bits;
- /** @info.is_dgfx: is discrete device */
- u8 is_dgfx:1;
- /** @info.has_asid: Has address space ID */
- u8 has_asid:1;
+ /*
+ * Keep all flags below alphabetically sorted
+ */
+
/** @info.force_execlist: Forced execlist submission */
u8 force_execlist:1;
+ /** @info.has_asid: Has address space ID */
+ u8 has_asid:1;
+ /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
+ u8 has_atomic_enable_pte_bit:1;
+ /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
+ u8 has_device_atomics_on_smem:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
+ /** @info.has_heci_cscfi: device has heci cscfi */
+ u8 has_heci_cscfi:1;
+ /** @info.has_heci_gscfi: device has heci gscfi */
+ u8 has_heci_gscfi:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
/** @info.has_mmio_ext: Device has extra MMIO address range */
@@ -314,6 +322,8 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
+ /** @info.is_dgfx: is discrete device */
+ u8 is_dgfx:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -323,20 +333,12 @@ struct xe_device {
* state the firmware or bootloader left it in.
*/
u8 probe_display:1;
+ /** @info.skip_guc_pc: Skip GuC based PM feature init */
+ u8 skip_guc_pc:1;
/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
- /** @info.has_heci_gscfi: device has heci gscfi */
- u8 has_heci_gscfi:1;
- /** @info.has_heci_cscfi: device has heci cscfi */
- u8 has_heci_cscfi:1;
- /** @info.skip_guc_pc: Skip GuC based PM feature init */
- u8 skip_guc_pc:1;
- /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
- u8 has_atomic_enable_pte_bit:1;
- /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
- u8 has_device_atomics_on_smem:1;
} info;
/** @irq: device interrupt state */
@@ -345,7 +347,15 @@ struct xe_device {
spinlock_t lock;
/** @irq.enabled: interrupts enabled on this device */
- bool enabled;
+ atomic_t enabled;
+
+ /** @irq.msix: irq info for platforms that support MSI-X */
+ struct {
+ /** @irq.msix.nvec: number of MSI-X interrupts */
+ u16 nvec;
+ /** @irq.msix.indexes: used to allocate MSI-X indexes */
+ struct xarray indexes;
+ } msix;
} irq;
/** @ttm: ttm device */
@@ -374,6 +384,8 @@ struct xe_device {
/** @sriov.pf: PF specific data */
struct xe_device_pf pf;
+ /** @sriov.vf: VF specific data */
+ struct xe_device_vf vf;
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
@@ -481,6 +493,12 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pmt: Support the PMT driver callback interface */
+ struct {
+ /** @pmt.lock: protect access for telemetry data */
+ struct mutex lock;
+ } pmt;
+
/**
* @pm_callback_task: Track the active task that is running in either
* the runtime_suspend or runtime_resume callbacks.
@@ -588,7 +606,7 @@ struct xe_file {
/** @vm.xe: xarray to store VMs */
struct xarray xa;
/**
- * @vm.lock: Protects VM lookup + reference and removal a from
+ * @vm.lock: Protects VM lookup + reference and removal from
* file xarray. Not intended to be an outer lock which does
* things while being held.
*/
@@ -601,10 +619,15 @@ struct xe_file {
struct xarray xa;
/**
* @exec_queue.lock: Protects exec queue lookup + reference and
- * removal a frommfile xarray. Not an intended to be an outer
- * lock which does thing while being held.
+ * removal from file xarray. Not intended to be an outer lock
+ * which does things while being held.
*/
struct mutex lock;
+ /**
+ * @exec_queue.pending_removal: count of items pending removal, used
+ * to synchronize GPU state updates with ongoing queries.
+ */
+ atomic_t pending_removal;
} exec_queue;
/** @run_ticks: hw engine class run time in ticks for this drm client */
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 22f0f1a6dfd5..63f30b6df70b 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -261,6 +261,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
if (man) {
drm_print_memory_stats(p,
&stats[mem_type],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
@@ -269,6 +270,49 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
}
}
+static struct xe_hw_engine *any_engine(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned long gt_id;
+
+ for_each_gt(gt, xe, gt_id) {
+ struct xe_hw_engine *hwe = xe_gt_any_hw_engine(gt);
+
+ if (hwe)
+ return hwe;
+ }
+
+ return NULL;
+}
+
+static bool force_wake_get_any_engine(struct xe_device *xe,
+ struct xe_hw_engine **phwe,
+ unsigned int *pfw_ref)
+{
+ enum xe_force_wake_domains domain;
+ unsigned int fw_ref;
+ struct xe_hw_engine *hwe;
+ struct xe_force_wake *fw;
+
+ hwe = any_engine(xe);
+ if (!hwe)
+ return false;
+
+ domain = xe_hw_engine_to_fw_domain(hwe);
+ fw = gt_to_fw(hwe->gt);
+
+ fw_ref = xe_force_wake_get(fw, domain);
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain)) {
+ xe_force_wake_put(fw, fw_ref);
+ return false;
+ }
+
+ *phwe = hwe;
+ *pfw_ref = fw_ref;
+
+ return true;
+}
+
static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
{
unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };
@@ -280,7 +324,18 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
u64 gpu_timestamp;
unsigned int fw_ref;
+ /*
+ * Wait for any exec queues that are going away: their cycles get updated
+ * on context switch out, so wait for that to happen
+ */
+ wait_var_event(&xef->exec_queue.pending_removal,
+ !atomic_read(&xef->exec_queue.pending_removal));
+
xe_pm_runtime_get(xe);
+ if (!force_wake_get_any_engine(xe, &hwe, &fw_ref)) {
+ xe_pm_runtime_put(xe);
+ return;
+ }
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
@@ -295,33 +350,11 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
}
mutex_unlock(&xef->exec_queue.lock);
- /* Get the total GPU cycles */
- for_each_gt(gt, xe, gt_id) {
- enum xe_force_wake_domains fw;
-
- hwe = xe_gt_any_hw_engine(gt);
- if (!hwe)
- continue;
-
- fw = xe_hw_engine_to_fw_domain(hwe);
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), fw);
- if (!xe_force_wake_ref_has_domain(fw_ref, fw)) {
- hwe = NULL;
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- break;
- }
-
- gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- break;
- }
+ gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
+ xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref);
xe_pm_runtime_put(xe);
- if (unlikely(!hwe))
- return;
-
for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
const char *class_name;
@@ -352,7 +385,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
* @p: The drm_printer ptr
* @file: The drm_file ptr
*
- * This is callabck for drm fdinfo interface. Register this callback
+ * This is callback for drm fdinfo interface. Register this callback
* in drm driver ops for show_fdinfo.
*
* Return: void
diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h
index d45b71426cc8..d61650d4aa0b 100644
--- a/drivers/gpu/drm/xe/xe_drv.h
+++ b/drivers/gpu/drm/xe/xe_drv.h
@@ -10,7 +10,6 @@
#define DRIVER_NAME "xe"
#define DRIVER_DESC "Intel Xe Graphics"
-#define DRIVER_DATE "20201103"
/* Interface history:
*
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 31cca938956f..df8ce550deb4 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -33,7 +33,7 @@
*
* In XE we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, have binds as
- * seperate operations, and using the DRM scheduler to flow control the ring.
+ * separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
*
* We can get away from a BO list by forcing the user to use in / out fences on
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 268cd3123be9..7e1abbbfba12 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -17,6 +17,7 @@
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
@@ -69,6 +70,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->gt = gt;
q->class = hwe->class;
q->width = width;
+ q->msix_vec = XE_IRQ_DEFAULT_MSIX;
q->logical_mask = logical_mask;
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
@@ -118,7 +120,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
}
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
+ q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
goto err_unlock;
@@ -241,6 +243,7 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
return q;
}
+ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
void xe_exec_queue_destroy(struct kref *ref)
{
@@ -263,8 +266,11 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
+ * and wake up any waiters.
*/
xe_exec_queue_update_run_ticks(q);
+ if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
+ wake_up_var(&q->xef->exec_queue.pending_removal);
for (i = 0; i < q->width; ++i)
xe_lrc_put(q->lrc[i]);
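The pending_removal counter pairs the wait_var_event() in show_run_ticks()
(xe_drm_client.c above) with the atomic_dec_and_test()/wake_up_var() here. A
self-contained userspace sketch of the same drain pattern, using C11 atomics
and a condition variable in place of the kernel primitives (queue_fini() and
the sleep are illustrative):

.. code-block:: c

   #include <pthread.h>
   #include <stdatomic.h>
   #include <stdio.h>
   #include <unistd.h>

   /*
    * Userspace analogue of the pending_removal drain: the reader waits
    * until every in-flight teardown has finished its accounting.
    */
   static atomic_int pending;
   static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
   static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

   static void *queue_fini(void *arg)
   {
   	usleep(1000);	/* stand-in for the teardown updating run ticks */

   	/* atomic_dec_and_test() + wake_up_var() equivalent */
   	if (atomic_fetch_sub(&pending, 1) == 1) {
   		pthread_mutex_lock(&lock);
   		pthread_cond_broadcast(&cond);
   		pthread_mutex_unlock(&lock);
   	}
   	return NULL;
   }

   int main(void)
   {
   	pthread_t t;

   	atomic_store(&pending, 1);	/* one removal in flight */
   	pthread_create(&t, NULL, queue_fini, NULL);

   	/* wait_var_event() equivalent: sleep until the counter hits zero */
   	pthread_mutex_lock(&lock);
   	while (atomic_load(&pending))
   		pthread_cond_wait(&cond, &lock);
   	pthread_mutex_unlock(&lock);

   	printf("all pending removals drained\n");
   	pthread_join(t, NULL);
   	return 0;
   }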
@@ -764,25 +770,20 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
- struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
int idx;
/*
- * Jobs that are run during driver load may use an exec_queue, but are
- * not associated with a user xe file, so avoid accumulating busyness
- * for kernel specific work.
+ * Jobs that are executed by the kernel don't have a corresponding xe_file
+ * and thus are not accounted.
*/
- if (!q->vm || !q->vm->xef)
+ if (!q->xef)
return;
/* Synchronize with unbind while holding the xe file open */
if (!drm_dev_enter(&xe->drm, &idx))
return;
-
- xef = q->vm->xef;
-
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -793,7 +794,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
*/
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
- xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+ q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
drm_dev_exit(idx);
}
@@ -835,7 +836,10 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
mutex_lock(&xef->exec_queue.lock);
q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
+ if (q)
+ atomic_inc(&xef->exec_queue.pending_removal);
mutex_unlock(&xef->exec_queue.lock);
+
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 1158b6062a6c..5af5419cec7a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -41,7 +41,7 @@ struct xe_exec_queue {
/** @xef: Back pointer to xe file if this is user created exec queue */
struct xe_file *xef;
- /** @gt: graphics tile this exec queue can submit to */
+ /** @gt: GT structure this exec queue can submit to */
struct xe_gt *gt;
/**
* @hwe: A hardware of the same class. May (physical engine) or may not
@@ -63,6 +63,8 @@ struct xe_exec_queue {
char name[MAX_FENCE_NAME_LEN];
/** @width: width (number BB submitted per exec) of this exec queue */
u16 width;
+ /** @msix_vec: MSI-X vector (for platforms that support it) */
+ u16 msix_vec;
/** @fence_irq: fence IRQ used to signal job completion */
struct xe_hw_fence_irq *fence_irq;
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index a8c416a48812..5ef96deaa881 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -17,6 +17,7 @@
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
@@ -47,6 +48,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
@@ -80,8 +82,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base),
xe_bo_ggtt_addr(hwe->hwsp));
xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
lower_32_bits(lrc_desc));
@@ -265,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
port->hwe = hwe;
- port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX);
if (IS_ERR(port->lrc)) {
err = PTR_ERR(port->lrc);
goto err;
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 558fac8bb6fb..5fcb2b4c2c13 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -362,7 +362,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
/*
* So we don't need to worry about 64K GGTT layout when dealing with
- * scratch entires, rather keep the scratch page in system memory on
+ * scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
flags = XE_BO_FLAG_PINNED;
@@ -598,10 +598,10 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
u64 start;
u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node))
+ if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
return;
- start = bo->ggtt_node->base.start;
+ start = bo->ggtt_node[ggtt->tile->id]->base.start;
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
@@ -612,15 +612,16 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
- int err;
u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
+ u8 tile_id = ggtt->tile->id;
+ int err;
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
alignment = SZ_64K;
- if (XE_WARN_ON(bo->ggtt_node)) {
+ if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
return 0;
}
@@ -630,19 +631,19 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
- bo->ggtt_node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(bo->ggtt_node)) {
- err = PTR_ERR(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(bo->ggtt_node[tile_id])) {
+ err = PTR_ERR(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
goto out;
}
mutex_lock(&ggtt->lock);
- err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
- alignment, 0, start, end, 0);
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
+ bo->size, alignment, 0, start, end, 0);
if (err) {
- xe_ggtt_node_fini(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
} else {
xe_ggtt_map_bo(ggtt, bo);
}
@@ -691,13 +692,15 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
*/
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ u8 tile_id = ggtt->tile->id;
+
+ if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
return;
/* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
- xe_ggtt_node_remove(bo->ggtt_node,
+ xe_ggtt_node_remove(bo->ggtt_node[tile_id],
bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 64b2ae6839db..c250ea773491 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -71,8 +71,14 @@ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- return list_first_entry_or_null(&sched->base.pending_list,
- struct xe_sched_job, drm.list);
+ struct xe_sched_job *job;
+
+ spin_lock(&sched->base.job_list_lock);
+ job = list_first_entry_or_null(&sched->base.pending_list,
+ struct xe_sched_job, drm.list);
+ spin_unlock(&sched->base.job_list_lock);
+
+ return job;
}
static inline int
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index fc64b45d324b..24cc6a4f9a96 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -139,17 +139,29 @@ static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
return 0;
}
-static int validate_proxy_header(struct xe_gsc_proxy_header *header,
+static int validate_proxy_header(struct xe_gt *gt,
+ struct xe_gsc_proxy_header *header,
u32 source, u32 dest, u32 max_size)
{
u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
+ int ret = 0;
- if (header->destination != dest || header->source != source)
- return -ENOEXEC;
+ if (header->destination != dest || header->source != source) {
+ ret = -ENOEXEC;
+ goto out;
+ }
- if (length + PROXY_HDR_SIZE > max_size)
- return -E2BIG;
+ if (length + PROXY_HDR_SIZE > max_size) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* We only care about the status if this is a message for the driver */
+ if (dest == GSC_PROXY_ADDRESSING_KMD && header->status != 0) {
+ ret = -EIO;
+ goto out;
+ }
switch (type) {
case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
@@ -157,12 +169,20 @@ static int validate_proxy_header(struct xe_gsc_proxy_header *header,
break;
fallthrough;
case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
- return -EIO;
+ ret = -EIO;
+ break;
default:
break;
}
- return 0;
+out:
+ if (ret)
+ xe_gt_err(gt,
+ "GSC proxy error: s=0x%x[0x%x], d=0x%x[0x%x], t=%u, l=0x%x, st=0x%x\n",
+ header->source, source, header->destination, dest,
+ type, length, header->status);
+
+ return ret;
}
#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
@@ -228,12 +248,17 @@ static int proxy_query(struct xe_gsc *gsc)
xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
reply_offset, PROXY_HDR_SIZE);
- /* stop if this was the last message */
- if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
+ /* Check the status and stop if this was the last message */
+ if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END) {
+ ret = validate_proxy_header(gt, to_csme_hdr,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_ADDRESSING_KMD,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
break;
+ }
/* make sure the GSC-to-CSME proxy header is sane */
- ret = validate_proxy_header(to_csme_hdr,
+ ret = validate_proxy_header(gt, to_csme_hdr,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_BUFFER_SIZE - reply_offset);
@@ -262,7 +287,7 @@ static int proxy_query(struct xe_gsc *gsc)
}
/* make sure the CSME-to-GSC proxy header is sane */
- ret = validate_proxy_header(gsc->proxy.from_csme,
+ ret = validate_proxy_header(gt, gsc->proxy.from_csme,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_BUFFER_SIZE - reply_offset);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 94d468d01253..26e64530ada2 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -748,10 +748,8 @@ static int do_gt_restart(struct xe_gt *gt)
if (err)
return err;
- for_each_hw_engine(hwe, gt, id) {
+ for_each_hw_engine(hwe, gt, id)
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
- }
/* Get CCS mode in sync between sw/hw */
xe_gt_apply_ccs_mode(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 82b9b7f82fca..e504cc33ade4 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -37,7 +37,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt);
/**
* xe_gt_record_user_engines - save data related to engines available to
- * usersapce
+ * userspace
* @gt: GT structure
*
* Walk the available HW engines from gt->info.engine_mask and calculate data
@@ -57,6 +57,31 @@ int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);
/**
+ * xe_gt_wait_for_reset - wait for the GT's async reset to complete.
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the work to finish execution,
+ * %false if there was no scheduled reset or it had already completed.
+ */
+static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
+{
+ return flush_work(&gt->reset.worker);
+}
+
+/**
+ * xe_gt_reset - perform synchronous reset
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the reset to finish,
+ * %false if there was no scheduled reset.
+ */
+static inline bool xe_gt_reset(struct xe_gt *gt)
+{
+ xe_gt_reset_async(gt);
+ return xe_gt_wait_for_reset(gt);
+}
+
+/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
* first that matches the same reset domain as @class
* @gt: GT structure
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index b6adfb9f2030..50fffc9ebf62 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -150,7 +150,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
}
mutex_unlock(&xe->drm.filelist_mutex);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 3e8c351a0eab..e7792858b1e4 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -132,11 +132,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));
- flush_work(&gt->reset.worker);
-
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 6bd39b2c5003..604bdc7c8173 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -115,6 +115,20 @@ static ssize_t rpe_freq_show(struct device *dev,
}
static DEVICE_ATTR_RO(rpe_freq);
+static ssize_t rpa_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rpa_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
+}
+static DEVICE_ATTR_RO(rpa_freq);
+
static ssize_t rpn_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -202,6 +216,7 @@ static const struct attribute *freq_attrs[] = {
&dev_attr_act_freq.attr,
&dev_attr_cur_freq.attr,
&dev_attr_rp0_freq.attr,
+ &dev_attr_rpa_freq.attr,
&dev_attr_rpe_freq.attr,
&dev_attr_rpn_freq.attr,
&dev_attr_min_freq.attr,
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 5013d674e17d..a1676b787fdc 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -371,7 +371,7 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group,
* @group: steering group ID
* @instance: steering instance ID
*
- * Return: the coverted DSS id.
+ * Return: the converted DSS id.
*/
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance)
{
@@ -550,9 +550,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* Returns true if the caller should steer to the @group/@instance values
* returned. Returns false if the caller need not perform any steering
*/
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- struct xe_reg_mcr reg_mcr,
- u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index c0cd36021c24..bc06520befab 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -26,6 +26,10 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance);
+
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 79c426dc2505..2606cd396df5 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -10,7 +10,6 @@
#include <drm/drm_exec.h>
#include <drm/drm_managed.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index 5dc71394372d..11da0228cea7 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -60,6 +60,21 @@ static inline void __xe_gt_printfn_info(struct drm_printer *p, struct va_format
xe_gt_info(gt, "%pV", vaf);
}
+static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+ struct drm_printer dbg;
+
+ /*
+ * The original xe_gt_dbg() callsite annotations are useless here, so
+ * redirect to the tweaked drm_dbg_printer() instead.
+ */
+ dbg = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, NULL);
+ dbg.origin = p->origin;
+
+ drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf);
+}
+
/**
* xe_gt_err_printer - Construct a &drm_printer that outputs to xe_gt_err()
* @gt: the &xe_gt pointer to use in xe_gt_err()
@@ -90,4 +105,20 @@ static inline struct drm_printer xe_gt_info_printer(struct xe_gt *gt)
return p;
}
+/**
+ * xe_gt_dbg_printer - Construct a &drm_printer that outputs like xe_gt_dbg()
+ * @gt: the &xe_gt pointer to use in xe_gt_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_dbg_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_dbg,
+ .arg = gt,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
#endif
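A hypothetical call site, sketching how a multi-line dump can be routed
through the xe_gt_dbg() machinery; dump_foo() and its contents are made up,
only xe_gt_dbg_printer() and drm_printf() come from the code above:

.. code-block:: c

   /* Hypothetical: emit a multi-line dump via xe_gt_dbg() */
   static void dump_foo(struct xe_gt *gt)
   {
   	struct drm_printer p = xe_gt_dbg_printer(gt);

   	drm_printf(&p, "foo state:\n");
   	drm_printf(&p, "  bar = %d\n", 42);
   }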
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index ca49860168f6..878e96281c03 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -207,6 +207,11 @@ static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u
return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}
+static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
+}
+
static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
@@ -1540,8 +1545,6 @@ static u64 pf_query_max_lmem(struct xe_gt *gt)
#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
-#else
-#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
#endif
static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
@@ -1767,6 +1770,77 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
return preempt_timeout;
}
+static const char *sched_priority_unit(u32 priority)
+{
+ return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
+ priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
+ priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
+ "(?)";
+}
+
+static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
+ if (unlikely(err))
+ return err;
+
+ config->sched_priority = priority;
+ return 0;
+}
+
+static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->sched_priority;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @priority: requested scheduling priority
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sched_priority(gt, vfid, priority);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, priority,
+ xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
+ "scheduling priority", sched_priority_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) scheduling priority.
+ */
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 priority;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ priority = pf_get_sched_priority(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return priority;
+}
+
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -2087,7 +2161,7 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
*
* This function can only be called on PF.
*
- * Return: mininum size of the buffer or the number of bytes saved,
+ * Return: minimum size of the buffer or the number of bytes saved,
* or a negative error code on failure.
*/
ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index 0c55aa40a1a7..f894e9d4abba 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -44,6 +44,9 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
u32 preempt_timeout);
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority);
+
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index);
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
index 2d3b73d78f14..686c7b3b6d7a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -33,6 +33,8 @@ struct xe_gt_sriov_config {
u32 exec_quantum;
/** @preempt_timeout: preemption timeout in microseconds. */
u32 preempt_timeout;
+ /** @sched_priority: scheduling priority. */
+ u32 sched_priority;
/** @thresholds: GuC thresholds for adverse events notifications. */
u32 thresholds[XE_GUC_KLV_NUM_THRESHOLDS];
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 05df4ab3514b..b2521dd6ec42 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -164,6 +164,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_spare
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
* │   ├── vf1
* │   │   ├── ggtt_quota
* │   │   ├── lmem_quota
@@ -171,6 +172,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_quota
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
*/
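A hedged example of driving the new attribute from the debugfs tree above;
the exact path components and the numeric encoding of priorities are
assumptions based on the GuC KLV definitions:

.. code-block:: shell

   # Assuming low/normal/high encode as 0/1/2, per the GuC ABI
   $ echo 2 > /sys/kernel/debug/dri/0/gt0/vf1/sched_priority
   $ cat /sys/kernel/debug/dri/0/gt0/vf1/sched_priority
   2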
#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
@@ -209,6 +211,7 @@ DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n");
/*
* /sys/kernel/debug/dri/0/
@@ -295,6 +298,8 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
&exec_quantum_fops);
debugfs_create_file_unsafe("preempt_timeout_us", 0644, parent, parent,
&preempt_timeout_fops);
+ debugfs_create_file_unsafe("sched_priority", 0644, parent, parent,
+ &sched_priority_fops);
/* register all threshold attributes */
#define register_threshold_attribute(TAG, NAME, ...) \
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
index 0bf12d89ceb2..6af219d93c3b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
@@ -18,7 +18,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
index fae5be5a2a11..c00fb354705f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
@@ -135,14 +135,33 @@ static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 valu
return 0;
}
+static void pf_bulk_reset_sched_priority(struct xe_gt *gt, u32 priority)
+{
+ unsigned int total_vfs = 1 + xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n < total_vfs; n++)
+ gt->sriov.pf.vfs[n].config.sched_priority = priority;
+}
+
static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
{
+ int err;
+
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
- return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
- &gt->sriov.pf.policy.guc.sched_if_idle,
- enable);
+ err = pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
+ &gt->sriov.pf.policy.guc.sched_if_idle,
+ enable);
+
+ if (!err)
+ pf_bulk_reset_sched_priority(gt, enable ? GUC_SCHED_PRIORITY_NORMAL :
+ GUC_SCHED_PRIORITY_LOW);
+ return err;
}
static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index d3baba50f085..cca5d5732802 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -27,6 +27,7 @@
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -223,6 +224,44 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
+{
+ u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
+ };
+ int ret;
+
+ ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+/**
+ * xe_gt_sriov_vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
+{
+ struct xe_guc *guc = &gt->uc.guc;
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ err = guc_action_vf_notify_resfix_done(guc);
+ if (unlikely(err))
+ xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n",
+ ERR_PTR(err));
+ else
+ xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n");
+
+ return err;
+}
+
static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
u32 *value, u32 value_len)
{
@@ -692,6 +731,30 @@ failed:
return err;
}
+/**
+ * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
+ * or just mark that a GuC is ready for it.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * This function shall be called only by VF.
+ */
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+
+ set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
+ /*
+ * We need to be certain that if all flags were set, at least one
+ * thread will notice that and schedule the recovery.
+ */
+ smp_mb__after_atomic();
+
+ xe_gt_sriov_info(gt, "ready for recovery after migration\n");
+ xe_sriov_vf_start_migration_recovery(xe);
+}
+
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index e541ce57bec2..912d20814261 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -17,6 +17,8 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index c7364a5aef8f..7a6c1d808e41 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -12,7 +12,7 @@
/**
* xe_gt_stats_incr - Increments the specified stats counter
- * @gt: graphics tile
+ * @gt: GT structure
* @id: xe_gt_stats_id type id that needs to be incremented
* @incr: value to be incremented with
*
@@ -32,7 +32,7 @@ static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
/**
* xe_gt_stats_print_info - Print the GT stats
- * @gt: graphics tile
+ * @gt: GT structure
* @p: drm_printer where it will be printed out.
*
* This prints out all the available GT stats.
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
index 91d944f6c4e4..38325ef53617 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -6,15 +6,11 @@
#ifndef _XE_GT_STATS_H_
#define _XE_GT_STATS_H_
+#include "xe_gt_stats_types.h"
+
struct xe_gt;
struct drm_printer;
-enum xe_gt_stats_id {
- XE_GT_STATS_ID_TLB_INVAL,
- /* must be the last entry */
- __XE_GT_STATS_NUM_IDS,
-};
-
#ifdef CONFIG_DEBUG_FS
int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
new file mode 100644
index 000000000000..2fc055e39f27
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_STATS_TYPES_H_
+#define _XE_GT_STATS_TYPES_H_
+
+enum xe_gt_stats_id {
+ XE_GT_STATS_ID_TLB_INVAL,
+ /* must be the last entry */
+ __XE_GT_STATS_NUM_IDS,
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 03b225364101..8db78d616b6f 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -8,6 +8,7 @@
#include <regs/xe_gt_regs.h>
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle.h"
#include "xe_mmio.h"
@@ -53,6 +54,7 @@ static u32 read_status(struct xe_gt *gt)
{
u32 status = xe_gt_throttle_get_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK;
+ xe_gt_dbg(gt, "throttle reasons: 0x%08x\n", status);
return status;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 0a0af413770e..0a93831c0a02 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -107,7 +107,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
/**
* xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
- * @gt: graphics tile
+ * @gt: GT structure
*
* Initialize GT TLB invalidation state, purely software initialization, should
* be called once during driver load.
@@ -128,7 +128,7 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
- * @gt: graphics tile
+ * @gt: GT structure
*
* Signal any pending invalidation fences, should be called during a GT reset
*/
@@ -244,7 +244,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
/**
* xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
*
@@ -261,14 +261,23 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
0, /* seqno, replaced in send_tlb_invalidation */
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
};
+ int ret;
+
+ ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
+ ARRAY_SIZE(action));
+ /*
+ * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
+ * should be nuked on a GT reset so this error can be ignored.
+ */
+ if (ret == -ECANCELED)
+ return 0;
- return send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
+ return ret;
}
/**
* xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
- * @gt: graphics tile
+ * @gt: GT structure
*
* Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
* synchronous.
@@ -317,7 +326,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
*
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
* @start: start address
@@ -403,7 +412,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion, can be NULL
* @vma: VMA to invalidate
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index a287b98ee70b..6e66bf0e8b3f 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -11,10 +11,10 @@
#include "xe_gt_idle_types.h"
#include "xe_gt_sriov_pf_types.h"
#include "xe_gt_sriov_vf_types.h"
-#include "xe_gt_stats.h"
+#include "xe_gt_stats_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
#include "xe_uc_types.h"
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 7f704346a8f4..408365dfe4ee 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -44,7 +44,15 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
struct xe_bo *bo)
{
struct xe_device *xe = guc_to_xe(guc);
- u32 addr = xe_bo_ggtt_addr(bo);
+ u32 addr;
+
+ /*
+ * For most BOs, the address on the allocating tile is fine. However for
+ * some, e.g. G2G CTB, the address on a specific tile is required as it
+ * might be different for each tile. So, just always ask for the address
+ * on the target GuC.
+ */
+ addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);
/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
@@ -139,6 +147,34 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
+static bool needs_wa_dual_queue(struct xe_gt *gt)
+{
+ /*
+ * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions
+ * on RCS and CCSes with different address spaces, which on DG2 is
+ * required as a WA for an HW bug.
+ */
+ if (XE_WA(gt, 22011391025))
+ return true;
+
+ /*
+ * On newer platforms, the HW has been updated to not allow parallel
+ * execution of different address spaces, so the RCS/CCS will stall the
+ * context switch if one of the other RCS/CCSes is busy with a different
+ * address space. While functionally correct, having a submission
+ * stalled on the HW limits the GuC ability to shuffle things around and
+ * can cause complications if the non-stalled submission runs for a long
+ * time, because the GuC doesn't know that the stalled submission isn't
+ * actually running and might declare it as hung. Therefore, we enable
+ * the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS engines
+ * to move management back to the GuC.
+ */
+ if (CCS_MASK(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
+ return true;
+
+ return false;
+}
+
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -151,7 +187,7 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
- if (XE_WA(gt, 22011391025))
+ if (needs_wa_dual_queue(gt))
flags |= GUC_WA_DUAL_QUEUE;
/*
@@ -244,6 +280,293 @@ static void guc_write_params(struct xe_guc *guc)
xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
}
+static int guc_action_register_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev,
+ u32 desc_addr, u32 buff_addr, u32 size)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_REGISTER_G2G,
+ FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) |
+ FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_REGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_REGISTER_DEVICE, dst_dev),
+ desc_addr,
+ buff_addr,
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+ xe_assert(xe, !(size % SZ_4K));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static int guc_action_deregister_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_DEREGISTER_G2G,
+ FIELD_PREP(XE_G2G_DEREGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_DEREGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_DEREGISTER_DEVICE, dst_dev),
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+#define G2G_DEV(gt) (((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)
+
+#define G2G_BUFFER_SIZE (SZ_4K)
+#define G2G_DESC_SIZE (64)
+#define G2G_DESC_AREA_SIZE (SZ_4K)
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ * Far <tile>.<dev>
+ * 0.0 0.1 1.0 1.1
+ * N 0.0 --/-- 00/01 02/03 04/05
+ * e 0.1 01/00 --/-- 06/07 08/09
+ * a 1.0 03/02 07/06 --/-- 10/11
+ * r 1.1 05/04 09/08 11/10 --/--
+ *
+ * Each entry is the Rx/Tx pair of channel ids.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10. Whereas,
+ * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
+ */
+static unsigned int g2g_slot(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+ u32 type, u32 max_inst, bool have_dev)
+{
+ u32 near = near_tile, far = far_tile;
+ u32 idx = 0, x, y, direction;
+ int i;
+
+ if (have_dev) {
+ near = (near << 1) | near_dev;
+ far = (far << 1) | far_dev;
+ }
+
+ /* No need to send to oneself */
+ if (far == near)
+ return -1;
+
+ if (far > near) {
+ /* Top right table half */
+ x = far;
+ y = near;
+
+ /* T/R is 'forwards' direction */
+ direction = type;
+ } else {
+ /* Bottom left table half */
+ x = near;
+ y = far;
+
+ /* B/L is 'backwards' direction */
+ direction = (1 - type);
+ }
+
+ /* Count the rows prior to the target */
+ for (i = y; i > 0; i--)
+ idx += max_inst - i;
+
+ /* Count this row up to the target */
+ idx += (x - 1 - y);
+
+ /* Slots are in Rx/Tx pairs */
+ idx *= 2;
+
+ /* Pick Rx/Tx direction */
+ idx += direction;
+
+ return idx;
+}
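
The indexing math above can be checked against the table in the comment with a small standalone program. This is a sketch, not part of the patch, and it assumes the flattened near/far ids already include the device bit (as after the have_dev shift):

/* Reproduce the Rx/Tx channel-id table for 2 tiles x 2 devices (4 GuCs). */
#include <stdio.h>

/* Mirrors the indexing math of g2g_slot() with have_dev pre-applied. */
static int g2g_slot_demo(unsigned int near, unsigned int far,
			 unsigned int type, unsigned int max_inst)
{
	unsigned int x, y, direction, idx = 0;
	unsigned int i;

	if (far == near)
		return -1;		/* no channel to one's own GuC */

	if (far > near) {
		x = far;		/* top/right half: 'forwards' */
		y = near;
		direction = type;
	} else {
		x = near;		/* bottom/left half: 'backwards' */
		y = far;
		direction = 1 - type;
	}

	for (i = y; i > 0; i--)
		idx += max_inst - i;	/* full rows above the target */
	idx += x - 1 - y;		/* this row, up to the target */

	return idx * 2 + direction;	/* Rx/Tx pairs, then pick direction */
}

int main(void)
{
	for (unsigned int near = 0; near < 4; near++) {
		for (unsigned int far = 0; far < 4; far++) {
			if (near == far)
				printf("--/-- ");
			else
				printf("%02d/%02d ",
				       g2g_slot_demo(near, far, 0, 4),
				       g2g_slot_demo(near, far, 1, 4));
		}
		printf("\n");
	}
	return 0;	/* prints the 00/01 ... 11/10 table row by row */
}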
+
+static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 type, bool have_dev)
+{
+ struct xe_gt *near_gt = guc_to_gt(near_guc);
+ struct xe_device *xe = gt_to_xe(near_gt);
+ struct xe_bo *g2g_bo;
+ u32 near_tile = gt_to_tile(near_gt)->id;
+ u32 near_dev = G2G_DEV(near_gt);
+ u32 far_tile = gt_to_tile(far_gt)->id;
+ u32 far_dev = G2G_DEV(far_gt);
+ u32 max = xe->info.gt_count;
+ u32 base, desc, buf;
+ int slot;
+
+ /* G2G is not allowed between different cards */
+ xe_assert(xe, xe == gt_to_xe(far_gt));
+
+ g2g_bo = near_guc->g2g.bo;
+ xe_assert(xe, g2g_bo);
+
+ slot = g2g_slot(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+ xe_assert(xe, slot >= 0);
+
+ base = guc_bo_ggtt_addr(near_guc, g2g_bo);
+ desc = base + slot * G2G_DESC_SIZE;
+ buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;
+
+ xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size);
+
+ return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
+ desc, buf, G2G_BUFFER_SIZE);
+}
+
+static void guc_g2g_deregister(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type)
+{
+ guc_action_deregister_g2g_buffer(guc, type, far_tile, far_dev);
+}
+
+static u32 guc_g2g_size(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int count = xe->info.gt_count;
+ u32 num_channels = (count * (count - 1)) / 2;
+
+ xe_assert(xe, num_channels * XE_G2G_TYPE_LIMIT * G2G_DESC_SIZE <= G2G_DESC_AREA_SIZE);
+
+ return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+}
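
(Worked example, assuming XE_G2G_TYPE_LIMIT is 2, i.e. one XE_G2G_TYPE_IN plus one XE_G2G_TYPE_OUT channel per pair: four GTs give (4 * 3) / 2 = 6 channel pairs, hence 6 * 2 * 64 B = 768 B of descriptors, comfortably inside G2G_DESC_AREA_SIZE, and a total allocation of 6 * 2 * 4 KiB + 4 KiB = 52 KiB.)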
+
+static bool xe_guc_g2g_wanted(struct xe_device *xe)
+{
+ /* Can't do GuC to GuC communication if there is only one GuC */
+ if (xe->info.gt_count <= 1)
+ return false;
+
+ /* No current user */
+ return false;
+}
+
+static int guc_g2g_alloc(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ if (guc->g2g.bo)
+ return 0;
+
+ if (gt->info.id != 0) {
+ struct xe_gt *root_gt = xe_device_get_gt(xe, 0);
+ struct xe_guc *root_guc = &root_gt->uc.guc;
+ struct xe_bo *bo;
+
+ bo = xe_bo_get(root_guc->g2g.bo);
+ if (!bo)
+ return -ENODEV;
+
+ guc->g2g.bo = bo;
+ guc->g2g.owned = false;
+ return 0;
+ }
+
+ g2g_size = guc_g2g_size(guc);
+ bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+ guc->g2g.bo = bo;
+ guc->g2g.owned = true;
+
+ return 0;
+}
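
(Concretely, on a two-tile part the root GT's GuC takes the allocation path: the BO is created once, mapped into every tile's GGTT and marked owned = true, while every other GT's GuC merely takes a reference on the root GuC's BO with owned = false; guc_g2g_fini() below relies on that flag to decide whether the reference must be dropped.)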
+
+static void guc_g2g_fini(struct xe_guc *guc)
+{
+ if (!guc->g2g.bo)
+ return;
+
+ /* Unpinning the owned object is handled by generic shutdown */
+ if (!guc->g2g.owned)
+ xe_bo_put(guc->g2g.bo);
+
+ guc->g2g.bo = NULL;
+}
+
+static int guc_g2g_start(struct xe_guc *guc)
+{
+ struct xe_gt *far_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i, j;
+ int t, err;
+ bool have_dev;
+
+ if (!guc->g2g.bo) {
+ int ret;
+
+ ret = guc_g2g_alloc(guc);
+ if (ret)
+ return ret;
+ }
+
+ /* GuC interface will need extending if more GT device types are ever created. */
+ xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA));
+
+ /* Channel numbering depends on whether there are multiple GTs per tile */
+ have_dev = xe->info.gt_count > xe->info.tile_count;
+
+ for_each_gt(far_gt, xe, i) {
+ u32 far_tile, far_dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ far_tile = gt_to_tile(far_gt)->id;
+ far_dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+ err = guc_g2g_register(guc, far_gt, t, have_dev);
+ if (err) {
+ while (--t >= 0)
+ guc_g2g_deregister(guc, far_tile, far_dev, t);
+ goto err_deregister;
+ }
+ }
+ }
+
+ return 0;
+
+err_deregister:
+ for_each_gt(far_gt, xe, j) {
+ u32 tile, dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ if (j >= i)
+ break;
+
+ tile = gt_to_tile(far_gt)->id;
+ dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
+ guc_g2g_deregister(guc, tile, dev, t);
+ }
+
+ return err;
+}
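
(As an example of the unwind: with four GTs, a failure while registering the second channel type against the third far GT first runs the inner while (--t >= 0) loop to drop that GT's already-registered type, then the err_deregister loop, capped by the j >= i check, walks the fully registered far GTs and drops both types from each.)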
+
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
@@ -253,6 +576,8 @@ static void guc_fini_hw(void *arg)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ guc_g2g_fini(guc);
}
/**
@@ -423,7 +748,16 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
int xe_guc_post_load_init(struct xe_guc *guc)
{
+ int ret;
+
xe_guc_ads_populate_post_load(&guc->ads);
+
+ if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
+ ret = guc_g2g_start(guc);
+ if (ret)
+ return ret;
+ }
+
guc->submission_state.enabled = true;
return 0;
@@ -945,7 +1279,6 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
- xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
xe_assert(xe, len);
xe_assert(xe, len <= VF_SW_FLAG_COUNT);
xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
@@ -1099,10 +1432,21 @@ int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
return guc_self_cfg(guc, key, 2, val);
}
+static void xe_guc_sw_0_irq_handler(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ xe_gt_sriov_vf_migrated_event_handler(gt);
+}
+
void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
if (iir & GUC_INTR_GUC2HOST)
xe_guc_ct_irq_handler(&guc->ct);
+
+ if (iir & GUC_INTR_SW_INT_0)
+ xe_guc_sw_0_irq_handler(guc);
}
void xe_guc_sanitize(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 4e746ae98888..fab259adc380 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -29,6 +29,7 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
+#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -231,11 +232,6 @@ static size_t guc_ads_size(struct xe_guc_ads *ads)
guc_ads_private_data_size(ads);
}
-static bool needs_wa_1607983814(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) < 1250;
-}
-
static size_t calculate_regset_size(struct xe_gt *gt)
{
struct xe_reg_sr_entry *sr_entry;
@@ -250,7 +246,7 @@ static size_t calculate_regset_size(struct xe_gt *gt)
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (needs_wa_1607983814(gt_to_xe(gt)))
+ if (XE_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
@@ -701,6 +697,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
.flags = reg.masked ? GUC_REGSET_MASKED : 0,
};
+ if (reg.mcr) {
+ struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+ u8 group, instance;
+
+ bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+ &group, &instance);
+
+ if (steer) {
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ entry.flags |= GUC_REGSET_STEERING_NEEDED;
+ }
+ }
+
xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
&entry, sizeof(entry));
}
@@ -709,7 +719,6 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
struct iosys_map *regset_map,
struct xe_hw_engine *hwe)
{
- struct xe_device *xe = ads_to_xe(ads);
struct xe_hw_engine *hwe_rcs_reset_domain =
xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
struct xe_reg_sr_entry *entry;
@@ -740,8 +749,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- /* Wa_1607983814 */
- if (needs_wa_1607983814(xe) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index d63912d28246..f6d523e4c5fe 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -1806,7 +1806,6 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
if (!devcore_snapshot->matched_node)
return;
- xe_gt_assert(gt, snapshot->source <= XE_ENGINE_CAPTURE_SOURCE_GUC);
xe_gt_assert(gt, snapshot->hwe);
capture_class = xe_engine_class_to_guc_capture_class(snapshot->hwe->class);
@@ -1815,7 +1814,8 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
snapshot->name ? snapshot->name : "",
snapshot->logical_instance);
drm_printf(p, "\tCapture_source: %s\n",
- snapshot->source == XE_ENGINE_CAPTURE_SOURCE_GUC ? "GuC" : "Manual");
+ devcore_snapshot->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC ?
+ "GuC" : "Manual");
drm_printf(p, "\tCoverage: %s\n", grptype[devcore_snapshot->matched_node->is_partial]);
drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
snapshot->forcewake.domain, snapshot->forcewake.ref);
@@ -1840,29 +1840,24 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
}
/**
- * xe_guc_capture_get_matching_and_lock - Matching GuC capture for the job.
- * @job: The job object.
+ * xe_guc_capture_get_matching_and_lock - Matching GuC capture for the queue.
+ * @q: The exec queue object
*
- * Search within the capture outlist for the job, could be used for check if
- * GuC capture is ready for the job.
+ * Search within the capture outlist for the queue; can be used to check
+ * whether GuC capture is ready for the queue.
* If found, the locked boolean of the node will be flagged.
*
* Returns: found guc-capture node ptr else NULL
*/
struct __guc_capture_parsed_output *
-xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job)
+xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q)
{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- struct xe_exec_queue *q;
struct xe_device *xe;
u16 guc_class = GUC_LAST_ENGINE_CLASS + 1;
struct xe_devcoredump_snapshot *ss;
- if (!job)
- return NULL;
-
- q = job->q;
if (!q || !q->gt)
return NULL;
@@ -1874,7 +1869,7 @@ xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job)
if (ss->matched_node && ss->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC)
return ss->matched_node;
- /* Find hwe for the job */
+ /* Find hwe for the queue */
for_each_hw_engine(hwe, q->gt, id) {
if (hwe != q->hwe)
continue;
@@ -1906,17 +1901,16 @@ xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job)
}
/**
- * xe_engine_snapshot_capture_for_job - Take snapshot of associated engine
- * @job: The job object
+ * xe_engine_snapshot_capture_for_queue - Take snapshot of associated engine
+ * @q: The exec queue object
*
* Take snapshot of associated HW Engine
*
* Returns: None.
*/
void
-xe_engine_snapshot_capture_for_job(struct xe_sched_job *job)
+xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q)
{
- struct xe_exec_queue *q = job->q;
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
struct xe_hw_engine *hwe;
@@ -1934,11 +1928,12 @@ xe_engine_snapshot_capture_for_job(struct xe_sched_job *job)
}
if (!coredump->snapshot.hwe[id]) {
- coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe, job);
+ coredump->snapshot.hwe[id] =
+ xe_hw_engine_snapshot_capture(hwe, q);
} else {
struct __guc_capture_parsed_output *new;
- new = xe_guc_capture_get_matching_and_lock(job);
+ new = xe_guc_capture_get_matching_and_lock(q);
if (new) {
struct xe_guc *guc = &q->gt->uc.guc;
@@ -1960,7 +1955,7 @@ xe_engine_snapshot_capture_for_job(struct xe_sched_job *job)
}
/*
- * xe_guc_capture_put_matched_nodes - Cleanup macthed nodes
+ * xe_guc_capture_put_matched_nodes - Cleanup matched nodes
* @guc: The GuC object
*
* Free matched node and all nodes with the equal guc_id from
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.h b/drivers/gpu/drm/xe/xe_guc_capture.h
index 97a795d13dd1..20a078dc4b85 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture.h
@@ -11,10 +11,10 @@
#include "xe_guc.h"
#include "xe_guc_fwif.h"
+struct xe_exec_queue;
struct xe_guc;
struct xe_hw_engine;
struct xe_hw_engine_snapshot;
-struct xe_sched_job;
static inline enum guc_capture_list_class_type xe_guc_class_to_capture_class(u16 class)
{
@@ -50,10 +50,10 @@ size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc);
const struct __guc_mmio_reg_descr_group *
xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type,
enum guc_capture_list_class_type capture_class, bool is_ext);
-struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job);
+struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q);
void xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot);
void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p);
-void xe_engine_snapshot_capture_for_job(struct xe_sched_job *job);
+void xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q);
void xe_guc_capture_steered_list_init(struct xe_guc *guc);
void xe_guc_capture_put_matched_nodes(struct xe_guc *guc);
int xe_guc_capture_init(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
index 2057125b1bfa..ca2d390ccbee 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -22,7 +22,7 @@ enum capture_register_data_type {
* struct __guc_mmio_reg_descr - GuC mmio register descriptor
*
* xe_guc_capture module uses these structures to define a register
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
* time as well as during runtime processing and reporting of error-
* capture states generated by GuC just prior to engine reset events.
*/
@@ -48,7 +48,7 @@ struct __guc_mmio_reg_descr {
*
* xe_guc_capture module uses these structures to maintain static
* tables (per unique platform) that consists of lists of registers
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
* time as well as during runtime processing and reporting of error-
* capture states generated by GuC just prior to engine reset events.
*/
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 8aeb1789805c..8b65c5e959cc 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -54,6 +54,7 @@ enum {
CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
+ CT_DEAD_CRASH, /* 0x8000 */
};
static void ct_dead_worker_func(struct work_struct *w);
@@ -469,8 +470,10 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
* after any existing dead state has been dumped.
*/
spin_lock_irq(&ct->dead.lock);
- if (ct->dead.reason)
+ if (ct->dead.reason) {
ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+ queue_work(system_unbound_wq, &ct->dead.worker);
+ }
spin_unlock_irq(&ct->dead.lock);
#endif
@@ -707,7 +710,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
--len;
++action;
- /* Write H2G ensuring visable before descriptor update */
+ /* Write H2G ensuring visible before descriptor update */
xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
xe_device_wmb(xe);
@@ -1017,7 +1020,6 @@ retry_same_fence:
}
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
-
if (!ret) {
LNL_FLUSH_WORK(&ct->g2h_worker);
if (g2h_fence.done) {
@@ -1121,6 +1123,24 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
return 0;
}
+static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+
+ if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
+ xe_gt_err(gt, "GuC Crash dump notification\n");
+ else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
+ xe_gt_err(gt, "GuC Exception notification\n");
+ else
+ xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
+
+ CT_DEAD(ct, NULL, CRASH);
+
+ kick_reset(ct);
+
+ return 0;
+}
+
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1295,13 +1315,17 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
break;
+ case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+ case XE_GUC_ACTION_NOTIFY_EXCEPTION:
+ ret = guc_crash_process_msg(ct, action);
+ break;
default:
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
if (ret) {
- xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
- action, ERR_PTR(ret));
+ xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
+ action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
CT_DEAD(ct, NULL, PROCESS_FAILED);
}
@@ -1359,7 +1383,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
* this function and nowhere else. Hence, they cannot be different
* unless two g2h_read calls are running concurrently. Which is not
* possible because it is guarded by ct->fast_lock. And yet, some
- * discrete platforms are reguarly hitting this error :(.
+ * discrete platforms are regularly hitting this error :(.
*
* desc_head rolling backwards shouldn't cause any noticeable
* problems - just a delay in GuC being allowed to proceed past that
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 08ffe59f22fa..057153f89b30 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -17,6 +17,7 @@
#define G2H_LEN_DW_TLB_INVALIDATE 3
#define GUC_ID_MAX 65535
+#define GUC_ID_UNKNOWN 0xffffffff
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
index 9d99fe266d97..146a6eda9e06 100644
--- a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
@@ -49,6 +49,8 @@ const char *xe_guc_klv_key_to_string(u16 key)
return "begin_db_id";
case GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY:
return "begin_ctx_id";
+ case GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY:
+ return "sched_priority";
/* VF CFG threshold keys */
#define define_threshold_key_to_string_case(TAG, NAME, ...) \
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index e8b9faeaef64..df7f130fb663 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -38,6 +38,7 @@
#define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define RPE_MASK REG_GENMASK(15, 8)
+#define RPA_MASK REG_GENMASK(31, 16)
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define CAGF_MASK REG_GENMASK(19, 11)
@@ -328,6 +329,19 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
freq);
}
+static void mtl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ if (xe_gt_is_media_type(gt))
+ reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
+ else
+ reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
+
+ pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
+}
+
static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
@@ -341,6 +355,25 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc)
pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
+static void tgl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 reg;
+
+ /*
+ * For PVC we still need to use the fused value as the approximation
+ * for RPa. For platforms other than PVC we get the resolved RPa
+ * directly from PCODE at a different register.
+ */
+ if (xe->info.platform == XE_PVC)
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+ else
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+
+ pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+}
+
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
@@ -365,10 +398,13 @@ static void pc_update_rp_values(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
struct xe_device *xe = gt_to_xe(gt);
- if (GRAPHICS_VERx100(xe) >= 1270)
+ if (GRAPHICS_VERx100(xe) >= 1270) {
+ mtl_update_rpa_value(pc);
mtl_update_rpe_value(pc);
- else
+ } else {
+ tgl_update_rpa_value(pc);
tgl_update_rpe_value(pc);
+ }
/*
* RPe is decided at runtime by PCODE. In the rare case where that's
@@ -421,8 +457,8 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
* GuC SLPC plays with cur freq request when GuCRC is enabled
* Block RC6 for a more reliable read.
*/
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return -ETIMEDOUT;
}
@@ -448,6 +484,19 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
}
/**
+ * xe_guc_pc_get_rpa_freq - Get the RPa freq
+ * @pc: The GuC PC
+ *
+ * Returns: RPa freq.
+ */
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
+{
+ pc_update_rp_values(pc);
+
+ return pc->rpa_freq;
+}
+
+/**
* xe_guc_pc_get_rpe_freq - Get the RPe freq
* @pc: The GuC PC
*
@@ -481,10 +530,10 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
*/
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
- struct xe_gt *gt = pc_to_gt(pc);
- unsigned int fw_ref;
int ret;
+ xe_device_assert_mem_access(pc_to_xe(pc));
+
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -492,24 +541,12 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
goto out;
}
- /*
- * GuC SLPC plays with min freq request when GuCRC is enabled
- * Block RC6 for a more reliable read.
- */
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- ret = -ETIMEDOUT;
- goto fw;
- }
-
ret = pc_action_query_task_state(pc);
if (ret)
- goto fw;
+ goto out;
*freq = pc_get_min_freq(pc);
-fw:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
out:
mutex_unlock(&pc->freq_lock);
return ret;
@@ -969,8 +1006,8 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
xe_gt_assert(gt, xe_device_uc_enabled(xe));
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index efda432fadfc..619f59cd633c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -21,6 +21,7 @@ int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index 13810be015db..2978ac9a249b 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -17,6 +17,8 @@ struct xe_guc_pc {
struct xe_bo *bo;
/** @rp0_freq: HW RP0 frequency - The Maximum one */
u32 rp0_freq;
+ /** @rpa_freq: HW RPa frequency - The Achievable one */
+ u32 rpa_freq;
/** @rpe_freq: HW RPe frequency - The Efficient one */
u32 rpe_freq;
/** @rpn_freq: HW RPN frequency - The Minimum one */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 6f4a9812b4f4..913c74d6e2ae 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -412,12 +412,11 @@ static const int xe_exec_queue_prio_to_guc[] = {
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct exec_queue_policy policy;
- struct xe_device *xe = guc_to_xe(guc);
enum xe_exec_queue_priority prio = q->sched_props.priority;
u32 timeslice_us = q->sched_props.timeslice_us;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
@@ -451,12 +450,11 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
struct guc_ctxt_registration_info *info)
{
#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
- struct xe_device *xe = guc_to_xe(guc);
u32 action[MAX_MLRC_REG_SIZE];
int len = 0;
int i;
- xe_assert(xe, xe_exec_queue_is_parallel(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q));
action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = info->flags;
@@ -479,7 +477,7 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
- xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
+ xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
xe_guc_ct_send(&guc->ct, action, len, 0, 0);
@@ -513,7 +511,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
struct xe_lrc *lrc = q->lrc[0];
struct guc_ctxt_registration_info info;
- xe_assert(xe, !exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id;
@@ -603,7 +601,7 @@ static int wq_noop_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wq_space_until_wrap(q)))
return -ENODEV;
- xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
+ xe_gt_assert(guc_to_gt(guc), FIELD_FIT(WQ_LEN_MASK, len_dw));
parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
@@ -643,13 +641,13 @@ static void wq_item_append(struct xe_exec_queue *q)
wqi[i++] = lrc->ring.tail / sizeof(u64);
}
- xe_assert(xe, i == wqi_size / sizeof(u32));
+ xe_gt_assert(guc_to_gt(guc), i == wqi_size / sizeof(u32));
iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
wq[q->guc->wqi_tail / sizeof(u32)]));
xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
q->guc->wqi_tail += wqi_size;
- xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
+ xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE);
xe_device_wmb(xe);
@@ -661,7 +659,6 @@ static void wq_item_append(struct xe_exec_queue *q)
static void submit_exec_queue(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_lrc *lrc = q->lrc[0];
u32 action[3];
u32 g2h_len = 0;
@@ -669,7 +666,7 @@ static void submit_exec_queue(struct xe_exec_queue *q)
int len = 0;
bool extra_submit = false;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
if (xe_exec_queue_is_parallel(q))
wq_item_append(q);
@@ -716,12 +713,11 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct dma_fence *fence = NULL;
bool lr = xe_exec_queue_is_lr(q);
- xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
- exec_queue_banned(q) || exec_queue_suspended(q));
+ xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
+ exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job);
@@ -823,7 +819,7 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
*/
void xe_guc_submit_wedge(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
unsigned long index;
int err;
@@ -833,7 +829,8 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
- drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
+ xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; "
+ "Although device is wedged.\n");
return;
}
@@ -865,11 +862,10 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
container_of(w, struct xe_guc_exec_queue, lr_tdr);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_gpu_scheduler *sched = &ge->sched;
bool wedged;
- xe_assert(xe, xe_exec_queue_is_lr(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
trace_xe_exec_queue_lr_cleanup(q);
wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
@@ -903,13 +899,19 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
!exec_queue_pending_disable(q) ||
xe_guc_read_stopped(guc), HZ * 5);
if (!ret) {
- drm_warn(&xe->drm, "Schedule disable failed to respond");
+ xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
+ xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
return;
}
}
+ if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
+ xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
+
xe_sched_submission_start(sched);
}
@@ -1068,13 +1070,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* do manual capture first and decide later if we need to use it
*/
if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
- !xe_guc_capture_get_matching_and_lock(job)) {
+ !xe_guc_capture_get_matching_and_lock(q)) {
/* take force wake before engine register manual capture */
fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
- xe_engine_snapshot_capture_for_job(job);
+ xe_engine_snapshot_capture_for_queue(q);
xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
}
@@ -1132,7 +1134,12 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
- xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
+ xe_gt_warn(guc_to_gt(guc),
+ "Schedule disable failed to respond, guc_id=%d",
+ q->guc->id);
+ xe_devcoredump(q, job,
+ "Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
+ q->guc->id, ret, xe_guc_read_stopped(guc));
set_exec_queue_extra_ref(q);
xe_exec_queue_get(q); /* GT reset owns this */
set_exec_queue_banned(q);
@@ -1162,7 +1169,10 @@ trigger_reset:
trace_xe_sched_job_timedout(job);
if (!exec_queue_killed(q))
- xe_devcoredump(job);
+ xe_devcoredump(q, job,
+ "Timedout job - seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags);
/*
* Kernel jobs should never fail, nor should VM jobs if they do
@@ -1216,7 +1226,7 @@ sched_enable:
enable_scheduling(q);
rearm:
/*
- * XXX: Ideally want to adjust timeout based on current exection time
+ * XXX: Ideally want to adjust timeout based on current execution time
* but there is not currently an easy way to do in DRM scheduler. With
* some thought, do this in a follow up.
*/
@@ -1277,9 +1287,8 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{
struct xe_exec_queue *q = msg->private_data;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
+ xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
trace_xe_exec_queue_cleanup_entity(q);
if (exec_queue_registered(q))
@@ -1315,11 +1324,10 @@ static void __suspend_fence_signal(struct xe_exec_queue *q)
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
- xe_guc_read_stopped(guc));
- xe_assert(xe, q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) ||
+ xe_guc_read_stopped(guc));
+ xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending);
__suspend_fence_signal(q);
}
@@ -1415,12 +1423,11 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_guc_exec_queue *ge;
long timeout;
int err, i;
- xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
+ xe_gt_assert(guc_to_gt(guc), xe_device_uc_enabled(guc_to_xe(guc)));
ge = kzalloc(sizeof(*ge), GFP_KERNEL);
if (!ge)
@@ -1633,9 +1640,8 @@ static void guc_exec_queue_resume(struct xe_exec_queue *q)
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
xe_sched_msg_lock(sched);
guc_exec_queue_try_add_msg(q, msg, RESUME);
@@ -1708,7 +1714,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
ban = true;
}
} else if (xe_exec_queue_is_lr(q) &&
- (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
+ !xe_lrc_ring_is_idle(q->lrc[0])) {
ban = true;
}
@@ -1747,9 +1753,8 @@ void xe_guc_submit_stop(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, xe_guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
@@ -1791,9 +1796,8 @@ int xe_guc_submit_start(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, xe_guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
@@ -1814,22 +1818,22 @@ int xe_guc_submit_start(struct xe_guc *guc)
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
if (unlikely(guc_id >= GUC_ID_MAX)) {
- drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
+ xe_gt_err(gt, "Invalid guc_id %u\n", guc_id);
return NULL;
}
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
if (unlikely(!q)) {
- drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id);
+ xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id);
return NULL;
}
- xe_assert(xe, guc_id >= q->guc->id);
- xe_assert(xe, guc_id < (q->guc->id + q->width));
+ xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id);
+ xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width));
return q;
}
@@ -1898,15 +1902,14 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
- u32 runnable_state = msg[1];
+ u32 guc_id, runnable_state;
- if (unlikely(len < 2)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 2))
return -EPROTO;
- }
+
+ guc_id = msg[0];
+ runnable_state = msg[1];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1940,14 +1943,13 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1969,14 +1971,13 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -2016,10 +2017,8 @@ int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
u32 status;
- if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN)) {
- xe_gt_dbg(guc_to_gt(guc), "Invalid length %u", len);
+ if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN))
return -EPROTO;
- }
status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
@@ -2034,13 +2033,21 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
+
+ guc_id = msg[0];
+
+ if (guc_id == GUC_ID_UNKNOWN) {
+ /*
+ * GuC uses GUC_ID_UNKNOWN if it cannot map the CAT fault to any PF/VF
+ * context. In such a case, only the PF is notified about the fault.
+ */
+ xe_gt_err_ratelimited(gt, "Memory CAT error reported by GuC!\n");
+ return 0;
}
q = g2h_exec_queue_lookup(guc, guc_id);
@@ -2062,24 +2069,22 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
- if (unlikely(len != 3)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len != 3))
return -EPROTO;
- }
guc_class = msg[0];
instance = msg[1];
reason = msg[2];
/* Unexpected failure of a hardware feature, log an actual error */
- drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
- guc_class, instance, reason);
+ xe_gt_err(gt, "GuC engine reset request failed on %d:%d because 0x%08X",
+ guc_class, instance, reason);
- xe_gt_reset_async(guc_to_gt(guc));
+ xe_gt_reset_async(gt);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index fa75f57bf5da..83a41ebcdc91 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -64,6 +64,15 @@ struct xe_guc {
struct xe_guc_pc pc;
/** @dbm: GuC Doorbell Manager */
struct xe_guc_db_mgr dbm;
+
+ /** @g2g: GuC to GuC communication state */
+ struct {
+ /** @g2g.bo: Storage for GuC to GuC communication channels */
+ struct xe_bo *bo;
+ /** @g2g.owned: Is the BO owned by this GT or just mapped in */
+ bool owned;
+ } g2g;
+
/** @submission_state: GuC submission state */
struct {
/** @submission_state.idm: GuC context ID Manager */
@@ -79,6 +88,7 @@ struct xe_guc {
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
+
/** @hwconfig: Hardware config state */
struct {
/** @hwconfig.bo: buffer object of the hardware config */
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 65b2e147c4b9..d765bfd3636b 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
+ if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
return;
if (heci_gsc->adev) {
@@ -177,7 +177,7 @@ void xe_heci_gsc_init(struct xe_device *xe)
const struct heci_gsc_def *def;
int ret;
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
+ if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
return;
heci_gsc->irq = -1;
@@ -222,7 +222,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & GSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_GSCFI(xe)) {
+ if (!xe->info.has_heci_gscfi) {
drm_warn_once(&xe->drm, "GSC irq: not supported");
return;
}
@@ -242,7 +242,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & CSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_CSCFI(xe)) {
+ if (!xe->info.has_heci_cscfi) {
drm_warn_once(&xe->drm, "CSC irq: not supported");
return;
}
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 2c32dc46f7d4..089834467880 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -159,7 +159,7 @@ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
* This function allocates the storage of the userptr sg table.
* It is caller's responsibility to free it calling sg_free_table.
*
- * returns: 0 for succuss; negative error no on failure
+ * returns: 0 for success; negative errno on failure
*/
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
bool is_mm_mmap_locked)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1557acee3523..fc447751fe78 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -324,6 +324,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
u32 ccs_mask =
xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
@@ -332,8 +333,10 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
xe_bo_ggtt_addr(hwe->hwsp));
- xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
_MASKED_BIT_DISABLE(STOP_RING));
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
@@ -419,7 +422,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
- const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
@@ -574,7 +577,6 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
@@ -773,7 +775,7 @@ static void check_gsc_availability(struct xe_gt *gt)
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
+ drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
}
}
@@ -829,7 +831,7 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
/**
* xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
* @hwe: Xe HW Engine.
- * @job: The job object.
+ * @q: The exec queue object.
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -838,7 +840,7 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
* caller, using `xe_hw_engine_snapshot_free`.
*/
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job)
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
struct xe_hw_engine_snapshot *snapshot;
struct __guc_capture_parsed_output *node;
@@ -864,15 +866,14 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job
if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
return snapshot;
- if (job) {
+ if (q) {
/* If got guc capture, set source to GuC */
- node = xe_guc_capture_get_matching_and_lock(job);
+ node = xe_guc_capture_get_matching_and_lock(q);
if (node) {
struct xe_device *xe = gt_to_xe(hwe->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
coredump->snapshot.matched_node = node;
- snapshot->source = XE_ENGINE_CAPTURE_SOURCE_GUC;
xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
return snapshot;
}
@@ -880,7 +881,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job
/* otherwise, do manual capture */
xe_engine_manual_capture(hwe, snapshot);
- snapshot->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL;
xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
return snapshot;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h
index da0a6922a26f..6b5f9fa2a594 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine.h
@@ -11,7 +11,7 @@
struct drm_printer;
struct drm_xe_engine_class_instance;
struct xe_device;
-struct xe_sched_job;
+struct xe_exec_queue;
#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN
#define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN
@@ -56,7 +56,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe);
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
enum xe_engine_class engine_class);
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job);
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q);
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot);
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p);
void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 719f27ef00a5..e4191a7a2c31 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -106,7 +106,7 @@ struct xe_hw_engine_class_intf {
* Contains all the hardware engine state for physical instances.
*/
struct xe_hw_engine {
- /** @gt: graphics tile this hw engine belongs to */
+ /** @gt: GT structure this hw engine belongs to */
struct xe_gt *gt;
/** @name: name of this hw engine */
const char *name;
@@ -165,8 +165,6 @@ enum xe_hw_engine_snapshot_source_id {
struct xe_hw_engine_snapshot {
/** @name: name of the hw engine */
char *name;
- /** @source: Data source, either manual or GuC */
- enum xe_hw_engine_snapshot_source_id source;
/** @hwe: hw engine */
struct xe_hw_engine *hwe;
/** @logical_instance: logical instance of this hw engine */
diff --git a/drivers/gpu/drm/xe/xe_hw_fence_types.h b/drivers/gpu/drm/xe/xe_hw_fence_types.h
index 364a61f4bfda..58a8d09afe5c 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_fence_types.h
@@ -41,7 +41,7 @@ struct xe_hw_fence_irq {
* to a xe_hw_fence_irq, maintains serial seqno.
*/
struct xe_hw_fence_ctx {
- /** @gt: graphics tile of hardware fence context */
+ /** @gt: GT structure of hardware fence context */
struct xe_gt *gt;
/** @irq: fence irq handler */
struct xe_hw_fence_irq *irq;
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index b7995ebd54ab..32f5a67a917b 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include "display/xe_display.h"
+#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
@@ -29,6 +30,11 @@
#define IIR(offset) XE_REG(offset + 0x8)
#define IER(offset) XE_REG(offset + 0xc)
+static int xe_irq_msix_init(struct xe_device *xe);
+static void xe_irq_msix_free(struct xe_device *xe);
+static int xe_irq_msix_request_irqs(struct xe_device *xe);
+static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
+
static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 val = xe_mmio_read32(mmio, reg);
@@ -192,7 +198,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
- } else if (HAS_HECI_GSCFI(xe)) {
+ } else if (xe->info.has_heci_gscfi) {
gsc_mask = GSC_IRQ_INTF(1);
}
@@ -325,7 +331,7 @@ static void gt_irq_handler(struct xe_tile *tile,
if (class == XE_ENGINE_CLASS_OTHER) {
/* HECI GSCFI interrupts come from outside of GT */
- if (HAS_HECI_GSCFI(xe) && instance == OTHER_GSC_INSTANCE)
+ if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
xe_heci_gsc_irq_handler(xe, intr_vec);
else
gt_other_irq_handler(engine_gt, instance, intr_vec);
@@ -348,12 +354,8 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
unsigned long intr_dw[2];
u32 identity[32];
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
master_ctl = xelp_intr_disable(xe);
if (!master_ctl) {
@@ -417,12 +419,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
/* TODO: This really shouldn't be copied+pasted */
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
master_tile_ctl = dg1_intr_disable(xe);
if (!master_tile_ctl) {
@@ -459,7 +457,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* the primary tile.
*/
if (id == 0) {
- if (HAS_HECI_CSCFI(xe))
+ if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
@@ -508,7 +506,7 @@ static void gt_irq_reset(struct xe_tile *tile)
if ((tile->media_gt &&
xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
- HAS_HECI_GSCFI(tile_to_xe(tile))) {
+ tile_to_xe(tile)->info.has_heci_gscfi) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
@@ -580,6 +578,11 @@ static void xe_irq_reset(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_reset(xe);
+ if (xe_device_uses_memirq(xe)) {
+ for_each_tile(tile, xe, id)
+ xe_memirq_reset(&tile->memirq);
+ }
+
for_each_tile(tile, xe, id) {
if (GRAPHICS_VERx100(xe) >= 1210)
dg1_irq_reset(tile);
@@ -622,6 +625,14 @@ static void xe_irq_postinstall(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_postinstall(xe);
+ if (xe_device_uses_memirq(xe)) {
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ xe_memirq_postinstall(&tile->memirq);
+ }
+
xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
/*
@@ -644,12 +655,8 @@ static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
struct xe_tile *tile;
unsigned int id;
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
for_each_tile(tile, xe, id)
xe_memirq_handler(&tile->memirq);
@@ -668,63 +675,85 @@ static irq_handler_t xe_irq_handler(struct xe_device *xe)
return xelp_irq_handler;
}
-static void irq_uninstall(void *arg)
+static int xe_irq_msi_request_irqs(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ irq_handler_t irq_handler;
+ int irq, err;
+
+ irq_handler = xe_irq_handler(xe);
+ if (!irq_handler) {
+ drm_err(&xe->drm, "No supported interrupt handler");
+ return -EINVAL;
+ }
+
+ irq = pci_irq_vector(pdev, 0);
+ err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void xe_irq_msi_free(struct xe_device *xe)
{
- struct xe_device *xe = arg;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int irq;
- if (!xe->irq.enabled)
+ irq = pci_irq_vector(pdev, 0);
+ free_irq(irq, xe);
+}
+
+static void irq_uninstall(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ if (!atomic_xchg(&xe->irq.enabled, 0))
return;
- xe->irq.enabled = false;
xe_irq_reset(xe);
- irq = pci_irq_vector(pdev, 0);
- free_irq(irq, xe);
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_free(xe);
+ else
+ xe_irq_msi_free(xe);
+}
+
+int xe_irq_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->irq.lock);
+
+ return xe_irq_msix_init(xe);
}
int xe_irq_install(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- unsigned int irq_flags = PCI_IRQ_MSIX;
- irq_handler_t irq_handler;
- int err, irq, nvec;
-
- irq_handler = xe_irq_handler(xe);
- if (!irq_handler) {
- drm_err(&xe->drm, "No supported interrupt handler");
- return -EINVAL;
- }
+ unsigned int irq_flags = PCI_IRQ_MSI;
+ int nvec = 1;
+ int err;
xe_irq_reset(xe);
- nvec = pci_msix_vec_count(pdev);
- if (nvec <= 0) {
- if (nvec == -EINVAL) {
- /* MSIX capability is not supported in the device, using MSI */
- irq_flags = PCI_IRQ_MSI;
- nvec = 1;
- } else {
- drm_err(&xe->drm, "MSIX: Failed getting count\n");
- return nvec;
- }
+ if (xe_device_has_msix(xe)) {
+ nvec = xe->irq.msix.nvec;
+ irq_flags = PCI_IRQ_MSIX;
}
err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
if (err < 0) {
- drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
+ drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
return err;
}
- irq = pci_irq_vector(pdev, 0);
- err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
- if (err < 0) {
- drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err);
+ err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
+ xe_irq_msi_request_irqs(xe);
+ if (err)
return err;
- }
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_postinstall(xe);
@@ -735,20 +764,28 @@ int xe_irq_install(struct xe_device *xe)
return 0;
free_irq_handler:
- free_irq(irq, xe);
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_free(xe);
+ else
+ xe_irq_msi_free(xe);
return err;
}
-void xe_irq_suspend(struct xe_device *xe)
+static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
- int irq = to_pci_dev(xe->drm.dev)->irq;
+ synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
+}
- spin_lock_irq(&xe->irq.lock);
- xe->irq.enabled = false; /* no new irqs */
- spin_unlock_irq(&xe->irq.lock);
+void xe_irq_suspend(struct xe_device *xe)
+{
+ atomic_set(&xe->irq.enabled, 0); /* no new irqs */
- synchronize_irq(irq); /* flush irqs */
+ /* flush irqs */
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_synchronize_irq(xe);
+ else
+ xe_irq_msi_synchronize_irq(xe);
xe_irq_reset(xe); /* turn irqs off */
}
@@ -762,10 +799,205 @@ void xe_irq_resume(struct xe_device *xe)
* 1. no irq will arrive before the postinstall
* 2. display is not yet resumed
*/
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_reset(xe);
xe_irq_postinstall(xe); /* turn irqs on */
for_each_gt(gt, xe, id)
xe_irq_enable_hwe(gt);
}
+
+/* MSI-X related definitions and functions below. */
+
+enum xe_irq_msix_static {
+ GUC2HOST_MSIX = 0,
+ DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
+ /* Must be last */
+ NUM_OF_STATIC_MSIX,
+};
+
+static int xe_irq_msix_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int nvec = pci_msix_vec_count(pdev);
+
+ if (nvec == -EINVAL)
+ return 0; /* MSI */
+
+ if (nvec < 0) {
+ drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
+ return nvec;
+ }
+
+ xe->irq.msix.nvec = nvec;
+ xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
+ return 0;
+}
+
+static irqreturn_t guc2host_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ u8 id;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, id)
+ xe_guc_irq_handler(&tile->primary_gt->uc.guc,
+ GUC_INTR_GUC2HOST);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
+{
+ unsigned int tile_id, gt_id;
+ struct xe_device *xe = arg;
+ struct xe_memirq *memirq;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, tile_id) {
+ memirq = &tile->memirq;
+ if (!memirq->bo)
+ continue;
+
+ for_each_gt(gt, xe, gt_id) {
+ if (gt->tile != tile)
+ continue;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_memirq_hwe_handler(memirq, hwe);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
+ bool dynamic_msix, u16 *msix)
+{
+ struct xa_limit limit;
+ int ret;
+ u32 id;
+
+ limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
+ XA_LIMIT(*msix, *msix);
+ ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (dynamic_msix)
+ *msix = id;
+
+ return 0;
+}
+
+static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
+{
+ xa_erase(&xe->irq.msix.indexes, msix);
+}
+
+static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
+ void *irq_buf, const char *name, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix)
+{
+ int ret;
+
+ ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
+ if (ret)
+ return ret;
+
+ ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
+ if (ret) {
+ drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
+ xe_irq_msix_release_vector(xe, *msix);
+ return ret;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int irq;
+ void *irq_buf;
+
+ irq_buf = xa_load(&xe->irq.msix.indexes, msix);
+ if (!irq_buf)
+ return;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0) {
+ drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
+ return;
+ }
+
+ free_irq(irq, irq_buf);
+ xe_irq_msix_release_vector(xe, msix);
+}
+
+int xe_irq_msix_request_irqs(struct xe_device *xe)
+{
+ int err;
+ u16 msix;
+
+ msix = GUC2HOST_MSIX;
+ err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+ DRIVER_NAME "-guc2host", false, &msix);
+ if (err)
+ return err;
+
+ msix = DEFAULT_MSIX;
+ err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
+ DRIVER_NAME "-default-msix", false, &msix);
+ if (err) {
+ xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
+ return err;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free(struct xe_device *xe)
+{
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ xe_irq_msix_free_irq(xe, msix);
+ xa_destroy(&xe->irq.msix.indexes);
+}
+
+void xe_irq_msix_synchronize_irq(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ synchronize_irq(pci_irq_vector(pdev, msix));
+}
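As a usage sketch of the new vector management: a driver-internal client can claim a dynamically allocated MSI-X vector and later release it. The handler and context names below are hypothetical (assuming a caller-owned context structure); only xe_irq_msix_request_irq() and xe_irq_msix_free_irq() come from this patch.

/* Hypothetical client requesting a dynamic MSI-X vector. */
static irqreturn_t my_client_irq_handler(int irq, void *arg)
{
	struct my_client *client = arg;	/* assumed caller-owned context */

	/* handle the event source bound to this vector */
	return IRQ_HANDLED;
}

static int my_client_bind_irq(struct xe_device *xe, struct my_client *client)
{
	u16 msix;	/* out: index allocated from xe->irq.msix.indexes */
	int err;

	/* dynamic_msix=true picks any free index above NUM_OF_STATIC_MSIX */
	err = xe_irq_msix_request_irq(xe, my_client_irq_handler, client,
				      "xe-my-client", true, &msix);
	if (err)
		return err;

	client->msix_vec = msix;
	return 0;
}

/* Teardown mirrors the xarray bookkeeping: xe_irq_msix_free_irq(xe, client->msix_vec); */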
diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h
index 067514e13675..a28bd577ba52 100644
--- a/drivers/gpu/drm/xe/xe_irq.h
+++ b/drivers/gpu/drm/xe/xe_irq.h
@@ -6,13 +6,21 @@
#ifndef _XE_IRQ_H_
#define _XE_IRQ_H_
+#include <linux/interrupt.h>
+
+#define XE_IRQ_DEFAULT_MSIX 1
+
struct xe_device;
struct xe_tile;
struct xe_gt;
+int xe_irq_init(struct xe_device *xe);
int xe_irq_install(struct xe_device *xe);
void xe_irq_suspend(struct xe_device *xe);
void xe_irq_resume(struct xe_device *xe);
void xe_irq_enable_hwe(struct xe_gt *gt);
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix);
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix);
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 4f64c7f4e68d..bbb9ffbf6367 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -25,6 +25,7 @@
#include "xe_map.h"
#include "xe_memirq.h"
#include "xe_sriov.h"
+#include "xe_trace_lrc.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -583,6 +584,7 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
{
struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq;
struct xe_device *xe = gt_to_xe(hwe->gt);
+ u8 num_regs;
if (!xe_device_uses_memirq(xe))
return;
@@ -592,12 +594,18 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr;
regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq);
- regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ num_regs = xe_device_has_msix(xe) ? 3 : 2;
+ regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(num_regs) |
MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED;
regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr;
regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe);
regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr;
regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe);
+
+ if (xe_device_has_msix(xe)) {
+ regs[CTX_CS_INT_VEC_REG] = CS_INT_VEC(0).addr;
+ /* CTX_CS_INT_VEC_DATA will be set in xe_lrc_init */
+ }
}
static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
@@ -875,7 +883,7 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
- struct xe_vm *vm, u32 ring_size)
+ struct xe_vm *vm, u32 ring_size, u16 msix_vec)
{
struct xe_gt *gt = hwe->gt;
struct xe_tile *tile = gt_to_tile(gt);
@@ -944,6 +952,14 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_drm_client_add_bo(vm->xef->client, lrc->bo);
}
+ if (xe_device_has_msix(xe)) {
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_STATUS_REPORT_PTR,
+ xe_memirq_status_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_SRC_REPORT_PTR,
+ xe_memirq_source_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_CS_INT_VEC_DATA, msix_vec << 16 | msix_vec);
+ }
+
if (xe_gt_has_indirect_ring_state(gt)) {
xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE,
__xe_lrc_indirect_ring_ggtt_addr(lrc));
@@ -1004,6 +1020,7 @@ err_lrc_finish:
* @hwe: Hardware Engine
* @vm: The VM (address space)
* @ring_size: LRC ring size
+ * @msix_vec: MSI-X interrupt vector (for platforms that support it)
*
* Allocate and initialize the Logical Ring Context (LRC).
*
@@ -1011,7 +1028,7 @@ err_lrc_finish:
* upon failure.
*/
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size)
+ u32 ring_size, u16 msix_vec)
{
struct xe_lrc *lrc;
int err;
@@ -1020,7 +1037,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
if (!lrc)
return ERR_PTR(-ENOMEM);
- err = xe_lrc_init(lrc, hwe, vm, ring_size);
+ err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec);
if (err) {
kfree(lrc);
return ERR_PTR(err);
@@ -1060,6 +1077,14 @@ u32 xe_lrc_ring_tail(struct xe_lrc *lrc)
return xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL) & TAIL_ADDR;
}
+static u32 xe_lrc_ring_start(struct xe_lrc *lrc)
+{
+ if (xe_lrc_has_indirect_ring_state(lrc))
+ return xe_lrc_read_indirect_ctx_reg(lrc, INDIRECT_CTX_RING_START);
+ else
+ return xe_lrc_read_ctx_reg(lrc, CTX_RING_START);
+}
+
void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head)
{
if (xe_lrc_has_indirect_ring_state(lrc))
@@ -1635,10 +1660,12 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
xe_vm_get(lrc->bo->vm);
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
+ snapshot->ring_addr = __xe_lrc_ring_ggtt_addr(lrc);
snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
snapshot->head = xe_lrc_ring_head(lrc);
snapshot->tail.internal = lrc->ring.tail;
snapshot->tail.memory = xe_lrc_ring_tail(lrc);
+ snapshot->start = xe_lrc_ring_start(lrc);
snapshot->start_seqno = xe_lrc_start_seqno(lrc);
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
@@ -1692,11 +1719,14 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
return;
drm_printf(p, "\tHW Context Desc: 0x%08x\n", snapshot->context_desc);
+ drm_printf(p, "\tHW Ring address: 0x%08x\n",
+ snapshot->ring_addr);
drm_printf(p, "\tHW Indirect Ring State: 0x%08x\n",
snapshot->indirect_context_desc);
drm_printf(p, "\tLRC Head: (memory) %u\n", snapshot->head);
drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
snapshot->tail.internal, snapshot->tail.memory);
+ drm_printf(p, "\tRing start: (memory) 0x%08x\n", snapshot->start);
drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
drm_printf(p, "\tTimestamp: 0x%08x\n", snapshot->ctx_timestamp);
@@ -1758,5 +1788,20 @@ u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ trace_xe_lrc_update_timestamp(lrc, *old_ts);
+
return lrc->ctx_timestamp;
}
+
+/**
+ * xe_lrc_ring_is_idle() - Check if the LRC ring is idle
+ * @lrc: Pointer to the lrc.
+ *
+ * Compare LRC ring head and tail to determine if idle.
+ *
+ * Return: True if the ring is idle, False otherwise
+ */
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc)
+{
+ return xe_lrc_ring_head(lrc) == xe_lrc_ring_tail(lrc);
+}
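The msix_vec parameter threaded through xe_lrc_create() ends up in CTX_CS_INT_VEC_DATA above. A minimal caller sketch, assuming an exec queue q (with hwe and vm in scope) that stores the vector it obtained at creation; the q->msix_vec field is an assumption here, with XE_IRQ_DEFAULT_MSIX as the fallback:

/* Sketch: create an LRC bound to the queue's MSI-X vector (q->msix_vec assumed). */
u16 msix_vec = xe_device_has_msix(xe) ? q->msix_vec : XE_IRQ_DEFAULT_MSIX;
struct xe_lrc *lrc = xe_lrc_create(hwe, vm, SZ_16K, msix_vec);

if (IS_ERR(lrc))
	return PTR_ERR(lrc);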
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 40d8f6906d3e..4206e6a8b50a 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -25,8 +25,10 @@ struct xe_lrc_snapshot {
unsigned long lrc_size, lrc_offset;
u32 context_desc;
+ u32 ring_addr;
u32 indirect_context_desc;
u32 head;
+ u32 start;
struct {
u32 internal;
u32 memory;
@@ -40,7 +42,7 @@ struct xe_lrc_snapshot {
#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size);
+ u32 ring_size, u16 msix_vec);
void xe_lrc_destroy(struct kref *ref);
/**
@@ -78,6 +80,8 @@ u32 xe_lrc_ring_head(struct xe_lrc *lrc);
u32 xe_lrc_ring_space(struct xe_lrc *lrc);
void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size);
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc);
+
u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
u32 *xe_lrc_regs(struct xe_lrc *lrc);
diff --git a/drivers/gpu/drm/xe/xe_macros.h b/drivers/gpu/drm/xe/xe_macros.h
index daf56c846d03..8a77c2423555 100644
--- a/drivers/gpu/drm/xe/xe_macros.h
+++ b/drivers/gpu/drm/xe/xe_macros.h
@@ -10,9 +10,13 @@
#define XE_WARN_ON WARN_ON
-#define XE_IOCTL_DBG(xe, cond) \
- ((cond) && (drm_dbg(&(xe)->drm, \
- "Ioctl argument check failed at %s:%d: %s", \
- __FILE__, __LINE__, #cond), 1))
+#define XE_IOCTL_DBG(xe, cond) ({ \
+ int cond__ = !!(cond); \
+ if (cond__) \
+ drm_dbg(&(xe)->drm, \
+ "Ioctl argument check failed at %s:%d: %s", \
+ __FILE__, __LINE__, #cond); \
+ cond__; \
+})
#endif
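The statement-expression form normalizes the condition with !! and evaluates it exactly once, so the macro still reads as a plain boolean in if statements, including when the condition has side effects. A usage sketch (args, ext, ptr and VALID_FLAGS are illustrative locals, not from this patch):

/* Reads as a boolean; logs the failed check as a side effect. */
if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS))
	return -EINVAL;

/* The condition is evaluated once, so calls with side effects are safe. */
if (XE_IOCTL_DBG(xe, copy_from_user(&ext, ptr, sizeof(ext))))
	return -EFAULT;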
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index f833da88150a..404fa2a456d5 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -155,13 +155,6 @@ static const char *guc_name(struct xe_guc *guc)
*
*/
-static void __release_xe_bo(struct drm_device *drm, void *arg)
-{
- struct xe_bo *bo = arg;
-
- xe_bo_unpin_map_no_vm(bo);
-}
-
static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq)
{
/*
@@ -184,14 +177,12 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64));
BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K));
- /* XXX: convert to managed bo */
- bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE |
- XE_BO_FLAG_NEEDS_UC |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo = xe_managed_bo_create_pin_map(xe, tile, bo_size,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_NEEDS_UC |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out;
@@ -215,7 +206,7 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0),
XE_MEMIRQ_STATUS_OFFSET(0));
- return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);
+ return 0;
out:
memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
@@ -442,6 +433,9 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
+
+ if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
+ xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 1b97d90aadda..278bc96cf593 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1506,7 +1506,7 @@ err_bb:
* using the default engine for the updates, they will be performed in the
* order they grab the job_mutex. If different engines are used, external
* synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
+ * consistency. Note that the meaning of "overlapping" is that the updates
* touch the same page-table, which might be a higher-level page-directory.
* If no pipelining is needed, then updates may be performed by the cpu.
*
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index bfc3deebdaa2..07b27114be9a 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -19,7 +19,7 @@
struct xe_modparam xe_modparam = {
.probe_display = true,
- .guc_log_level = 5,
+ .guc_log_level = 3,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
/* the rest are 0 by default */
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 5cc0f6f9bc11..eeb96b5f49e2 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -16,7 +16,6 @@
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
-#include "regs/xe_lrc_layout.h"
#include "regs/xe_oa_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -28,7 +27,6 @@
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
-#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
@@ -90,6 +88,8 @@ struct xe_oa_open_param {
struct drm_xe_sync __user *syncs_user;
int num_syncs;
struct xe_sync_entry *syncs;
+ size_t oa_buffer_size;
+ int wait_num_reports;
};
struct xe_oa_config_bo {
@@ -234,11 +234,10 @@ static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report)
static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
+ u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
- u32 tail, hw_tail;
unsigned long flags;
bool pollin;
- u32 partial_report_size;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -282,8 +281,8 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;
- pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail,
- stream->oa_buffer.head) >= report_size;
+ available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
+ pollin = available >= stream->wait_num_reports * report_size;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -397,11 +396,19 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
- struct xe_mmio *mmio = &stream->gt->mmio;
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
- u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT;
+ int size_exponent = __ffs(stream->oa_buffer.bo->size);
+ u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
+ struct xe_mmio *mmio = &stream->gt->mmio;
unsigned long flags;
+ /*
+ * If the OA buffer size is more than 16MB (exponent greater than 24),
+ * the hardware treats the programmed size field as multiplied by 8;
+ * xe_oa_enable_metric_set signals this via OAG_OA_DEBUG_BUF_SIZE_SELECT.
+ */
+ oa_buf |= REG_FIELD_PREP(OABUFFER_SIZE_MASK,
+ size_exponent > 24 ? size_exponent - 20 : size_exponent - 17);
+
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0);
@@ -857,15 +864,12 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_file_put(stream->xef);
}
-static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
+static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
{
struct xe_bo *bo;
- BUILD_BUG_ON_NOT_POWER_OF_2(XE_OA_BUFFER_SIZE);
- BUILD_BUG_ON(XE_OA_BUFFER_SIZE < SZ_128K || XE_OA_BUFFER_SIZE > SZ_16M);
-
bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
- XE_OA_BUFFER_SIZE, ttm_bo_type_kernel,
+ size, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1043,6 +1047,13 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
+static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
+{
+ return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
+ stream->oa_buffer.bo->size > SZ_16M ?
+ OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
+}
+
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
{
struct xe_mmio *mmio = &stream->gt->mmio;
@@ -1075,6 +1086,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
_MASKED_BIT_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
+ oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
@@ -1216,6 +1228,28 @@ static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
return 0;
}
+static int xe_oa_set_prop_oa_buffer_size(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!is_power_of_2(value) || value < SZ_128K || value > SZ_128M) {
+ drm_dbg(&oa->xe->drm, "OA buffer size invalid %llu\n", value);
+ return -EINVAL;
+ }
+ param->oa_buffer_size = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_wait_num_reports(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!value) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports %llu\n", value);
+ return -EINVAL;
+ }
+ param->wait_num_reports = value;
+ return 0;
+}
+
static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
@@ -1236,6 +1270,8 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_oa_buffer_size,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_wait_num_reports,
};
static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
@@ -1250,6 +1286,8 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_ret_inval,
};
static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
@@ -1509,7 +1547,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
- struct drm_xe_oa_stream_info info = { .oa_buf_size = XE_OA_BUFFER_SIZE, };
+ struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
void __user *uaddr = (void __user *)arg;
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -1595,7 +1633,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
- if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
+ if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
return -EINVAL;
}
@@ -1633,81 +1671,6 @@ static const struct file_operations xe_oa_fops = {
.mmap = xe_oa_mmap,
};
-static bool engine_supports_mi_query(struct xe_hw_engine *hwe)
-{
- return hwe->class == XE_ENGINE_CLASS_RENDER ||
- hwe->class == XE_ENGINE_CLASS_COMPUTE;
-}
-
-static bool xe_oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
-{
- u32 idx = *offset;
- u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
- bool found = false;
-
- idx++;
- for (; idx < len; idx += 2) {
- if (state[idx] == reg) {
- found = true;
- break;
- }
- }
-
- *offset = idx;
- return found;
-}
-
-#define IS_MI_LRI_CMD(x) (REG_FIELD_GET(MI_OPCODE, (x)) == \
- REG_FIELD_GET(MI_OPCODE, MI_LOAD_REGISTER_IMM))
-
-static u32 xe_oa_context_image_offset(struct xe_oa_stream *stream, u32 reg)
-{
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 len = (xe_gt_lrc_size(stream->gt, stream->hwe->class) +
- lrc->ring.size) / sizeof(u32);
- u32 offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
- u32 *state = (u32 *)lrc->bo->vmap.vaddr;
-
- if (drm_WARN_ON(&stream->oa->xe->drm, !state))
- return U32_MAX;
-
- for (; offset < len; ) {
- if (IS_MI_LRI_CMD(state[offset])) {
- /*
- * We expect reg-value pairs in MI_LRI command, so
- * MI_LRI_LEN() should be even
- */
- drm_WARN_ON(&stream->oa->xe->drm,
- MI_LRI_LEN(state[offset]) & 0x1);
-
- if (xe_oa_find_reg_in_lri(state, reg, &offset, len))
- break;
- } else {
- offset++;
- }
- }
-
- return offset < len ? offset : U32_MAX;
-}
-
-static int xe_oa_set_ctx_ctrl_offset(struct xe_oa_stream *stream)
-{
- struct xe_reg reg = OACTXCONTROL(stream->hwe->mmio_base);
- u32 offset = stream->oa->ctx_oactxctrl_offset[stream->hwe->class];
-
- /* Do this only once. Failure is stored as offset of U32_MAX */
- if (offset)
- goto exit;
-
- offset = xe_oa_context_image_offset(stream, reg.addr);
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] = offset;
-
- drm_dbg(&stream->oa->xe->drm, "%s oa ctx control at 0x%08x dword offset\n",
- stream->hwe->name, offset);
-exit:
- return offset && offset != U32_MAX ? 0 : -ENODEV;
-}
-
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
@@ -1726,6 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->periodic = param->period_exponent > 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
+ stream->wait_num_reports = param->wait_num_reports;
stream->xef = xe_file_get(param->xef);
stream->num_syncs = param->num_syncs;
@@ -1739,20 +1703,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
- XE_OA_BUFFER_SIZE - XE_OA_BUFFER_SIZE % stream->oa_buffer.format->size;
+ param->oa_buffer_size -
+ param->oa_buffer_size % stream->oa_buffer.format->size;
else
- stream->oa_buffer.circ_size = XE_OA_BUFFER_SIZE;
-
- if (stream->exec_q && engine_supports_mi_query(stream->hwe)) {
- /* If we don't find the context offset, just return error */
- ret = xe_oa_set_ctx_ctrl_offset(stream);
- if (ret) {
- drm_err(&stream->oa->xe->drm,
- "xe_oa_set_ctx_ctrl_offset failed for %s\n",
- stream->hwe->name);
- goto exit;
- }
- }
+ stream->oa_buffer.circ_size = param->oa_buffer_size;
stream->oa_config = xe_oa_get_oa_config(stream->oa, param->metric_set);
if (!stream->oa_config) {
@@ -1784,7 +1738,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
goto err_fw_put;
}
- ret = xe_oa_alloc_oa_buffer(stream);
+ ret = xe_oa_alloc_oa_buffer(stream, param->oa_buffer_size);
if (ret)
goto err_fw_put;
@@ -2081,6 +2035,17 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
}
+ if (!param.oa_buffer_size)
+ param.oa_buffer_size = DEFAULT_XE_OA_BUFFER_SIZE;
+
+ if (!param.wait_num_reports)
+ param.wait_num_reports = 1;
+ if (param.wait_num_reports > param.oa_buffer_size / f->size) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports %d\n", param.wait_num_reports);
+ ret = -EINVAL;
+ goto err_exec_q;
+ }
+
ret = xe_oa_parse_syncs(oa, &param);
if (ret)
goto err_exec_q;
@@ -2198,6 +2163,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
+ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
{ .start = 0xE590, .end = 0xE590 }, /* TDL_LSC_LAT_MEASURE_TDL_GFX */
{ .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
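A worked sketch of the size-field encoding introduced in xe_oa_init_oa_buffer(), under the assumption that sizes are powers of two in [128K, 128M] as enforced by xe_oa_set_prop_oa_buffer_size():

/* Derive the OABUFFER_SIZE field for a power-of-two OA buffer size. */
static u32 oa_buf_size_field(size_t size)
{
	int exp = __ffs(size);	/* SZ_128K -> 17, SZ_16M -> 24, SZ_128M -> 27 */

	/*
	 * Above 16M the hardware treats the encoded size as 8x larger,
	 * signalled via OAG_OA_DEBUG_BUF_SIZE_SELECT in
	 * xe_oa_enable_metric_set(), hence the different bias.
	 */
	return exp > 24 ? exp - 20 : exp - 17;
}
/* Examples: SZ_128K -> 0, SZ_16M -> 7, SZ_32M -> 5, SZ_128M -> 7. */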
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index fea9d981e414..52e33c37d5ee 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -15,7 +15,7 @@
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
-#define XE_OA_BUFFER_SIZE SZ_16M
+#define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M
enum xe_oa_report_header {
HDR_32_BIT = 0,
@@ -138,9 +138,6 @@ struct xe_oa {
/** @metrics_idr: List of dynamic configurations (struct xe_oa_config) */
struct idr metrics_idr;
- /** @ctx_oactxctrl_offset: offset of OACTXCONTROL register in context image */
- u32 ctx_oactxctrl_offset[XE_ENGINE_CLASS_MAX];
-
/** @oa_formats: tracks all OA formats across platforms */
const struct xe_oa_format *oa_formats;
@@ -218,6 +215,9 @@ struct xe_oa_stream {
/** @pollin: Whether there is data available to read */
bool pollin;
+ /** @wait_num_reports: Number of reports to wait for before signalling pollin */
+ int wait_num_reports;
+
/** @periodic: Whether periodic sampling is currently enabled */
bool periodic;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 6b7f77425c7f..39be74848e44 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -490,7 +490,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
* least basic xe_gt and xe_guc initialization.
*
* Since to obtain the value of GMDID_MEDIA we need to use the
- * media GuC, temporarly tweak the gt type.
+ * media GuC, temporarily tweak the gt type.
*/
xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
@@ -781,7 +781,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
* error injectable functions is proper handling of the error code by the
* caller for recovery, which is always the case here. The second
* requirement is that no state is changed before the first error return.
- * It is not strictly fullfilled for all initialization functions using the
+ * It is not strictly fulfilled for all initialization functions using the
* ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
* error cases at probe time, the error code is simply propagated up by the
* caller. Therefore there is no consequence on those specific callers when
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index d95d9835de42..9333ce776a6e 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -217,7 +217,7 @@ out:
*
* It returns 0 on success, and -ERROR number on failure, -EINVAL if max
* frequency is higher then the minimal, and other errors directly translated
- * from the PCODE Error returs:
+ * from the PCODE Error returns:
* - -ENXIO: "Illegal Command"
* - -ETIMEDOUT: "Timed out"
* - -EINVAL: "Illegal Data"
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 40f7c844ed44..c9cc0c091dfd 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -7,6 +7,7 @@
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
@@ -390,7 +391,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
- * also checks and delets bo entry from user fault list.
+ * also checks and deletes bo entry from user fault list.
*/
mutex_lock(&xe->mem_access.vram_userfault.lock);
list_for_each_entry_safe(bo, on,
@@ -414,8 +415,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_irq_suspend(xe);
- if (xe->d3cold.allowed)
- xe_display_pm_suspend_late(xe);
+ xe_display_pm_runtime_suspend_late(xe);
+
out:
if (err)
xe_display_pm_runtime_resume(xe);
@@ -607,7 +608,8 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
struct device *dev = xe->drm.dev;
return dev->power.runtime_status == RPM_SUSPENDING ||
- dev->power.runtime_status == RPM_RESUMING;
+ dev->power.runtime_status == RPM_RESUMING ||
+ pm_suspend_target_state != PM_SUSPEND_ON;
#else
return false;
#endif
@@ -738,9 +740,6 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
xe->d3cold.allowed = false;
mutex_unlock(&xe->d3cold.lock);
-
- drm_dbg(&xe->drm,
- "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
/**
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 797576690356..1ddcc7e79a93 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -136,6 +136,7 @@ err_kfree:
xe_pt_free(pt);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);
/**
* xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
@@ -275,7 +276,7 @@ struct xe_pt_stage_bind_walk {
/* Also input, but is updated during the walk*/
/** @curs: The DMA address cursor. */
struct xe_res_cursor *curs;
- /** @va_curs_start: The Virtual address coresponding to @curs->start */
+ /** @va_curs_start: The Virtual address corresponding to @curs->start */
u64 va_curs_start;
/* Output */
@@ -1850,6 +1851,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2130,6 +2132,7 @@ kill_vm_tile1:
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);
/**
* xe_pt_update_ops_fini() - Finish PT update operations
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 170ae72d1a7b..c059639613f7 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -23,6 +23,7 @@
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
+#include "xe_oa.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
@@ -670,7 +671,9 @@ static int query_oa_units(struct xe_device *xe,
du->oa_unit_id = u->oa_unit_id;
du->oa_unit_type = u->type;
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
- du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS;
+ du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
+ DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
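On the userspace side, the new open properties are expected to be gated on these capability bits. A minimal sketch, assuming du points at a drm_xe_oa_unit returned by the OA-units query and add_prop() is a hypothetical helper that appends a (property, value) pair:

/* Only request the new behavior when the kernel advertises it. */
if (du->capabilities & DRM_XE_OA_CAPS_OA_BUFFER_SIZE)
	add_prop(props, DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE, SZ_64M);
if (du->capabilities & DRM_XE_OA_CAPS_WAIT_NUM_REPORTS)
	add_prop(props, DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS, 4);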
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index c13123008e90..9475e3f74958 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -24,7 +24,6 @@
#include "xe_hw_engine_types.h"
#include "xe_macros.h"
#include "xe_mmio.h"
-#include "xe_reg_whitelist.h"
#include "xe_rtp_types.h"
static void reg_sr_fini(struct drm_device *drm, void *arg)
@@ -192,58 +191,6 @@ err_force_wake:
xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n");
}
-void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
-{
- struct xe_reg_sr *sr = &hwe->reg_whitelist;
- struct xe_gt *gt = hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_reg_sr_entry *entry;
- struct drm_printer p;
- u32 mmio_base = hwe->mmio_base;
- unsigned long reg;
- unsigned int slot = 0;
- unsigned int fw_ref;
-
- if (xa_empty(&sr->xa))
- return;
-
- drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
- goto err_force_wake;
-
- p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
- xa_for_each(&sr->xa, reg, entry) {
- if (slot == RING_MAX_NONPRIV_SLOTS) {
- xe_gt_err(gt,
- "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
- hwe->name, RING_MAX_NONPRIV_SLOTS);
- break;
- }
-
- xe_reg_whitelist_print_entry(&p, 0, reg, entry);
- xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot),
- reg | entry->set_bits);
- slot++;
- }
-
- /* And clear the rest just in case of garbage */
- for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) {
- u32 addr = RING_NOPID(mmio_base).addr;
-
- xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
- }
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
- return;
-
-err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- drm_err(&xe->drm, "Failed to apply, err=-ETIMEDOUT\n");
-}
-
/**
* xe_reg_sr_dump - print all save/restore entries
* @sr: Save/restore entries
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index 3996934974fa..edab5d4e3ba5 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -10,7 +10,9 @@
#include "regs/xe_oa_regs.h"
#include "regs/xe_regs.h"
#include "xe_gt_types.h"
+#include "xe_gt_printk.h"
#include "xe_platform_types.h"
+#include "xe_reg_sr.h"
#include "xe_rtp.h"
#include "xe_step.h"
@@ -89,6 +91,40 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
{}
};
+static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
+{
+ struct xe_reg_sr *sr = &hwe->reg_whitelist;
+ struct xe_reg_sr_entry *entry;
+ struct drm_printer p;
+ unsigned long reg;
+ unsigned int slot;
+
+ xe_gt_dbg(hwe->gt, "Add %s whitelist to engine\n", sr->name);
+ p = xe_gt_dbg_printer(hwe->gt);
+
+ slot = 0;
+ xa_for_each(&sr->xa, reg, entry) {
+ struct xe_reg_sr_entry hwe_entry = {
+ .reg = RING_FORCE_TO_NONPRIV(hwe->mmio_base, slot),
+ .set_bits = entry->reg.addr | entry->set_bits,
+ .clr_bits = ~0u,
+ .read_mask = entry->read_mask,
+ };
+
+ if (slot == RING_MAX_NONPRIV_SLOTS) {
+ xe_gt_err(hwe->gt,
+ "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
+ hwe->name, RING_MAX_NONPRIV_SLOTS);
+ break;
+ }
+
+ xe_reg_whitelist_print_entry(&p, 0, reg, entry);
+ xe_reg_sr_add(&hwe->reg_sr, &hwe_entry, hwe->gt);
+
+ slot++;
+ }
+}
+
/**
* xe_reg_whitelist_process_engine - process table of registers to whitelist
* @hwe: engine instance to process whitelist for
@@ -102,6 +138,7 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ whitelist_apply_to_hwe(hwe);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index b13d4d62f0b1..7a1c78fdfc92 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -340,3 +340,8 @@ bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
return dss >= dss_per_gslice;
}
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return !IS_SRIOV_VF(gt_to_xe(gt));
+}
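Match functions like this are referenced from RTP rule tables through the FUNC() rule. An illustrative entry follows; the register, bit, and graphics version are placeholders, not from this patch:

{ XE_RTP_NAME("example-native-only-tuning"),
  XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_not_sriov_vf)),
  XE_RTP_ACTIONS(SET(EXAMPLE_REG, EXAMPLE_BIT))
},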
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 827d932b6908..38b9f13bba5e 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -131,7 +131,7 @@ struct xe_reg_sr;
* @ver_end__: Last graphics IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -169,7 +169,7 @@ struct xe_reg_sr;
* @ver_end__: Last media IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -476,4 +476,15 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
+/*
+ * xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
+ *
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if device is not VF, false otherwise.
+ */
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index ef10782af656..04e2f539ccd9 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -14,6 +14,7 @@
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_vf.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -114,6 +115,9 @@ int xe_sriov_init(struct xe_device *xe)
return err;
}
+ if (IS_SRIOV_VF(xe))
+ xe_sriov_vf_init_early(xe);
+
xe_assert(xe, !xe->sriov.wq);
xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
if (!xe->sriov.wq)
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
index 7d156ba82479..dd1df950b021 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -20,7 +20,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_sriov_pf_assert_vfid(xe, vfid) \
xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index c7b7ad4af5c8..ca94382a721e 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -9,6 +9,7 @@
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* VFID - Virtual Function Identifier
@@ -56,4 +57,20 @@ struct xe_device_pf {
struct mutex master_lock;
};
+/**
+ * struct xe_device_vf - Xe Virtual Function related data
+ *
+ * The data in this structure is valid only if driver is running in the
+ * @XE_SRIOV_MODE_VF mode.
+ */
+struct xe_device_vf {
+ /** @migration: VF Migration state data */
+ struct {
+ /** @migration.worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
+ unsigned long gt_flags;
+ } migration;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
new file mode 100644
index 000000000000..c1275e64aa9c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_sriov_vf.h"
+
+/**
+ * DOC: VF restore procedure in PF KMD and VF KMD
+ *
+ * Restoring the previously saved state of a VF is one of the core features
+ * of SR-IOV. All major VM Management applications allow saving and restoring
+ * the VM state, and doing that to a VM which uses an SR-IOV VF as one of
+ * its accessible devices requires support from KMD on both PF and VF side.
+ * VMM initiates all required operations through VFIO module, which then
+ * translates them into PF KMD calls. This description will focus on these
+ * calls, leaving out the module which initiates these steps (VFIO).
+ *
+ * In order to start the restore procedure, GuC needs to keep the VF in
+ * the proper state. The PF driver can ensure GuC sets it to the VF_READY
+ * state by provisioning the VF, which in turn can be done after Function Level
+ * Reset of said VF (or after it was freshly created - in that case FLR
+ * is not needed). The FLR procedure ends with GuC sending message
+ * `GUC_PF_NOTIFY_VF_FLR_DONE`, and then provisioning data is sent to GuC.
+ * After the provisioning is completed, the VF needs to be paused, and
+ * at that point the actual restore can begin.
+ *
+ * During VF Restore, state of several resources is restored. These may
+ * include local memory content (system memory is restored by VMM itself),
+ * values of MMIO registers, stateless compression metadata and others.
+ * The final resource which also needs restoring is state of the VF
+ * submission maintained within GuC. For that, `GUC_PF_OPCODE_VF_RESTORE`
+ * message is used, with reference to the state blob to be consumed by
+ * GuC.
+ *
+ * Next, when VFIO is asked to set the VM into running state, the PF driver
+ * sends `GUC_PF_TRIGGER_VF_RESUME` to GuC. When sent after restore, this
+ * changes VF state within GuC to `VF_RESFIX_BLOCKED` rather than the
+ * usual `VF_RUNNING`. At this point GuC triggers an interrupt to inform
+ * the VF KMD within the VM that it was migrated.
+ *
+ * As soon as the Virtual GPU of the VM starts, the VF driver within it
+ * receives the MIGRATED interrupt and schedules the post-migration
+ * recovery worker.
+ * That worker queries GuC for new provisioning (using MMIO communication),
+ * and applies fixups to any non-virtualized resources used by the VF.
+ *
+ * When the VF driver is ready to continue operation on the newly connected
+ * hardware, it sends `VF2GUC_NOTIFY_RESFIX_DONE`, which causes the VF to
+ * enter the long-awaited `VF_RUNNING` state and therefore start handling
+ * CTB messages and scheduling workloads from the VF::
+ *
+ * PF GuC VF
+ * [ ] | |
+ * [ ] PF2GUC_VF_CONTROL(pause) | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_READY_PAUSED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] success [ ] |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] PF loads resources from the | |
+ * [ ]------- saved image supplied | |
+ * [ ] | | |
+ * [ ] <----- | |
+ * [ ] | |
+ * [ ] GUC_PF_OPCODE_VF_RESTORE | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC loads contexts and CTB |
+ * [ ] [ ]------- state from image |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_PAUSED |
+ * [ ] [ ] | |
+ * [ ] success [ ] <----- |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] GUC_PF_TRIGGER_VF_RESUME | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_BLOCKED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GUC_INTR_SW_INT_0 |
+ * [ ] success [ ]---------------------------> [ ]
+ * [ ] <---------------------------[ ] [ ]
+ * | | VF2GUC_QUERY_SINGLE_KLV [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] new VF provisioning [ ]
+ * | [ ]---------------------------> [ ]
+ * | | [ ]
+ * | | VF driver applies post [ ]
+ * | | migration fixups -------[ ]
+ * | | | [ ]
+ * | | -----> [ ]
+ * | | [ ]
+ * | | VF2GUC_NOTIFY_RESFIX_DONE [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] GuC sets new VF state to [ ]
+ * | [ ]------- VF_RUNNING [ ]
+ * | [ ] | [ ]
+ * | [ ] <----- [ ]
+ * | [ ] success [ ]
+ * | [ ]---------------------------> [ ]
+ * | | |
+ * | | |
+ */
+
+static void migration_worker_func(struct work_struct *w);
+
+/**
+ * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
+ * @xe: the &xe_device to initialize
+ */
+void xe_sriov_vf_init_early(struct xe_device *xe)
+{
+ INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+}
+
+/**
+ * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
+ * @xe: the &xe_device struct instance
+ *
+ * After migration, we need to re-query all VF configuration to make sure
+ * it matches the previous provisioning. Note that most of the VF
+ * provisioning shall be the same, except the GGTT range, since GGTT is
+ * not virtualized per-VF.
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+static int vf_post_migration_requery_guc(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err, ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_vf_query_config(gt);
+ ret = ret ?: err;
+ }
+
+ return ret;
+}
+
+/*
+ * vf_post_migration_imminent - Check if post-restore recovery is coming.
+ * @xe: the &xe_device struct instance
+ *
+ * Return: True if the migration recovery worker will soon be running. Any
+ * worker currently executing does not affect the result.
+ */
+static bool vf_post_migration_imminent(struct xe_device *xe)
+{
+ return xe->sriov.vf.migration.gt_flags != 0 ||
+ work_pending(&xe->sriov.vf.migration.worker);
+}
+
+/*
+ * Notify all GuCs that applying the resource fixups has finished.
+ */
+static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (vf_post_migration_imminent(xe))
+ goto skip;
+ xe_gt_sriov_vf_notify_resfix_done(gt);
+ }
+ return;
+
+skip:
+ drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
+}
+
+static void vf_post_migration_recovery(struct xe_device *xe)
+{
+ int err;
+
+ drm_dbg(&xe->drm, "migration recovery in progress\n");
+ xe_pm_runtime_get(xe);
+ err = vf_post_migration_requery_guc(xe);
+ if (vf_post_migration_imminent(xe))
+ goto defer;
+ if (unlikely(err))
+ goto fail;
+
+ /* FIXME: add the recovery steps */
+ vf_post_migration_notify_resfix_done(xe);
+ xe_pm_runtime_put(xe);
+ drm_notice(&xe->drm, "migration recovery ended\n");
+ return;
+defer:
+ xe_pm_runtime_put(xe);
+ drm_dbg(&xe->drm, "migration recovery deferred\n");
+ return;
+fail:
+ xe_pm_runtime_put(xe);
+ drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
+ xe_device_declare_wedged(xe);
+}
+
+static void migration_worker_func(struct work_struct *w)
+{
+ struct xe_device *xe = container_of(w, struct xe_device,
+ sriov.vf.migration.worker);
+
+ vf_post_migration_recovery(xe);
+}
+
+static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
+ xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
+ * @xe: the &xe_device to start recovery on
+ *
+ * This function shall be called only by VF.
+ */
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
+{
+ bool started;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ if (!vf_ready_to_recovery_on_all_gts(xe))
+ return;
+
+ WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
+ /* Ensure other threads see that no flags are set now. */
+ smp_mb();
+
+ started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
+ drm_info(&xe->drm, "VF migration recovery %s\n", started ?
+ "scheduled" : "already in progress");
+}
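For context, the producing side of the gt_flags handshake lives in the per-GT interrupt path. A minimal sketch of that producer, assuming it runs from the GuC MIGRATED notification; the function name is illustrative:

/* Per-GT side: mark this GT ready, then try to kick the device worker. */
static void vf_migrated_event(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
	/*
	 * Ensure the flag write is visible before the readiness check in
	 * xe_sriov_vf_start_migration_recovery() runs.
	 */
	smp_mb__after_atomic();

	xe_sriov_vf_start_migration_recovery(xe);
}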
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
new file mode 100644
index 000000000000..7b8622cff2b7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_H_
+#define _XE_SRIOV_VF_H_
+
+struct xe_device;
+
+void xe_sriov_vf_init_early(struct xe_device *xe);
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 91130ad8999c..d5281de04d54 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -211,6 +211,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__string(dev, __dev_name_eq(job->q))
__field(u32, seqno)
__field(u32, lrc_seqno)
+ __field(u8, gt_id)
__field(u16, guc_id)
__field(u32, guc_state)
__field(u32, flags)
@@ -223,6 +224,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__assign_str(dev);
__entry->seqno = xe_sched_job_seqno(job);
__entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
+ __entry->gt_id = job->q->gt->info.id;
__entry->guc_id = job->q->guc->id;
__entry->guc_state =
atomic_read(&job->q->guc->state);
@@ -232,9 +234,9 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__entry->batch_addr = (u64)job->ptrs[0].batch_addr;
),
- TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
__get_str(dev), __entry->fence, __entry->seqno,
- __entry->lrc_seqno, __entry->guc_id,
+ __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
);
@@ -282,6 +284,7 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
__field(u32, opcode)
__field(u16, guc_id)
+ __field(u8, gt_id)
),
TP_fast_assign(
@@ -289,9 +292,11 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__entry->opcode = msg->opcode;
__entry->guc_id =
((struct xe_exec_queue *)msg->private_data)->guc->id;
+ __entry->gt_id =
+ ((struct xe_exec_queue *)msg->private_data)->gt->info.id;
),
- TP_printk("dev=%s, guc_id=%d, opcode=%u", __get_str(dev), __entry->guc_id,
+ TP_printk("dev=%s, gt=%u guc_id=%d, opcode=%u", __get_str(dev), __entry->gt_id, __entry->guc_id,
__entry->opcode)
);
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index 30a3cfbaaa09..ea50fee50c7d 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -48,6 +48,11 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
+DEFINE_EVENT(xe_bo, xe_bo_validate,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo)
+);
+
TRACE_EVENT(xe_bo_move,
TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
bool move_lacks_source),
@@ -55,8 +60,8 @@ TRACE_EVENT(xe_bo_move,
TP_STRUCT__entry(
__field(struct xe_bo *, bo)
__field(size_t, size)
- __field(u32, new_placement)
- __field(u32, old_placement)
+ __string(new_placement_name, xe_mem_type_to_name[new_placement])
+ __string(old_placement_name, xe_mem_type_to_name[old_placement])
__string(device_id, __dev_name_bo(bo))
__field(bool, move_lacks_source)
),
@@ -64,15 +69,15 @@ TRACE_EVENT(xe_bo_move,
TP_fast_assign(
__entry->bo = bo;
__entry->size = bo->size;
- __entry->new_placement = new_placement;
- __entry->old_placement = old_placement;
+ __assign_str(new_placement_name);
+ __assign_str(old_placement_name);
__assign_str(device_id);
__entry->move_lacks_source = move_lacks_source;
),
TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
__entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
- xe_mem_type_to_name[__entry->old_placement],
- xe_mem_type_to_name[__entry->new_placement], __get_str(device_id))
+ __get_str(old_placement_name),
+ __get_str(new_placement_name), __get_str(device_id))
);
DECLARE_EVENT_CLASS(xe_vma,
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.c b/drivers/gpu/drm/xe/xe_trace_lrc.c
new file mode 100644
index 000000000000..ab9b7e2970bc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "xe_trace_lrc.h"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h
new file mode 100644
index 000000000000..5c669a0b2180
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xe
+
+#if !defined(_XE_TRACE_LRC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _XE_TRACE_LRC_H_
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+#include "xe_gt_types.h"
+#include "xe_lrc.h"
+#include "xe_lrc_types.h"
+
+#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
+
+TRACE_EVENT(xe_lrc_update_timestamp,
+ TP_PROTO(struct xe_lrc *lrc, uint32_t old),
+ TP_ARGS(lrc, old),
+ TP_STRUCT__entry(
+ __field(struct xe_lrc *, lrc)
+ __field(u32, old)
+ __field(u32, new)
+ __string(name, lrc->fence_ctx.name)
+ __string(device_id, __dev_name_lrc(lrc))
+ ),
+
+ TP_fast_assign(
+ __entry->lrc = lrc;
+ __entry->old = old;
+ __entry->new = lrc->ctx_timestamp;
+ __assign_str(name);
+ __assign_str(device_id);
+ ),
+ TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s",
+ __entry->lrc, __get_str(name),
+ __entry->old, __entry->new,
+ __get_str(device_id))
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
+#define TRACE_INCLUDE_FILE xe_trace_lrc
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 423b261ea743..f4a16e5fa770 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/drm_drv.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
@@ -52,7 +53,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
struct xe_ttm_vram_mgr_resource *vres;
struct drm_buddy *mm = &mgr->mm;
- u64 size, remaining_size, min_page_size;
+ u64 size, min_page_size;
unsigned long lpfn;
int err;
@@ -98,17 +99,6 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
goto error_fini;
}
- if (WARN_ON(min_page_size > SZ_2G)) { /* FIXME: sg limit */
- err = -EINVAL;
- goto error_fini;
- }
-
- if (WARN_ON((size > SZ_2G &&
- (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS)))) {
- err = -EINVAL;
- goto error_fini;
- }
-
if (WARN_ON(!IS_ALIGNED(size, min_page_size))) {
err = -EINVAL;
goto error_fini;
@@ -116,12 +106,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
mutex_lock(&mgr->lock);
if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {
- mutex_unlock(&mgr->lock);
err = -ENOSPC;
- goto error_fini;
+ goto error_unlock;
}
- if (place->fpfn + (size >> PAGE_SHIFT) != place->lpfn &&
+ if (place->fpfn + (size >> PAGE_SHIFT) != lpfn &&
place->flags & TTM_PL_FLAG_CONTIGUOUS) {
size = roundup_pow_of_two(size);
min_page_size = size;
@@ -129,25 +118,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);
}
- remaining_size = size;
- do {
- /*
- * Limit maximum size to 2GiB due to SG table limitations.
- * FIXME: Should maybe be handled as part of sg construction.
- */
- u64 alloc_size = min_t(u64, remaining_size, SZ_2G);
-
- err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
- (u64)lpfn << PAGE_SHIFT,
- alloc_size,
- min_page_size,
- &vres->blocks,
- vres->flags);
- if (err)
- goto error_free_blocks;
-
- remaining_size -= alloc_size;
- } while (remaining_size);
+ err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
+ (u64)lpfn << PAGE_SHIFT, size,
+ min_page_size, &vres->blocks, vres->flags);
+ if (err)
+ goto error_unlock;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks))
@@ -194,9 +169,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
*res = &vres->base;
return 0;
-
-error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks, 0);
+error_unlock:
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -339,6 +312,13 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ if (mem_type != XE_PL_STOLEN) {
+ const char *name = mem_type == XE_PL_VRAM0 ? "vram0" : "vram1";
+ man->cg = drmm_cgroup_register_region(&xe->drm, name, size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
+ }
+
man->func = &xe_ttm_vram_mgr_func;
mgr->mem_type = mem_type;
mutex_init(&mgr->lock);
@@ -393,7 +373,8 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
- xe_res_next(&cursor, cursor.size);
+ /* Limit maximum size to 2GiB due to SG table limitations. */
+ xe_res_next(&cursor, min_t(u64, cursor.size, SZ_2G));
}
r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
@@ -413,7 +394,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
- size_t size = cursor.size;
+ size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
addr = dma_map_resource(dev, phys, size, dir,
@@ -426,7 +407,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
- xe_res_next(&cursor, cursor.size);
+ xe_res_next(&cursor, size);
}
return 0;
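The hunks above drop the 2GiB cap from the buddy allocation path and instead honor the SG-table limit while walking the resource cursor: each SG entry now covers at most SZ_2G of a span. A standalone sketch of that chunking rule (illustrative only, not driver code):

	/* Count the SG entries needed when each entry maps at most SZ_2G. */
	static unsigned int count_sg_entries(u64 remaining)
	{
		unsigned int num_entries = 0;

		while (remaining) {
			u64 chunk = min_t(u64, remaining, SZ_2G);

			num_entries++;
			remaining -= chunk;
		}

		return num_entries;
	}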
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 0d8caa0e7354..ad3b35a0e6eb 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -92,7 +92,7 @@ struct xe_uc_fw {
const enum xe_uc_fw_status status;
/**
* @__status: private firmware load status - only to be used
- * by firmware laoding code
+ * by firmware loading code
*/
enum xe_uc_fw_status __status;
};
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c99380271de6..690330352d4c 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -10,7 +10,6 @@
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
@@ -733,13 +732,14 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
vops->pt_update_ops[i].ops =
kmalloc_array(vops->pt_update_ops[i].num_ops,
sizeof(*vops->pt_update_ops[i].ops),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!vops->pt_update_ops[i].ops)
return array_of_binds ? -ENOBUFS : -ENOMEM;
}
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
@@ -1024,7 +1024,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
/*
* Since userptr pages are not pinned, we can't remove
- * the notifer until we're sure the GPU is not accessing
+ * the notifier until we're sure the GPU is not accessing
* them anymore
*/
mmu_interval_notifier_remove(&userptr->notifier);
@@ -1352,6 +1352,7 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
static void xe_vm_free_scratch(struct xe_vm *vm)
{
@@ -1978,6 +1979,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
return ops;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
u16 pat_index, unsigned int flags)
@@ -2105,7 +2107,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
}
}
- /* Adjust for partial unbind after removin VMA from VM */
+ /* Adjust for partial unbind after removing VMA from VM */
if (!err) {
op->base.remap.unmap->va->va.addr = op->remap.start;
op->base.remap.unmap->va->va.range = op->remap.range;
@@ -2357,13 +2359,15 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
bool validate)
{
struct xe_bo *bo = xe_vma_bo(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
int err = 0;
if (bo) {
if (!bo->vm)
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
- err = xe_bo_validate(bo, xe_vma_vm(vma), true);
+ err = xe_bo_validate(bo, vm,
+ !xe_vm_in_preempt_fence_mode(vm));
}
return err;
@@ -2697,6 +2701,7 @@ unlock:
drm_exec_fini(&exec);
return err;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
#define SUPPORTED_FLAGS_STUB \
(DRM_XE_VM_BIND_FLAG_READONLY | \
@@ -2733,7 +2738,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
*bind_ops = kvmalloc_array(args->num_binds,
sizeof(struct drm_xe_vm_bind_op),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
@@ -2973,14 +2979,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (args->num_binds) {
bos = kvcalloc(args->num_binds, sizeof(*bos),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
ops = kvcalloc(args->num_binds, sizeof(*ops),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -3303,7 +3311,6 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
for (int i = 0; i < snap->num_snaps; i++) {
struct xe_bo *bo = snap->snap[i].bo;
- struct iosys_map src;
int err;
if (IS_ERR(snap->snap[i].data))
@@ -3316,16 +3323,8 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
}
if (bo) {
- xe_bo_lock(bo, false);
- err = ttm_bo_vmap(&bo->ttm, &src);
- if (!err) {
- xe_map_memcpy_from(xe_bo_device(bo),
- snap->snap[i].data,
- &src, snap->snap[i].bo_ofs,
- snap->snap[i].len);
- ttm_bo_vunmap(&bo->ttm, &src);
- }
- xe_bo_unlock(bo);
+ err = xe_bo_read(bo, snap->snap[i].bo_ofs,
+ snap->snap[i].data, snap->snap[i].len);
} else {
void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index c864dba35e1d..23adb7442881 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -17,7 +17,6 @@ struct drm_printer;
struct drm_file;
struct ttm_buffer_object;
-struct ttm_validate_buffer;
struct xe_exec_queue;
struct xe_file;
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 4d33f310b653..078786958403 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -64,8 +64,8 @@
* update page level 2 PDE[1] to page level 3b phys address (GPU)
*
* bind BO2 0x1ff000-0x201000
- * update page level 3a PTE[511] to BO2 phys addres (GPU)
- * update page level 3b PTE[0] to BO2 phys addres + 0x1000 (GPU)
+ * update page level 3a PTE[511] to BO2 phys address (GPU)
+ * update page level 3b PTE[0] to BO2 phys address + 0x1000 (GPU)
*
* GPU bypass
* ~~~~~~~~~~
@@ -192,7 +192,7 @@
*
* If a VM is in fault mode (TODO: link to fault mode), new bind operations that
* create mappings are by default deferred to the page fault handler (first
- * use). This behavior can be overriden by setting the flag
+ * use). This behavior can be overridden by setting the flag
* DRM_XE_VM_BIND_FLAG_IMMEDIATE which requests creating the mapping
* immediately.
*
@@ -209,7 +209,7 @@
*
* Since this a core kernel managed memory the kernel can move this memory
* whenever it wants. We register an invalidation MMU notifier to alert XE when
- * a user poiter is about to move. The invalidation notifier needs to block
+ * a user pointer is about to move. The invalidation notifier needs to block
* until all pending users (jobs or compute mode engines) of the userptr are
* idle to ensure no faults. This done by waiting on all of VM's dma-resv slots.
*
@@ -252,7 +252,7 @@
* Rebind worker
* -------------
*
- * The rebind worker is very similar to an exec. It is resposible for rebinding
+ * The rebind worker is very similar to an exec. It is responsible for rebinding
* evicted BOs or userptrs, waiting on those operations, installing new preempt
* fences, and finally resuming executing of engines in the VM.
*
@@ -317,11 +317,11 @@
* are not allowed, only long running workloads and ULLS are enabled on a faulting
* VM.
*
- * Defered VM binds
+ * Deferred VM binds
* ----------------
*
* By default, on a faulting VM binds just allocate the VMA and the actual
- * updating of the page tables is defered to the page fault handler. This
+ * updating of the page tables is deferred to the page fault handler. This
* behavior can be overridden by setting the flag DRM_XE_VM_BIND_FLAG_IMMEDIATE in
* the VM bind which will then do the bind immediately.
*
@@ -500,18 +500,18 @@
* Slot waiting
* ------------
*
- * 1. The exection of all jobs from kernel ops shall wait on all slots
+ * 1. The execution of all jobs from kernel ops shall wait on all slots
* (DMA_RESV_USAGE_PREEMPT_FENCE) of either an external BO or VM (depends on if
* kernel op is operating on external or private BO)
*
- * 2. In non-compute mode, the exection of all jobs from rebinds in execs shall
+ * 2. In non-compute mode, the execution of all jobs from rebinds in execs shall
* wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO or VM
* (depends on if the rebind is operating on an external or private BO)
*
- * 3. In non-compute mode, the exection of all jobs from execs shall wait on the
+ * 3. In non-compute mode, the execution of all jobs from execs shall wait on the
* last rebind job
*
- * 4. In compute mode, the exection of all jobs from rebinds in the rebind
+ * 4. In compute mode, the execution of all jobs from rebinds in the rebind
* worker shall wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO
* or VM (depends on if rebind is operating on external or private BO)
*
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
new file mode 100644
index 000000000000..b378848d3b7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright © 2024 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/intel_vsec.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_drv.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_pm.h"
+#include "xe_vsec.h"
+
+#include "regs/xe_pmt.h"
+
+/* PMT GUID value for BMG devices. NOTE: this is NOT a PCI id */
+#define BMG_DEVICE_ID 0xE2F8
+
+static struct intel_vsec_header bmg_telemetry = {
+ .length = 0x10,
+ .id = VSEC_ID_TELEMETRY,
+ .num_entries = 2,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET,
+};
+
+static struct intel_vsec_header bmg_punit_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x60,
+};
+
+static struct intel_vsec_header bmg_oobmsm_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x78,
+};
+
+static struct intel_vsec_header *bmg_capabilities[] = {
+ &bmg_telemetry,
+ &bmg_punit_crashlog,
+ &bmg_oobmsm_crashlog,
+ NULL
+};
+
+enum xe_vsec {
+ XE_VSEC_UNKNOWN = 0,
+ XE_VSEC_BMG,
+};
+
+static struct intel_vsec_platform_info xe_vsec_info[] = {
+ [XE_VSEC_BMG] = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_CRASHLOG,
+ .headers = bmg_capabilities,
+ },
+ { }
+};
+
+/*
+ * The GUID will have the following bits to decode:
+ * [0:3] - {Telemetry space iteration number (0,1,..)}
+ * [4:7] - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
+ * [8:11] - SOC_SKU
+ * [12:27] - Device ID - changes for each down-bin SKU
+ * [28:29] - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
+ * [30:31] - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
+ */
+#define GUID_TELEM_ITERATION GENMASK(3, 0)
+#define GUID_SEGMENT GENMASK(7, 4)
+#define GUID_SOC_SKU GENMASK(11, 8)
+#define GUID_DEVICE_ID GENMASK(27, 12)
+#define GUID_CAP_TYPE GENMASK(29, 28)
+#define GUID_RECORD_ID GENMASK(31, 30)
+
+#define PUNIT_TELEMETRY_OFFSET 0x0200
+#define PUNIT_WATCHER_OFFSET 0x14A0
+#define OOBMSM_0_WATCHER_OFFSET 0x18D8
+#define OOBMSM_1_TELEMETRY_OFFSET 0x1000
+
+enum record_id {
+ PUNIT,
+ OOBMSM_0,
+ OOBMSM_1,
+};
+
+enum capability {
+ CRASHLOG,
+ TELEMETRY,
+ WATCHER,
+};
+
+static int xe_guid_decode(u32 guid, int *index, u32 *offset)
+{
+ u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
+ u32 cap_type = FIELD_GET(GUID_CAP_TYPE, guid);
+ u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);
+
+ if (device_id != BMG_DEVICE_ID)
+ return -ENODEV;
+
+ if (cap_type > WATCHER)
+ return -EINVAL;
+
+ *offset = 0;
+
+ if (cap_type == CRASHLOG) {
+ *index = record_id == PUNIT ? 2 : 4;
+ return 0;
+ }
+
+ switch (record_id) {
+ case PUNIT:
+ *index = 0;
+ if (cap_type == TELEMETRY)
+ *offset = PUNIT_TELEMETRY_OFFSET;
+ else
+ *offset = PUNIT_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_0:
+ *index = 1;
+ if (cap_type == WATCHER)
+ *offset = OOBMSM_0_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_1:
+ *index = 1;
+ if (cap_type == TELEMETRY)
+ *offset = OOBMSM_1_TELEMETRY_OFFSET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
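As a worked example of the GUID layout documented above, take the made-up value 0x5E2F8210 (illustrative only, not a real GUID):

	u32 guid = 0x5E2F8210;	/* hypothetical value */

	FIELD_GET(GUID_DEVICE_ID, guid);	/* 0xE2F8 == BMG_DEVICE_ID */
	FIELD_GET(GUID_CAP_TYPE, guid);		/* 1 == TELEMETRY */
	FIELD_GET(GUID_RECORD_ID, guid);	/* 1 == OOBMSM_0 */

For this value xe_guid_decode() would select index 1 and leave the offset at 0, since only the watcher capability uses OOBMSM_0_WATCHER_OFFSET.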
+
+static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
+ u32 mem_region;
+ u32 offset;
+ int ret;
+
+ ret = xe_guid_decode(guid, &mem_region, &offset);
+ if (ret)
+ return ret;
+
+ telem_addr += offset + user_offset;
+
+ guard(mutex)(&xe->pmt.lock);
+
+ /* return -ENODATA to indicate we are not at an appropriate power level */
+ if (!xe_pm_runtime_get_if_active(xe))
+ return -ENODATA;
+
+ /* set SoC re-mapper index register based on GUID memory region */
+ xe_mmio_rmw32(xe_root_tile_mmio(xe), SG_REMAP_INDEX1, SG_REMAP_BITS,
+ REG_FIELD_PREP(SG_REMAP_BITS, mem_region));
+
+ memcpy_fromio(data, telem_addr, count);
+ xe_pm_runtime_put(xe);
+
+ return count;
+}
+
+static struct pmt_callbacks xe_pmt_cb = {
+ .read_telem = xe_pmt_telem_read,
+};
+
+static const int vsec_platforms[] = {
+ [XE_BATTLEMAGE] = XE_VSEC_BMG,
+};
+
+static enum xe_vsec get_platform_info(struct xe_device *xe)
+{
+ if (xe->info.platform > XE_BATTLEMAGE)
+ return XE_VSEC_UNKNOWN;
+
+ return vsec_platforms[xe->info.platform];
+}
+
+/**
+ * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
+ * interface
+ * @xe: valid xe instance
+ */
+void xe_vsec_init(struct xe_device *xe)
+{
+ struct intel_vsec_platform_info *info;
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ enum xe_vsec platform;
+
+ platform = get_platform_info(xe);
+ if (platform == XE_VSEC_UNKNOWN)
+ return;
+
+ info = &xe_vsec_info[platform];
+ if (!info->headers)
+ return;
+
+ switch (platform) {
+ case XE_VSEC_BMG:
+ info->priv_data = &xe_pmt_cb;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Register a VSEC. Cleanup is handled using device managed
+ * resources.
+ */
+ intel_vsec_register(pdev, info);
+}
+MODULE_IMPORT_NS("INTEL_VSEC");
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
new file mode 100644
index 000000000000..5777c53faec2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef _XE_VSEC_H_
+#define _XE_VSEC_H_
+
+struct xe_device;
+
+void xe_vsec_init(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 02cf647f86d8..570fe0376402 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -607,6 +607,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
},
+ { XE_RTP_NAME("16024792527"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(FIELD_SET(SAMPLER_MODE, SMP_WAIT_FETCH_MERGING_COUNTER,
+ SMP_FORCE_128B_OVERFETCH))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index bcd04464b85e..40438c3d9b72 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,3 +1,4 @@
+1607983814 GRAPHICS_VERSION_RANGE(1200, 1210)
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
@@ -33,7 +34,7 @@
GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
- MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
14019789679 GRAPHICS_VERSION(1255)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index aab79c5e34c2..1bda7ef606cc 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -478,7 +478,6 @@ static const struct drm_driver xen_drm_driver = {
.fops = &xen_drm_dev_fops,
.name = "xendrm-du",
.desc = "Xen PV DRM Display Unit",
- .date = "20180221",
.major = 1,
.minor = 0,
@@ -525,11 +524,6 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
if (ret)
goto fail_register;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- xen_drm_driver.name, xen_drm_driver.major,
- xen_drm_driver.minor, xen_drm_driver.patchlevel,
- xen_drm_driver.date, drm_dev->primary->index);
-
return 0;
fail_register:
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index 4197f44e202f..dbecca9bdd54 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -17,3 +17,12 @@ config DRM_ZYNQMP_DPSUB
This is a DRM/KMS driver for ZynqMP DisplayPort controller. Choose
this option if you have a Xilinx ZynqMP SoC with DisplayPort
subsystem.
+
+config DRM_ZYNQMP_DPSUB_AUDIO
+ bool "ZynqMP DisplayPort Audio Support"
+ depends on DRM_ZYNQMP_DPSUB
+ depends on SND && SND_SOC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Choose this option to enable DisplayPort audio support in the ZynqMP
+ DisplayPort driver.
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
index ea1422a39502..ab6e2ffd7e8d 100644
--- a/drivers/gpu/drm/xlnx/Makefile
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -1,2 +1,3 @@
zynqmp-dpsub-y := zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o zynqmp_kms.o
+zynqmp-dpsub-$(CONFIG_DRM_ZYNQMP_DPSUB_AUDIO) += zynqmp_dp_audio.o
obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index e4e0e299e8a7..80d1e499a18d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -143,7 +143,6 @@ struct zynqmp_disp_layer {
* @dpsub: Display subsystem
* @blend: Register I/O base address for the blender
* @avbuf: Register I/O base address for the audio/video buffer manager
- * @audio: Registers I/O base address for the audio mixer
* @layers: Layers (planes)
*/
struct zynqmp_disp {
@@ -152,7 +151,6 @@ struct zynqmp_disp {
void __iomem *blend;
void __iomem *avbuf;
- void __iomem *audio;
struct zynqmp_disp_layer layers[ZYNQMP_DPSUB_NUM_LAYERS];
};
@@ -866,42 +864,6 @@ static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp *disp,
}
/* -----------------------------------------------------------------------------
- * Audio Mixer
- */
-
-static void zynqmp_disp_audio_write(struct zynqmp_disp *disp, int reg, u32 val)
-{
- writel(val, disp->audio + reg);
-}
-
-/**
- * zynqmp_disp_audio_enable - Enable the audio mixer
- * @disp: Display controller
- *
- * Enable the audio mixer by de-asserting the soft reset. The audio state is set to
- * default values by the reset, set the default mixer volume explicitly.
- */
-static void zynqmp_disp_audio_enable(struct zynqmp_disp *disp)
-{
- /* Clear the audio soft reset register as it's an non-reset flop. */
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_MIXER_VOLUME,
- ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
-}
-
-/**
- * zynqmp_disp_audio_disable - Disable the audio mixer
- * @disp: Display controller
- *
- * Disable the audio mixer by asserting its soft reset.
- */
-static void zynqmp_disp_audio_disable(struct zynqmp_disp *disp)
-{
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET,
- ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
-}
-
-/* -----------------------------------------------------------------------------
* ZynqMP Display Layer & DRM Plane
*/
@@ -1341,8 +1303,6 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
disp->dpsub->vid_clk_from_ps);
zynqmp_disp_avbuf_enable_channels(disp);
zynqmp_disp_avbuf_enable_audio(disp);
-
- zynqmp_disp_audio_enable(disp);
}
/**
@@ -1351,8 +1311,6 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
*/
void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
- zynqmp_disp_audio_disable(disp);
-
zynqmp_disp_avbuf_disable_audio(disp);
zynqmp_disp_avbuf_disable_channels(disp);
zynqmp_disp_avbuf_disable(disp);
@@ -1421,12 +1379,6 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
goto error;
}
- disp->audio = devm_platform_ioremap_resource_byname(pdev, "aud");
- if (IS_ERR(disp->audio)) {
- ret = PTR_ERR(disp->audio);
- goto error;
- }
-
ret = zynqmp_disp_create_layers(disp);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
index fa3935384834..9a4ff094e276 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
@@ -177,12 +177,7 @@
#define ZYNQMP_DISP_AUD_MIXER_VOLUME 0x0
#define ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
#define ZYNQMP_DISP_AUD_MIXER_META_DATA 0x4
-#define ZYNQMP_DISP_AUD_CH_STATUS0 0x8
-#define ZYNQMP_DISP_AUD_CH_STATUS1 0xc
-#define ZYNQMP_DISP_AUD_CH_STATUS2 0x10
-#define ZYNQMP_DISP_AUD_CH_STATUS3 0x14
-#define ZYNQMP_DISP_AUD_CH_STATUS4 0x18
-#define ZYNQMP_DISP_AUD_CH_STATUS5 0x1c
+#define ZYNQMP_DISP_AUD_CH_STATUS(x) (0x8 + ((x) * 4))
#define ZYNQMP_DISP_AUD_CH_A_DATA0 0x20
#define ZYNQMP_DISP_AUD_CH_A_DATA1 0x24
#define ZYNQMP_DISP_AUD_CH_A_DATA2 0x28
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 25c5dc61ee88..0b63fd48ea92 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1342,7 +1342,6 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
{
u8 lane_cnt = dp->mode.lane_cnt;
u32 reg, wpl;
- unsigned int rate;
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_HTOTAL, mode->htotal);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VTOTAL, mode->vtotal);
@@ -1367,18 +1366,8 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_N_VID, reg);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_M_VID, mode->clock);
- rate = zynqmp_dpsub_get_audio_clk_rate(dp->dpsub);
- if (rate) {
- dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, reg);
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_M_AUD, rate / 1000);
- }
}
- /* Only 2 channel audio is supported now */
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
-
zynqmp_dp_write(dp, ZYNQMP_DP_USER_PIX_WIDTH, 1);
/* Translate to the native 16 bit datapath based on IP core spec */
@@ -1388,6 +1377,44 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
}
/* -----------------------------------------------------------------------------
+ * Audio
+ */
+
+void zynqmp_dp_audio_set_channels(struct zynqmp_dp *dp,
+ unsigned int num_channels)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, num_channels - 1);
+}
+
+void zynqmp_dp_audio_enable(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+}
+
+void zynqmp_dp_audio_disable(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
+}
+
+void zynqmp_dp_audio_write_n_m(struct zynqmp_dp *dp)
+{
+ unsigned int rate;
+ u32 link_rate;
+
+ if (!(dp->config.misc0 & ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK))
+ return;
+
+ link_rate = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+
+ rate = clk_get_rate(dp->dpsub->aud_clk);
+
+ dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
+
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, link_rate);
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_M_AUD, rate / 1000);
+}
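To make the N/M programming concrete, a hedged arithmetic check (assuming 48 kHz audio on a 2.7 GHz HBR link; the numbers are illustrative, not from the patch):

	/*
	 * N = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7) = 270000 (kHz)
	 * M = clk_get_rate(aud_clk) / 1000 = 512 * 48000 / 1000 = 24576 (kHz)
	 * M/N = 24576 / 270000 ~= 0.091, the audio-to-link clock ratio the
	 * sink uses to regenerate the audio clock.
	 */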
+
+/* -----------------------------------------------------------------------------
* DISP Configuration
*/
@@ -1577,8 +1604,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
/* Enable the encoder */
dp->enabled = true;
zynqmp_dp_update_misc(dp);
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
if (dp->status == connector_status_connected) {
for (i = 0; i < 3; i++) {
@@ -1613,8 +1639,6 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
zynqmp_dp_disp_disable(dp, old_bridge_state);
mutex_unlock(&dp->lock);
@@ -2190,7 +2214,7 @@ static int zynqmp_dp_rate_get(void *data, u64 *val)
struct zynqmp_dp *dp = data;
mutex_lock(&dp->lock);
- *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000;
+ *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000ULL;
mutex_unlock(&dp->lock);
return 0;
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
index f077d7fbd0ad..a3257793e23a 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -22,4 +22,11 @@ void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub);
void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub);
+void zynqmp_dp_audio_set_channels(struct zynqmp_dp *dp,
+ unsigned int num_channels);
+void zynqmp_dp_audio_enable(struct zynqmp_dp *dp);
+void zynqmp_dp_audio_disable(struct zynqmp_dp *dp);
+
+void zynqmp_dp_audio_write_n_m(struct zynqmp_dp *dp);
+
#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
new file mode 100644
index 000000000000..fa5f0ace6084
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Subsystem Driver - Audio support
+ *
+ * Copyright (C) 2015 - 2024 Xilinx, Inc.
+ *
+ * Authors:
+ * - Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ * - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+#include <sound/asoundef.h>
+#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "zynqmp_disp_regs.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+#define ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK 512
+#define ZYNQMP_NUM_PCMS 2
+
+struct zynqmp_dpsub_audio {
+ void __iomem *base;
+
+ struct snd_soc_card card;
+
+ const char *dai_name;
+ const char *link_names[ZYNQMP_NUM_PCMS];
+ const char *pcm_names[ZYNQMP_NUM_PCMS];
+
+ struct snd_soc_dai_driver dai_driver;
+ struct snd_dmaengine_pcm_config pcm_configs[2];
+
+ struct snd_soc_dai_link links[ZYNQMP_NUM_PCMS];
+
+ struct {
+ struct snd_soc_dai_link_component cpu;
+ struct snd_soc_dai_link_component codec;
+ struct snd_soc_dai_link_component platform;
+ } components[ZYNQMP_NUM_PCMS];
+
+ /*
+ * Protects:
+ * - enabled_streams
+ * - volumes
+ * - current_rate
+ */
+ struct mutex enable_lock;
+
+ u32 enabled_streams;
+ u32 current_rate;
+
+ u16 volumes[2];
+};
+
+static const struct snd_pcm_hardware zynqmp_dp_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 256,
+ .period_bytes_max = 1024 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+};
+
+static int zynqmp_dp_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ 256);
+
+ return 0;
+}
+
+static const struct snd_soc_ops zynqmp_dp_ops = {
+ .startup = zynqmp_dp_startup,
+};
+
+static void zynqmp_dp_audio_write(struct zynqmp_dpsub_audio *audio, int reg,
+ u32 val)
+{
+ writel(val, audio->base + reg);
+}
+
+static int dp_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *socdai)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct zynqmp_dpsub *dpsub =
+ snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0));
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+ int ret;
+ u32 sample_rate;
+ struct snd_aes_iec958 iec = { 0 };
+ unsigned long rate;
+
+ sample_rate = params_rate(params);
+
+ if (sample_rate != 48000 && sample_rate != 44100)
+ return -EINVAL;
+
+ guard(mutex)(&audio->enable_lock);
+
+ if (audio->enabled_streams && audio->current_rate != sample_rate) {
+ dev_err(dpsub->dev,
+ "Can't change rate while playback enabled\n");
+ return -EINVAL;
+ }
+
+ if (audio->enabled_streams > 0) {
+ /* Nothing to do */
+ audio->enabled_streams++;
+ return 0;
+ }
+
+ audio->current_rate = sample_rate;
+
+ /* Note: clock rate can only be changed if the clock is disabled */
+ ret = clk_set_rate(dpsub->aud_clk,
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK);
+ if (ret) {
+ dev_err(dpsub->dev, "can't set aud_clk to %u err:%d\n",
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK, ret);
+ return ret;
+ }
+
+ clk_prepare_enable(dpsub->aud_clk);
+
+ rate = clk_get_rate(dpsub->aud_clk);
+
+ /* Tolerate a clock rate offset of up to +-10 Hz */
+ if (abs(sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK - rate) > 10) {
+ dev_err(dpsub->dev, "aud_clk offset is higher: %ld\n",
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK - rate);
+ clk_disable_unprepare(dpsub->aud_clk);
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(dpsub->dev);
+
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ audio->volumes[0] | (audio->volumes[1] << 16));
+
+ /* Clear the audio soft reset register as it's a non-reset flop. */
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+
+ /* Only 2-channel audio is supported for now */
+ zynqmp_dp_audio_set_channels(dpsub->dp, 2);
+
+ zynqmp_dp_audio_write_n_m(dpsub->dp);
+
+ /* Channel status */
+
+ if (sample_rate == 48000)
+ iec.status[3] = IEC958_AES3_CON_FS_48000;
+ else
+ iec.status[3] = IEC958_AES3_CON_FS_44100;
+
+ for (unsigned int i = 0; i < AES_IEC958_STATUS_SIZE / 4; ++i) {
+ u32 v;
+
+ v = (iec.status[(i * 4) + 0] << 0) |
+ (iec.status[(i * 4) + 1] << 8) |
+ (iec.status[(i * 4) + 2] << 16) |
+ (iec.status[(i * 4) + 3] << 24);
+
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_CH_STATUS(i), v);
+ }
+
+ zynqmp_dp_audio_enable(dpsub->dp);
+
+ audio->enabled_streams++;
+
+ return 0;
+}
+
+static int dp_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *socdai)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct zynqmp_dpsub *dpsub =
+ snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0));
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ guard(mutex)(&audio->enable_lock);
+
+ /* Nothing to do */
+ if (audio->enabled_streams > 1) {
+ audio->enabled_streams--;
+ return 0;
+ }
+
+ pm_runtime_put(dpsub->dev);
+
+ zynqmp_dp_audio_disable(dpsub->dp);
+
+ /*
+ * Reset doesn't work. If we assert reset between audio stop and start,
+ * the audio won't start anymore. We are probably missing writes to
+ * some audio-related registers. A/B buf?
+ */
+ /*
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET,
+ ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
+ */
+
+ clk_disable_unprepare(dpsub->aud_clk);
+
+ audio->current_rate = 0;
+ audio->enabled_streams--;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops zynqmp_dp_dai_ops = {
+ .hw_params = dp_dai_hw_params,
+ .hw_free = dp_dai_hw_free,
+};
+
+/*
+ * Min = 10 * log10(0x1 / 0x2000) = -39.13
+ * Max = 10 * log10(0xffff / 0x2000) = 9.03
+ */
+static const DECLARE_TLV_DB_RANGE(zynqmp_dp_tlv,
+ 0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, -3913, 1),
+ 0x1, 0x2000, TLV_DB_LINEAR_ITEM(-3913, 0),
+ 0x2000, 0xffff, TLV_DB_LINEAR_ITEM(0, 903),
+);
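The dB endpoints in the comment above can be verified with a quick standalone calculation (plain userspace C, not kernel code; TLV values are in 0.01 dB units):

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		/* 0x2000 is the unity-gain register value */
		printf("%.0f\n", 100 * 10 * log10(1.0 / 0x2000));     /* -3913 */
		printf("%.0f\n", 100 * 10 * log10(65535.0 / 0x2000)); /* 903 */
		return 0;
	}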
+
+static const struct snd_kcontrol_new zynqmp_dp_snd_controls[] = {
+ SOC_SINGLE_TLV("Input0 Playback Volume", 0,
+ 0, 0xffff, 0, zynqmp_dp_tlv),
+ SOC_SINGLE_TLV("Input1 Playback Volume", 1,
+ 0, 0xffff, 0, zynqmp_dp_tlv),
+};
+
+/*
+ * Note: these read & write functions only support two "registers", 0 and 1,
+ * for volume 0 and 1. In other words, these are not real register read/write
+ * functions.
+ *
+ * This is done to support caching the volume value for the case where the
+ * hardware is not enabled, and also to support locking as volumes 0 and 1
+ * are in the same register.
+ */
+static unsigned int zynqmp_dp_dai_read(struct snd_soc_component *component,
+ unsigned int reg)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(component->dev);
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ return audio->volumes[reg];
+}
+
+static int zynqmp_dp_dai_write(struct snd_soc_component *component,
+ unsigned int reg, unsigned int val)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(component->dev);
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ guard(mutex)(&audio->enable_lock);
+
+ audio->volumes[reg] = val;
+
+ if (audio->enabled_streams)
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ audio->volumes[0] |
+ (audio->volumes[1] << 16));
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver zynqmp_dp_component_driver = {
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .controls = zynqmp_dp_snd_controls,
+ .num_controls = ARRAY_SIZE(zynqmp_dp_snd_controls),
+ .read = zynqmp_dp_dai_read,
+ .write = zynqmp_dp_dai_write,
+};
+
+int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
+{
+ struct platform_device *pdev = to_platform_device(dpsub->dev);
+ struct device *dev = dpsub->dev;
+ struct zynqmp_dpsub_audio *audio;
+ struct snd_soc_card *card;
+ void *dev_data;
+ int ret;
+
+ if (!dpsub->aud_clk)
+ return 0;
+
+ audio = devm_kzalloc(dev, sizeof(*audio), GFP_KERNEL);
+ if (!audio)
+ return -ENOMEM;
+
+ dpsub->audio = audio;
+
+ mutex_init(&audio->enable_lock);
+
+ /* 0x2000 is the zero level, no change */
+ audio->volumes[0] = 0x2000;
+ audio->volumes[1] = 0x2000;
+
+ audio->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-dai", dev_name(dev));
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ audio->link_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-dp-%u", dev_name(dev), i);
+ audio->pcm_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-pcm-%u", dev_name(dev), i);
+ }
+
+ audio->base = devm_platform_ioremap_resource_byname(pdev, "aud");
+ if (IS_ERR(audio->base))
+ return PTR_ERR(audio->base);
+
+ /* Create CPU DAI */
+
+ audio->dai_driver = (struct snd_soc_dai_driver) {
+ .name = audio->dai_name,
+ .ops = &zynqmp_dp_dai_ops,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ };
+
+ ret = devm_snd_soc_register_component(dev, &zynqmp_dp_component_driver,
+ &audio->dai_driver, 1);
+ if (ret) {
+ dev_err(dev, "Failed to register CPU DAI\n");
+ return ret;
+ }
+
+ /* Create PCMs */
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ struct snd_dmaengine_pcm_config *pcm_config =
+ &audio->pcm_configs[i];
+
+ *pcm_config = (struct snd_dmaengine_pcm_config){
+ .name = audio->pcm_names[i],
+ .pcm_hardware = &zynqmp_dp_pcm_hw,
+ .prealloc_buffer_size = 64 * 1024,
+ .chan_names[SNDRV_PCM_STREAM_PLAYBACK] =
+ i == 0 ? "aud0" : "aud1",
+ };
+
+ ret = devm_snd_dmaengine_pcm_register(dev, pcm_config, 0);
+ if (ret) {
+ dev_err(dev, "Failed to register PCM %u\n", i);
+ return ret;
+ }
+ }
+
+ /* Create card */
+
+ card = &audio->card;
+ card->name = "DisplayPort";
+ card->long_name = "DisplayPort Monitor";
+ card->driver_name = "zynqmp_dpsub";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->num_links = ZYNQMP_NUM_PCMS;
+ card->dai_link = audio->links;
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ struct snd_soc_dai_link *link = &card->dai_link[i];
+
+ link->ops = &zynqmp_dp_ops;
+
+ link->name = audio->link_names[i];
+ link->stream_name = audio->link_names[i];
+
+ link->cpus = &audio->components[i].cpu;
+ link->num_cpus = 1;
+ link->cpus[0].dai_name = audio->dai_name;
+
+ link->codecs = &audio->components[i].codec;
+ link->num_codecs = 1;
+ link->codecs[0].name = "snd-soc-dummy";
+ link->codecs[0].dai_name = "snd-soc-dummy-dai";
+
+ link->platforms = &audio->components[i].platform;
+ link->num_platforms = 1;
+ link->platforms[0].name = audio->pcm_names[i];
+ }
+
+ /*
+ * HACK: devm_snd_soc_register_card() overwrites the current drvdata,
+ * so we need to restore it afterwards.
+ */
+ dev_data = dev_get_drvdata(dev);
+ ret = devm_snd_soc_register_card(dev, card);
+ dev_set_drvdata(dev, dev_data);
+ if (ret) {
+ /*
+ * As older dtbs may not have the audio channel dmas defined,
+ * instead of returning an error here we'll continue and just
+ * mark the audio as disabled.
+ */
+ dev_err(dev, "Failed to register sound card, disabling audio support\n");
+
+ devm_kfree(dev, audio);
+ dpsub->audio = NULL;
+
+ return 0;
+ }
+
+ return 0;
+}
+
+void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub)
+{
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ if (!audio)
+ return;
+
+ if (!dpsub->aud_clk)
+ return;
+
+ mutex_destroy(&audio->enable_lock);
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 07c4d184e7a1..f953ca48a930 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -57,36 +57,6 @@ static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
};
/* -----------------------------------------------------------------------------
- * DPSUB Configuration
- */
-
-/**
- * zynqmp_dpsub_audio_enabled - If the audio is enabled
- * @dpsub: DisplayPort subsystem
- *
- * Return if the audio is enabled depending on the audio clock.
- *
- * Return: true if audio is enabled, or false.
- */
-bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub)
-{
- return !!dpsub->aud_clk;
-}
-
-/**
- * zynqmp_dpsub_get_audio_clk_rate - Get the current audio clock rate
- * @dpsub: DisplayPort subsystem
- *
- * Return: the current audio clock rate.
- */
-unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub)
-{
- if (zynqmp_dpsub_audio_enabled(dpsub))
- return 0;
- return clk_get_rate(dpsub->aud_clk);
-}
-
-/* -----------------------------------------------------------------------------
* Probe & Remove
*/
@@ -264,10 +234,17 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
goto err_disp;
}
+ ret = zynqmp_audio_init(dpsub);
+ if (ret)
+ goto err_drm_cleanup;
+
dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
return 0;
+err_drm_cleanup:
+ if (dpsub->drm)
+ zynqmp_dpsub_drm_cleanup(dpsub);
err_disp:
drm_bridge_remove(dpsub->bridge);
zynqmp_disp_remove(dpsub);
@@ -287,6 +264,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ zynqmp_audio_uninit(dpsub);
+
if (dpsub->drm)
zynqmp_dpsub_drm_cleanup(dpsub);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
index b18554467e9c..49875529c2a4 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -12,6 +12,8 @@
#ifndef _ZYNQMP_DPSUB_H_
#define _ZYNQMP_DPSUB_H_
+#include <linux/types.h>
+
struct clk;
struct device;
struct drm_bridge;
@@ -39,6 +41,8 @@ enum zynqmp_dpsub_format {
ZYNQMP_DPSUB_FORMAT_YONLY,
};
+struct zynqmp_dpsub_audio;
+
/**
* struct zynqmp_dpsub - ZynqMP DisplayPort Subsystem
* @dev: The physical device
@@ -77,10 +81,17 @@ struct zynqmp_dpsub {
struct zynqmp_dp *dp;
unsigned int dma_align;
+
+ struct zynqmp_dpsub_audio *audio;
};
-bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub);
-unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub);
+#ifdef CONFIG_DRM_ZYNQMP_DPSUB_AUDIO
+int zynqmp_audio_init(struct zynqmp_dpsub *dpsub);
+void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub);
+#else
+static inline int zynqmp_audio_init(struct zynqmp_dpsub *dpsub) { return 0; }
+static inline void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub) { }
+#endif
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index fc81983d9e5e..b47463473472 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -9,12 +9,12 @@
* - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -409,7 +409,6 @@ static const struct drm_driver zynqmp_dpsub_drm_driver = {
.name = "zynqmp-dpsub",
.desc = "Xilinx DisplayPort Subsystem Driver",
- .date = "20130509",
.major = 1,
.minor = 0,
};
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4d2a89d65b65..b53eb569bd49 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -787,7 +787,7 @@ config HID_NINTENDO
Adds support for the Nintendo Switch Joy-Cons, NSO, Pro Controller.
All controllers support bluetooth, and the Pro Controller also supports
its USB mode. This also includes support for the Nintendo Switch Online
- Controllers which include the Genesis, SNES, and N64 controllers.
+ Controllers which include the NES, Genesis, SNES, and N64 controllers.
To compile this driver as a module, choose M here: the
module will be called hid-nintendo.
@@ -1386,4 +1386,6 @@ source "drivers/hid/amd-sfh-hid/Kconfig"
source "drivers/hid/surface-hid/Kconfig"
+source "drivers/hid/intel-thc-hid/Kconfig"
+
endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 24de45f3677d..482b096eea28 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -171,3 +171,5 @@ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
+
+obj-$(CONFIG_INTEL_THC_HID) += intel-thc-hid/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index e5620d7db569..799b8686a88a 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -43,6 +43,7 @@ struct amd_mp2_sensor_info {
struct sfh_dev_status {
bool is_hpd_present;
bool is_als_present;
+ bool is_sra_present;
};
struct amd_mp2_dev {
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index db36d87d5634..03c028f1aab4 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -30,6 +30,7 @@ static int amd_sfh_get_sensor_num(struct amd_mp2_dev *mp2, u8 *sensor_id)
case ACCEL_IDX:
case GYRO_IDX:
case MAG_IDX:
+ case SRA_IDX:
case ALS_IDX:
case HPD_IDX:
if (BIT(i) & slist->sl.sensors)
@@ -58,6 +59,8 @@ static const char *get_sensor_name(int idx)
return "gyroscope";
case MAG_IDX:
return "magnetometer";
+ case SRA_IDX:
+ return "SRA";
case ALS_IDX:
return "ALS";
case HPD_IDX:
@@ -130,6 +133,23 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
+
+ if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
+ break;
+
+ if (cl_data->sensor_idx[i] == SRA_IDX) {
+ info.sensor_idx = cl_data->sensor_idx[i];
+ writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
+ mp2_ops->start(privdata, info);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
+
+ cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ privdata->dev_en.is_sra_present = true;
+ continue;
+ }
+
cl_data->sensor_requested_cnt[i] = 0;
cl_data->cur_hid_dev = i;
cl_idx = cl_data->sensor_idx[i];
@@ -181,6 +201,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] == SRA_IDX)
+ continue;
cl_data->cur_hid_dev = i;
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
cl_data->is_any_sensor_enabled = true;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index 4676f060da26..ffb98b4c36cb 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -87,6 +87,41 @@ void sfh_interface_init(struct amd_mp2_dev *mp2)
emp2 = mp2;
}
+static int amd_sfh_mode_info(u32 *platform_type, u32 *laptop_placement)
+{
+ struct sfh_op_mode mode;
+
+ if (!platform_type || !laptop_placement)
+ return -EINVAL;
+
+ if (!emp2 || !emp2->dev_en.is_sra_present)
+ return -ENODEV;
+
+ mode.val = readl(emp2->mmio + amd_get_c2p_val(emp2, 3));
+
+ *platform_type = mode.op_mode.devicemode;
+
+ if (mode.op_mode.ontablestate == 1) {
+ *laptop_placement = ON_TABLE;
+ } else if (mode.op_mode.ontablestate == 2) {
+ *laptop_placement = ON_LAP_MOTION;
+ } else if (mode.op_mode.inbagstate == 1) {
+ *laptop_placement = IN_BAG;
+ } else if (mode.op_mode.outbagstate == 1) {
+ *laptop_placement = OUT_OF_BAG;
+ } else if (mode.op_mode.ontablestate == 0 || mode.op_mode.inbagstate == 0 ||
+ mode.op_mode.outbagstate == 0) {
+ *laptop_placement = LP_UNKNOWN;
+ pr_warn_once("Unknown laptop placement\n");
+ } else if (mode.op_mode.ontablestate == 3 || mode.op_mode.inbagstate == 3 ||
+ mode.op_mode.outbagstate == 3) {
+ *laptop_placement = LP_UNDEFINED;
+ pr_warn_once("Undefined laptop placement\n");
+ }
+
+ return 0;
+}
+
static int amd_sfh_hpd_info(u8 *user_present)
{
struct hpd_status hpdstatus;
@@ -131,6 +166,9 @@ int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op)
return amd_sfh_hpd_info(&sfh_info->user_present);
case MT_ALS:
return amd_sfh_als_info(&sfh_info->ambient_light);
+ case MT_SRA:
+ return amd_sfh_mode_info(&sfh_info->platform_type,
+ &sfh_info->laptop_placement);
}
}
return -EINVAL;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
index 2c211d28764d..665c99ad779f 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -22,8 +22,9 @@ enum sensor_index {
ACCEL_IDX,
GYRO_IDX,
MAG_IDX,
- ALS_IDX = 4,
- HPD_IDX = 5,
+ SRA_IDX,
+ ALS_IDX,
+ HPD_IDX,
MAX_IDX = 15,
};
@@ -164,6 +165,25 @@ struct hpd_status {
};
};
+struct sfh_op_mode {
+ union {
+ u32 val;
+ struct {
+ u32 mode : 3;
+ u32 lidstatus : 1;
+ u32 angle : 10;
+ u32 inbagstatedbg : 2;
+ u32 ontablestate : 2;
+ u32 inbagstate : 2;
+ u32 outbagstate : 2;
+ u32 inbagmlcstate : 1;
+ u32 powerstate : 2;
+ u32 data : 3;
+ u32 devicemode : 4;
+ } op_mode;
+ };
+};
+
void sfh_interface_init(struct amd_mp2_dev *mp2);
void sfh_deinit_emp2(void);
void amd_sfh1_1_set_desc_ops(struct amd_mp2_ops *mp2_ops);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 506c6f377e7d..46e3e42f9eb5 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -432,6 +432,26 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret;
}
+static int asus_kbd_disable_oobe(struct hid_device *hdev)
+{
+ const u8 init[][6] = {
+ { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 },
+ { FEATURE_KBD_REPORT_ID, 0xBA, 0xC5, 0xC4 },
+ { FEATURE_KBD_REPORT_ID, 0xD0, 0x8F, 0x01 },
+ { FEATURE_KBD_REPORT_ID, 0xD0, 0x85, 0xFF }
+ };
+ int ret;
+
+ for (size_t i = 0; i < ARRAY_SIZE(init); i++) {
+ ret = asus_kbd_set_report(hdev, init[i], sizeof(init[i]));
+ if (ret < 0)
+ return ret;
+ }
+
+ hid_info(hdev, "Disabled OOBE for keyboard\n");
+ return 0;
+}
+
static void asus_schedule_work(struct asus_kbd_leds *led)
{
unsigned long flags;
@@ -534,6 +554,12 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID2);
if (ret < 0)
return ret;
+
+ if (dmi_match(DMI_PRODUCT_FAMILY, "ProArt P16")) {
+ ret = asus_kbd_disable_oobe(hdev);
+ if (ret < 0)
+ return ret;
+ }
} else {
/* Initialize keyboard */
ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 33a191973324..4497b50799db 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1163,6 +1163,8 @@ static void hid_apply_multiplier(struct hid_device *hid,
while (multiplier_collection->parent_idx != -1 &&
multiplier_collection->type != HID_COLLECTION_LOGICAL)
multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
+ if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
+ multiplier_collection = NULL;
effective_multiplier = hid_calculate_multiplier(hid, multiplier);
@@ -2174,9 +2176,9 @@ static bool hid_hiddev(struct hid_device *hdev)
static ssize_t
-read_report_descriptor(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+report_descriptor_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct hid_device *hdev = to_hid_device(dev);
@@ -2193,24 +2195,17 @@ read_report_descriptor(struct file *filp, struct kobject *kobj,
}
static ssize_t
-show_country(struct device *dev, struct device_attribute *attr,
- char *buf)
+country_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
return sprintf(buf, "%02x\n", hdev->country & 0xff);
}
-static struct bin_attribute dev_bin_attr_report_desc = {
- .attr = { .name = "report_descriptor", .mode = 0444 },
- .read = read_report_descriptor,
- .size = HID_MAX_DESCRIPTOR_SIZE,
-};
+static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);
-static const struct device_attribute dev_attr_country = {
- .attr = { .name = "country", .mode = 0444 },
- .show = show_country,
-};
+static const DEVICE_ATTR_RO(country);
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
@@ -2800,13 +2795,13 @@ static struct attribute *hid_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
-static struct bin_attribute *hid_dev_bin_attrs[] = {
- &dev_bin_attr_report_desc,
+static const struct bin_attribute *hid_dev_bin_attrs[] = {
+ &bin_attr_report_descriptor,
NULL
};
static const struct attribute_group hid_dev_group = {
.attrs = hid_dev_attrs,
- .bin_attrs = hid_dev_bin_attrs,
+ .bin_attrs_new = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1f47fda809b9..c448de53bf91 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -506,7 +506,6 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
#define I2C_VENDOR_ID_GOODIX 0x27c6
-#define I2C_DEVICE_ID_GOODIX_01E0 0x01e0
#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8
#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@@ -1089,6 +1088,8 @@
#define USB_VENDOR_ID_PRODIGE 0x05af
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+#define I2C_VENDOR_ID_QTEC 0x6243
+
#define USB_VENDOR_ID_QUANTA 0x0408
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fda9dce3da99..9d80635a91eb 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -810,10 +810,23 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
- if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/
- switch (usage->hid & 0xf) {
- case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
- default: goto ignore;
+ if ((usage->hid & 0xf0) == 0x90) { /* SystemControl & D-pad */
+ switch (usage->hid) {
+ case HID_GD_UP: usage->hat_dir = 1; break;
+ case HID_GD_DOWN: usage->hat_dir = 5; break;
+ case HID_GD_RIGHT: usage->hat_dir = 3; break;
+ case HID_GD_LEFT: usage->hat_dir = 7; break;
+ case HID_GD_DO_NOT_DISTURB:
+ map_key_clear(KEY_DO_NOT_DISTURB); break;
+ default: goto unknown;
+ }
+
+ if (usage->hid <= HID_GD_LEFT) {
+ if (field->dpad) {
+ map_abs(field->dpad);
+ goto ignore;
+ }
+ map_abs(ABS_HAT0X);
}
break;
}
@@ -844,22 +857,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (field->application == HID_GD_SYSTEM_CONTROL)
goto ignore;
- if ((usage->hid & 0xf0) == 0x90) { /* D-pad */
- switch (usage->hid) {
- case HID_GD_UP: usage->hat_dir = 1; break;
- case HID_GD_DOWN: usage->hat_dir = 5; break;
- case HID_GD_RIGHT: usage->hat_dir = 3; break;
- case HID_GD_LEFT: usage->hat_dir = 7; break;
- default: goto unknown;
- }
- if (field->dpad) {
- map_abs(field->dpad);
- goto ignore;
- }
- map_abs(ABS_HAT0X);
- break;
- }
-
switch (usage->hid) {
/* These usage IDs map directly to the usage codes. */
case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index f66194fde891..4d00bc4d656e 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,11 +32,22 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
+#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
+#include <linux/platform_profile.h>
+#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
+
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
#define LENOVO_KEY_MICMUTE KEY_F20
+/* HID raw events for ThinkPad X12 Tabs */
+#define TP_X12_RAW_HOTKEY_FN_F4 0x00020003
+#define TP_X12_RAW_HOTKEY_FN_F8 0x38001003
+#define TP_X12_RAW_HOTKEY_FN_F10 0x00000803
+#define TP_X12_RAW_HOTKEY_FN_F12 0x00000403
+#define TP_X12_RAW_HOTKEY_FN_SPACE 0x18001003
+
struct lenovo_drvdata {
u8 led_report[3]; /* Must be first for proper alignment */
int led_state;
@@ -71,6 +82,14 @@ struct lenovo_drvdata {
#define TP10UBKBD_LED_OFF 1
#define TP10UBKBD_LED_ON 2
+/* Helper to report raw events as key events */
+static inline void report_key_event(struct input_dev *input, int keycode)
+{
+ input_report_key(input, keycode, 1);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+}
+
static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
enum led_brightness value)
{
@@ -472,6 +491,8 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
usage, bit, max);
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
@@ -582,6 +603,8 @@ static ssize_t attr_fn_lock_store(struct device *dev,
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
lenovo_features_set_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -680,6 +703,62 @@ static const struct attribute_group lenovo_attr_group_cptkbd = {
.attrs = lenovo_attributes_cptkbd,
};
+/* Handle the ThinkPad X12 Tab's raw HID events for Fn hotkeys */
+static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
+{
+ struct hid_input *hidinput;
+ struct input_dev *input = NULL;
+
+ /* Iterate through all associated input devices */
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ input = hidinput->input;
+ if (!input)
+ continue;
+
+ switch (raw_data) {
+ /* Fn+F4: mic mute, reported as F20 as userspace expects */
+ case TP_X12_RAW_HOTKEY_FN_F4:
+ report_key_event(input, LENOVO_KEY_MICMUTE);
+ return 1;
+ /* Fn+F8: airplane mode or power mode, depending on the device */
+ case TP_X12_RAW_HOTKEY_FN_F8:
+ /*
+ * The X12 Tab maps Fn+F8 to airplane mode, whereas the
+ * X12 Tab2 uses Fn+F8 to cycle platform power modes.
+ */
+ if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
+ report_key_event(input, KEY_RFKILL);
+ return 1;
+ }
+#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
+ else {
+ platform_profile_cycle();
+ return 1;
+ }
+#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
+ return 0;
+ case TP_X12_RAW_HOTKEY_FN_F10:
+ /* Fn+F10: pickup-phone on the X12 Tab, snipping tool on the Tab2 */
+ if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB)
+ report_key_event(input, KEY_PICKUP_PHONE);
+ else
+ report_key_event(input, KEY_SELECTIVE_SCREENSHOT);
+ return 1;
+ case TP_X12_RAW_HOTKEY_FN_F12:
+ /* Fn+F12: bookmarks (star) key */
+ report_key_event(input, KEY_BOOKMARKS);
+ return 1;
+ case TP_X12_RAW_HOTKEY_FN_SPACE:
+ /* Fn+Space: keyboard backlight toggle */
+ report_key_event(input, KEY_KBDILLUMTOGGLE);
+ return 1;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
static int lenovo_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
@@ -697,6 +776,15 @@ static int lenovo_raw_event(struct hid_device *hdev,
data[2] = 0x01;
}
+ /*
+ * The ThinkPad X12 Tab keyboards report Fn+XX hotkeys as raw HID
+ * data with report ID 0x03, e.g. the raw value received for mic
+ * mute is 0x00020003.
+ */
+ if (unlikely((hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB
+ || hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2)
+ && size >= 3 && report->id == 0x03))
+ return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(u32 *)data));
+
return 0;
}
@@ -776,6 +864,8 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
return lenovo_event_cptkbd(hdev, field, usage, value);
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1057,6 +1147,8 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
case USB_DEVICE_ID_LENOVO_TPKBD:
lenovo_led_set_tpkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1243,8 +1335,15 @@ static int lenovo_probe_tp10ubkbd(struct hid_device *hdev)
* We cannot read the state, only set it, so we force it to on here
* (which should be a no-op) to make sure that our state matches the
* keyboard's FN-lock state. This is the same as what Windows does.
+ *
+ * On the X12 Tab and Tab2, however, the Windows default is
+ * Fn-lock off, so match that behaviour for those models.
*/
- data->fn_lock = true;
+
+ data->fn_lock = !(hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB ||
+ hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2);
+
lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, data->fn_lock);
ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd);
@@ -1288,6 +1387,8 @@ static int lenovo_probe(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
ret = lenovo_probe_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1375,6 +1476,8 @@ static void lenovo_remove(struct hid_device *hdev)
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
lenovo_remove_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1429,6 +1532,10 @@ static const struct hid_device_id lenovo_devices[] = {
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) },
{ }
};
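(Aside: for reference, a standalone sketch — not part of the patch — of how the raw hotkey values handled above are formed: the first four report bytes are read as a little-endian u32 whose low byte is the 0x03 report ID, as lenovo_raw_event() does with le32_to_cpu():

#include <stdint.h>
#include <stdio.h>

#define TP_X12_RAW_HOTKEY_FN_F4    0x00020003u
#define TP_X12_RAW_HOTKEY_FN_F10   0x00000803u

static uint32_t raw_to_u32(const uint8_t *buf)
{
	/* little-endian assembly, mirroring le32_to_cpu() in the driver */
	return buf[0] | (buf[1] << 8) | ((uint32_t)buf[2] << 16) |
	       ((uint32_t)buf[3] << 24);
}

int main(void)
{
	uint8_t report[4] = { 0x03, 0x00, 0x02, 0x00 };	/* Fn+F4 */
	uint32_t v = raw_to_u32(report);

	if (v == TP_X12_RAW_HOTKEY_FN_F4)
		printf("Fn+F4 -> mic mute (KEY_F20)\n");
	else if (v == TP_X12_RAW_HOTKEY_FN_F10)
		printf("Fn+F10 -> pickup-phone / screenshot\n");
	return 0;
}

Byte 0 (0x03) is the report ID, so 0x03 | 0x02 << 16 yields the 0x00020003 mic-mute value.)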
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index ec110dea8772..a76f17158539 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -52,6 +52,7 @@ module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
#define TRACKPAD2_2021_BT_VERSION 0x110
+#define TRACKPAD_2024_BT_VERSION 0x314
#define TRACKPAD_REPORT_ID 0x28
#define TRACKPAD2_USB_REPORT_ID 0x02
@@ -567,9 +568,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
*/
if (hdev->vendor == BT_VENDOR_ID_APPLE) {
- if (input->id.version == TRACKPAD2_2021_BT_VERSION)
- input->name = "Apple Inc. Magic Trackpad";
- else
- input->name = "Apple Inc. Magic Trackpad 2";
+ if (input->id.version == TRACKPAD2_2021_BT_VERSION) {
+ input->name = "Apple Inc. Magic Trackpad 2021";
+ } else if (input->id.version == TRACKPAD_2024_BT_VERSION) {
+ input->name = "Apple Inc. Magic Trackpad USB-C";
+ } else {
+ input->name = "Apple Inc. Magic Trackpad";
+ }
} else { /* USB_VENDOR_ID_APPLE */
input->name = hdev->name;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 785743036647..82900857bfd8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1460,8 +1460,7 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
(hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
- hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
- hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
+ hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
if (rdesc[607] == 0x15) {
rdesc[607] = 0x25;
dev_info(
@@ -2086,9 +2085,6 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
I2C_DEVICE_ID_GOODIX_01E9) },
- { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
- HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
- I2C_DEVICE_ID_GOODIX_01E0) },
/* GoodTouch panels */
{ .driver_data = MT_CLS_NSMU,
@@ -2318,6 +2314,11 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH,
HID_ANY_ID) },
+ /* Hantick */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288) },
+
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 55153a2f7988..11ac246176ae 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -456,14 +456,13 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
{ /* sentinel */ },
};
-/*
- * "A", "B", and "C" are mapped positionally, rather than by label (e.g., "A"
- * gets assigned to BTN_EAST instead of BTN_A).
- */
static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
- { BTN_SOUTH, JC_BTN_A, },
- { BTN_EAST, JC_BTN_B, },
- { BTN_WEST, JC_BTN_R, },
+ { BTN_A, JC_BTN_A, },
+ { BTN_B, JC_BTN_B, },
+ { BTN_C, JC_BTN_R, },
+ { BTN_X, JC_BTN_X, }, /* MD/GEN 6B Only */
+ { BTN_Y, JC_BTN_Y, }, /* MD/GEN 6B Only */
+ { BTN_Z, JC_BTN_L, }, /* MD/GEN 6B Only */
{ BTN_SELECT, JC_BTN_ZR, },
{ BTN_START, JC_BTN_PLUS, },
{ BTN_MODE, JC_BTN_HOME, },
@@ -471,9 +470,6 @@ static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
{ /* sentinel */ },
};
-/*
- * N64's C buttons get assigned to d-pad directions and registered as buttons.
- */
static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_A, JC_BTN_A, },
{ BTN_B, JC_BTN_B, },
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index d55aaabab1ed..3048297569c5 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -224,24 +224,24 @@ static ssize_t arvo_sysfs_read(struct file *fp,
}
static ssize_t arvo_sysfs_write_button(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
return arvo_sysfs_write(fp, kobj, buf, off, count,
sizeof(struct arvo_button), ARVO_COMMAND_BUTTON);
}
-static BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button,
- sizeof(struct arvo_button));
+static const BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button,
+ sizeof(struct arvo_button));
static ssize_t arvo_sysfs_read_info(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
return arvo_sysfs_read(fp, kobj, buf, off, count,
sizeof(struct arvo_info), ARVO_COMMAND_INFO);
}
-static BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL,
- sizeof(struct arvo_info));
+static const BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL,
+ sizeof(struct arvo_info));
static struct attribute *arvo_attrs[] = {
&dev_attr_mode_key.attr,
@@ -250,7 +250,7 @@ static struct attribute *arvo_attrs[] = {
NULL,
};
-static struct bin_attribute *arvo_bin_attributes[] = {
+static const struct bin_attribute *const arvo_bin_attributes[] = {
&bin_attr_button,
&bin_attr_info,
NULL,
@@ -258,7 +258,7 @@ static struct bin_attribute *arvo_bin_attributes[] = {
static const struct attribute_group arvo_group = {
.attrs = arvo_attrs,
- .bin_attrs = arvo_bin_attributes,
+ .bin_attrs_new = arvo_bin_attributes,
};
static const struct attribute_group *arvo_groups[] = {
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index 839ddfd931f0..0f9a2db04df9 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -46,8 +46,8 @@ ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
#define ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return roccat_common2_sysfs_write(fp, kobj, buf, off, count, \
SIZE, COMMAND); \
@@ -55,8 +55,8 @@ static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
#define ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) \
static ssize_t roccat_common2_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return roccat_common2_sysfs_read(fp, kobj, buf, off, count, \
SIZE, COMMAND); \
@@ -68,27 +68,27 @@ ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE)
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = SIZE, \
- .read = roccat_common2_sysfs_read_ ## thingy, \
- .write = roccat_common2_sysfs_write_ ## thingy \
+ .read_new = roccat_common2_sysfs_read_ ## thingy, \
+ .write_new = roccat_common2_sysfs_write_ ## thingy \
}
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_R(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = SIZE, \
- .read = roccat_common2_sysfs_read_ ## thingy, \
+ .read_new = roccat_common2_sysfs_read_ ## thingy, \
}
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_W(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = SIZE, \
- .write = roccat_common2_sysfs_write_ ## thingy \
+ .write_new = roccat_common2_sysfs_write_ ## thingy \
}
#endif
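(Aside: the read_new/write_new switch above is easiest to see through the macro expansion. Below is a toy, userspace-compilable analog of the token-pasting pattern — all toy_* names are hypothetical, this is not kernel code — where one invocation emits both the accessor function and a const attribute object wired to it:

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

struct toy_bin_attribute {
	const char *name;
	size_t size;
	ssize_t (*read_new)(char *buf, size_t count);
};

#define TOY_BIN_ATTRIBUTE_R(thingy, SIZE)                          \
static ssize_t toy_read_##thingy(char *buf, size_t count)          \
{                                                                  \
	return snprintf(buf, count, "%s data", #thingy);           \
}                                                                  \
static const struct toy_bin_attribute bin_attr_##thingy = {        \
	.name = #thingy,                                           \
	.size = SIZE,                                              \
	.read_new = toy_read_##thingy,                             \
}

TOY_BIN_ATTRIBUTE_R(info, 8);

int main(void)
{
	char buf[32];

	bin_attr_info.read_new(buf, sizeof(buf));
	printf("%s (size %zu): %s\n", bin_attr_info.name,
	       bin_attr_info.size, buf);
	return 0;
}

The const qualifier is what allows the object to live in read-only data, which is the point of the bin_attrs_new conversion in these patches.)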
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 0cd6208fb371..65a84bfcc2f8 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -156,7 +156,7 @@ static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
#define ISKU_SYSFS_W(thingy, THINGY) \
static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
return isku_sysfs_write(fp, kobj, buf, off, count, \
@@ -165,7 +165,7 @@ static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj
#define ISKU_SYSFS_R(thingy, THINGY) \
static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
return isku_sysfs_read(fp, kobj, buf, off, count, \
@@ -178,27 +178,27 @@ ISKU_SYSFS_W(thingy, THINGY)
#define ISKU_BIN_ATTR_RW(thingy, THINGY) \
ISKU_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .read = isku_sysfs_read_ ## thingy, \
- .write = isku_sysfs_write_ ## thingy \
+ .read_new = isku_sysfs_read_ ## thingy, \
+ .write_new = isku_sysfs_write_ ## thingy \
}
#define ISKU_BIN_ATTR_R(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .read = isku_sysfs_read_ ## thingy, \
+ .read_new = isku_sysfs_read_ ## thingy, \
}
#define ISKU_BIN_ATTR_W(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .write = isku_sysfs_write_ ## thingy \
+ .write_new = isku_sysfs_write_ ## thingy \
}
ISKU_BIN_ATTR_RW(macro, MACRO);
@@ -217,7 +217,7 @@ ISKU_BIN_ATTR_W(control, CONTROL);
ISKU_BIN_ATTR_W(reset, RESET);
ISKU_BIN_ATTR_R(info, INFO);
-static struct bin_attribute *isku_bin_attributes[] = {
+static const struct bin_attribute *const isku_bin_attributes[] = {
&bin_attr_macro,
&bin_attr_keys_function,
&bin_attr_keys_easyzone,
@@ -238,7 +238,7 @@ static struct bin_attribute *isku_bin_attributes[] = {
static const struct attribute_group isku_group = {
.attrs = isku_attrs,
- .bin_attrs = isku_bin_attributes,
+ .bin_attrs_new = isku_bin_attributes,
};
static const struct attribute_group *isku_groups[] = {
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 3f8f459edcf3..b3c0242e5a37 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -261,7 +261,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
}
static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -285,7 +285,7 @@ static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
* case of error the old data is still valid
*/
static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -327,11 +327,11 @@ unlock:
return sizeof(struct kone_settings);
}
-static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
- kone_sysfs_write_settings, sizeof(struct kone_settings));
+static const BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
+ kone_sysfs_write_settings, sizeof(struct kone_settings));
static ssize_t kone_sysfs_read_profilex(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -351,7 +351,7 @@ static ssize_t kone_sysfs_read_profilex(struct file *fp,
/* Writes data only if different to stored data */
static ssize_t kone_sysfs_write_profilex(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -382,11 +382,11 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
return sizeof(struct kone_profile);
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number = { \
+static const struct bin_attribute bin_attr_profile##number = { \
.attr = { .name = "profile" #number, .mode = 0660 }, \
.size = sizeof(struct kone_profile), \
- .read = kone_sysfs_read_profilex, \
- .write = kone_sysfs_write_profilex, \
+ .read_new = kone_sysfs_read_profilex, \
+ .write_new = kone_sysfs_write_profilex, \
.private = &profile_numbers[number-1], \
}
PROFILE_ATTR(1);
@@ -634,7 +634,7 @@ static struct attribute *kone_attrs[] = {
NULL,
};
-static struct bin_attribute *kone_bin_attributes[] = {
+static const struct bin_attribute *const kone_bin_attributes[] = {
&bin_attr_settings,
&bin_attr_profile1,
&bin_attr_profile2,
@@ -646,7 +646,7 @@ static struct bin_attribute *kone_bin_attributes[] = {
static const struct attribute_group kone_group = {
.attrs = kone_attrs,
- .bin_attrs = kone_bin_attributes,
+ .bin_attrs_new = kone_bin_attributes,
};
static const struct attribute_group *kone_groups[] = {
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 8ccb3b14a1a9..5d8a5ce88b4c 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -128,8 +128,8 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
#define KONEPLUS_SYSFS_W(thingy, THINGY) \
static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return koneplus_sysfs_write(fp, kobj, buf, off, count, \
KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
@@ -137,8 +137,8 @@ static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
#define KONEPLUS_SYSFS_R(thingy, THINGY) \
static ssize_t koneplus_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return koneplus_sysfs_read(fp, kobj, buf, off, count, \
KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
@@ -150,27 +150,27 @@ KONEPLUS_SYSFS_R(thingy, THINGY)
#define KONEPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
KONEPLUS_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .read = koneplus_sysfs_read_ ## thingy, \
- .write = koneplus_sysfs_write_ ## thingy \
+ .read_new = koneplus_sysfs_read_ ## thingy, \
+ .write_new = koneplus_sysfs_write_ ## thingy \
}
#define KONEPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
KONEPLUS_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .read = koneplus_sysfs_read_ ## thingy, \
+ .read_new = koneplus_sysfs_read_ ## thingy, \
}
#define KONEPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
KONEPLUS_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .write = koneplus_sysfs_write_ ## thingy \
+ .write_new = koneplus_sysfs_write_ ## thingy \
}
KONEPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
KONEPLUS_BIN_ATTRIBUTE_W(talk, TALK);
@@ -183,8 +183,8 @@ KONEPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
KONEPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -201,8 +201,8 @@ static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -219,16 +219,16 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = KONEPLUS_SIZE_PROFILE_SETTINGS, \
- .read = koneplus_sysfs_read_profilex_settings, \
+ .read_new = koneplus_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = KONEPLUS_SIZE_PROFILE_BUTTONS, \
- .read = koneplus_sysfs_read_profilex_buttons, \
+ .read_new = koneplus_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -321,7 +321,7 @@ static struct attribute *koneplus_attrs[] = {
NULL,
};
-static struct bin_attribute *koneplus_bin_attributes[] = {
+static const struct bin_attribute *const koneplus_bin_attributes[] = {
&bin_attr_control,
&bin_attr_talk,
&bin_attr_macro,
@@ -346,7 +346,7 @@ static struct bin_attribute *koneplus_bin_attributes[] = {
static const struct attribute_group koneplus_group = {
.attrs = koneplus_attrs,
- .bin_attrs = koneplus_bin_attributes,
+ .bin_attrs_new = koneplus_bin_attributes,
};
static const struct attribute_group *koneplus_groups[] = {
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index beca8aef8bbb..7fb705789d4e 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -47,7 +47,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06);
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10);
-static struct bin_attribute *konepure_bin_attrs[] = {
+static const struct bin_attribute *const konepure_bin_attrs[] = {
&bin_attr_actual_profile,
&bin_attr_control,
&bin_attr_info,
@@ -62,7 +62,7 @@ static struct bin_attribute *konepure_bin_attrs[] = {
};
static const struct attribute_group konepure_group = {
- .bin_attrs = konepure_bin_attrs,
+ .bin_attrs_new = konepure_bin_attrs,
};
static const struct attribute_group *konepure_groups[] = {
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 748d4d7cb2fc..e31e4a2e62d5 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -171,8 +171,8 @@ static ssize_t kovaplus_sysfs_write(struct file *fp, struct kobject *kobj,
#define KOVAPLUS_SYSFS_W(thingy, THINGY) \
static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return kovaplus_sysfs_write(fp, kobj, buf, off, count, \
KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
@@ -180,8 +180,8 @@ static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
#define KOVAPLUS_SYSFS_R(thingy, THINGY) \
static ssize_t kovaplus_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return kovaplus_sysfs_read(fp, kobj, buf, off, count, \
KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
@@ -193,19 +193,19 @@ KOVAPLUS_SYSFS_R(thingy, THINGY)
#define KOVAPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
KOVAPLUS_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
- .read = kovaplus_sysfs_read_ ## thingy, \
- .write = kovaplus_sysfs_write_ ## thingy \
+ .read_new = kovaplus_sysfs_read_ ## thingy, \
+ .write_new = kovaplus_sysfs_write_ ## thingy \
}
#define KOVAPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
KOVAPLUS_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
- .write = kovaplus_sysfs_write_ ## thingy \
+ .write_new = kovaplus_sysfs_write_ ## thingy \
}
KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO);
@@ -213,8 +213,8 @@ KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -231,8 +231,8 @@ static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -249,16 +249,16 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \
- .read = kovaplus_sysfs_read_profilex_settings, \
+ .read_new = kovaplus_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \
- .read = kovaplus_sysfs_read_profilex_buttons, \
+ .read_new = kovaplus_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -379,7 +379,7 @@ static struct attribute *kovaplus_attrs[] = {
NULL,
};
-static struct bin_attribute *kovaplus_bin_attributes[] = {
+static const struct bin_attribute *const kovaplus_bin_attributes[] = {
&bin_attr_control,
&bin_attr_info,
&bin_attr_profile_settings,
@@ -399,7 +399,7 @@ static struct bin_attribute *kovaplus_bin_attributes[] = {
static const struct attribute_group kovaplus_group = {
.attrs = kovaplus_attrs,
- .bin_attrs = kovaplus_bin_attributes,
+ .bin_attrs_new = kovaplus_bin_attributes,
};
static const struct attribute_group *kovaplus_groups[] = {
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index d5ddf0d68346..023ec64b4b0e 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -66,7 +66,7 @@ static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
#define LUA_SYSFS_W(thingy, THINGY) \
static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, \
+ struct kobject *kobj, const struct bin_attribute *attr, \
char *buf, loff_t off, size_t count) \
{ \
return lua_sysfs_write(fp, kobj, buf, off, count, \
@@ -75,7 +75,7 @@ static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
#define LUA_SYSFS_R(thingy, THINGY) \
static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, \
+ struct kobject *kobj, const struct bin_attribute *attr, \
char *buf, loff_t off, size_t count) \
{ \
return lua_sysfs_read(fp, kobj, buf, off, count, \
@@ -85,11 +85,11 @@ static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
#define LUA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
LUA_SYSFS_W(thingy, THINGY) \
LUA_SYSFS_R(thingy, THINGY) \
-static struct bin_attribute lua_ ## thingy ## _attr = { \
+static const struct bin_attribute lua_ ## thingy ## _attr = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = LUA_SIZE_ ## THINGY, \
- .read = lua_sysfs_read_ ## thingy, \
- .write = lua_sysfs_write_ ## thingy \
+ .read_new = lua_sysfs_read_ ## thingy, \
+ .write_new = lua_sysfs_write_ ## thingy \
};
LUA_BIN_ATTRIBUTE_RW(control, CONTROL)
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index eeb3d38cd805..2b53fbfbb897 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -129,8 +129,8 @@ static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj,
#define PYRA_SYSFS_W(thingy, THINGY) \
static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return pyra_sysfs_write(fp, kobj, buf, off, count, \
PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
@@ -138,8 +138,8 @@ static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
#define PYRA_SYSFS_R(thingy, THINGY) \
static ssize_t pyra_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return pyra_sysfs_read(fp, kobj, buf, off, count, \
PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
@@ -151,27 +151,27 @@ PYRA_SYSFS_R(thingy, THINGY)
#define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
PYRA_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = PYRA_SIZE_ ## THINGY, \
- .read = pyra_sysfs_read_ ## thingy, \
- .write = pyra_sysfs_write_ ## thingy \
+ .read_new = pyra_sysfs_read_ ## thingy, \
+ .write_new = pyra_sysfs_write_ ## thingy \
}
#define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \
PYRA_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
 .size = PYRA_SIZE_ ## THINGY, \
- .read = pyra_sysfs_read_ ## thingy, \
+ .read_new = pyra_sysfs_read_ ## thingy, \
}
#define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \
PYRA_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = PYRA_SIZE_ ## THINGY, \
- .write = pyra_sysfs_write_ ## thingy \
+ .write_new = pyra_sysfs_write_ ## thingy \
}
PYRA_BIN_ATTRIBUTE_W(control, CONTROL);
@@ -180,8 +180,8 @@ PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -198,8 +198,8 @@ static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -216,16 +216,16 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = PYRA_SIZE_PROFILE_SETTINGS, \
- .read = pyra_sysfs_read_profilex_settings, \
+ .read_new = pyra_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = PYRA_SIZE_PROFILE_BUTTONS, \
- .read = pyra_sysfs_read_profilex_buttons, \
+ .read_new = pyra_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -235,8 +235,8 @@ PROFILE_ATTR(4);
PROFILE_ATTR(5);
static ssize_t pyra_sysfs_write_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
@@ -273,7 +273,7 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
}
PYRA_SYSFS_R(settings, SETTINGS);
-static struct bin_attribute bin_attr_settings =
+static const struct bin_attribute bin_attr_settings =
__BIN_ATTR(settings, (S_IWUSR | S_IRUGO),
pyra_sysfs_read_settings, pyra_sysfs_write_settings,
PYRA_SIZE_SETTINGS);
@@ -334,7 +334,7 @@ static struct attribute *pyra_attrs[] = {
NULL,
};
-static struct bin_attribute *pyra_bin_attributes[] = {
+static const struct bin_attribute *const pyra_bin_attributes[] = {
&bin_attr_control,
&bin_attr_info,
&bin_attr_profile_settings,
@@ -355,7 +355,7 @@ static struct bin_attribute *pyra_bin_attributes[] = {
static const struct attribute_group pyra_group = {
.attrs = pyra_attrs,
- .bin_attrs = pyra_bin_attributes,
+ .bin_attrs_new = pyra_bin_attributes,
};
static const struct attribute_group *pyra_groups[] = {
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
index 57714a4525e2..902dac1e714e 100644
--- a/drivers/hid/hid-roccat-ryos.c
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -47,7 +47,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566);
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2);
-static struct bin_attribute *ryos_bin_attrs[] = {
+static const struct bin_attribute *const ryos_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_keys_primary,
@@ -70,7 +70,7 @@ static struct bin_attribute *ryos_bin_attrs[] = {
};
static const struct attribute_group ryos_group = {
- .bin_attrs = ryos_bin_attrs,
+ .bin_attrs_new = ryos_bin_attrs,
};
static const struct attribute_group *ryos_groups[] = {
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 2baa47a0efc5..7399b8ffb5c7 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -30,7 +30,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04);
-static struct bin_attribute *savu_bin_attrs[] = {
+static const struct bin_attribute *const savu_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_general,
@@ -42,7 +42,7 @@ static struct bin_attribute *savu_bin_attrs[] = {
};
static const struct attribute_group savu_group = {
- .bin_attrs = savu_bin_attrs,
+ .bin_attrs_new = savu_bin_attrs,
};
static const struct attribute_group *savu_groups[] = {
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 7bd86eef6ec7..4c94c03cb573 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -730,23 +730,30 @@ err_stop_hw:
return ret;
}
+static int sensor_hub_finalize_pending_fn(struct device *dev, void *data)
+{
+ struct hid_sensor_hub_device *hsdev = dev->platform_data;
+
+ if (hsdev->pending.status)
+ complete(&hsdev->pending.ready);
+
+ return 0;
+}
+
static void sensor_hub_remove(struct hid_device *hdev)
{
struct sensor_hub_data *data = hid_get_drvdata(hdev);
unsigned long flags;
- int i;
hid_dbg(hdev, " hardware removed\n");
hid_hw_close(hdev);
hid_hw_stop(hdev);
+
spin_lock_irqsave(&data->lock, flags);
- for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
- struct hid_sensor_hub_device *hsdev =
- data->hid_sensor_hub_client_devs[i].platform_data;
- if (hsdev->pending.status)
- complete(&hsdev->pending.ready);
- }
+ device_for_each_child(&hdev->dev, NULL,
+ sensor_hub_finalize_pending_fn);
spin_unlock_irqrestore(&data->lock, flags);
+
mfd_remove_devices(&hdev->dev);
mutex_destroy(&data->mutex);
}
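(Aside: the refactor above replaces manual indexing of hid_sensor_hub_client_devs with the driver-core iterator. A userspace analog of the callback-iterator pattern — all names hypothetical — showing the contract device_for_each_child() follows: visit every child, stop early on a non-zero return:

#include <stdio.h>

struct toy_device { const char *name; int pending; };

static int for_each_child(struct toy_device *devs, int n,
			  int (*fn)(struct toy_device *, void *), void *data)
{
	for (int i = 0; i < n; i++) {
		int ret = fn(&devs[i], data);

		if (ret)	/* non-zero return stops the walk */
			return ret;
	}
	return 0;
}

static int finalize_pending(struct toy_device *dev, void *data)
{
	(void)data;
	if (dev->pending)
		printf("completing pending request on %s\n", dev->name);
	return 0;	/* keep iterating */
}

int main(void)
{
	struct toy_device devs[] = {
		{ "accel", 1 }, { "gyro", 0 }, { "als", 1 },
	};

	for_each_child(devs, 3, finalize_pending, NULL);
	return 0;
}

The kernel version also takes a reference on each child while the callback runs, which the old open-coded array walk did not.)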
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 6439913372a8..af38fc8eb34f 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -1306,6 +1306,7 @@ static void steam_remove(struct hid_device *hdev)
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
+ cancel_work_sync(&steam->rumble_work);
hid_destroy_device(steam->client_hdev);
steam->client_hdev = NULL;
steam->client_opened = 0;
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index f9ff5be94309..d4bd7848b8c6 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -19,6 +19,7 @@
#define STEELSERIES_SRWS1 BIT(0)
#define STEELSERIES_ARCTIS_1 BIT(1)
+#define STEELSERIES_ARCTIS_9 BIT(2)
struct steelseries_device {
struct hid_device *hdev;
@@ -32,6 +33,7 @@ struct steelseries_device {
struct power_supply *battery;
uint8_t battery_capacity;
bool headset_connected;
+ bool battery_charging;
};
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
@@ -368,32 +370,35 @@ static void steelseries_srws1_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
kfree(drv_data);
- return;
}
#endif
#define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000
#define ARCTIS_1_BATTERY_RESPONSE_LEN 8
+#define ARCTIS_9_BATTERY_RESPONSE_LEN 64
static const char arctis_1_battery_request[] = { 0x06, 0x12 };
+static const char arctis_9_battery_request[] = { 0x00, 0x20 };
-static int steelseries_headset_arctis_1_fetch_battery(struct hid_device *hdev)
+static int steelseries_headset_request_battery(struct hid_device *hdev,
+ const char *request, size_t len)
{
u8 *write_buf;
int ret;
/* Request battery information */
- write_buf = kmemdup(arctis_1_battery_request, sizeof(arctis_1_battery_request), GFP_KERNEL);
+ write_buf = kmemdup(request, len, GFP_KERNEL);
if (!write_buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, arctis_1_battery_request[0],
- write_buf, sizeof(arctis_1_battery_request),
+ hid_dbg(hdev, "Sending battery request report");
+ ret = hid_hw_raw_request(hdev, request[0], write_buf, len,
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
- if (ret < (int)sizeof(arctis_1_battery_request)) {
+ if (ret < (int)len) {
hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
ret = -ENODATA;
}
+
kfree(write_buf);
return ret;
}
@@ -404,7 +409,11 @@ static void steelseries_headset_fetch_battery(struct hid_device *hdev)
int ret = 0;
if (sd->quirks & STEELSERIES_ARCTIS_1)
- ret = steelseries_headset_arctis_1_fetch_battery(hdev);
+ ret = steelseries_headset_request_battery(hdev,
+ arctis_1_battery_request, sizeof(arctis_1_battery_request));
+ else if (sd->quirks & STEELSERIES_ARCTIS_9)
+ ret = steelseries_headset_request_battery(hdev,
+ arctis_9_battery_request, sizeof(arctis_9_battery_request));
if (ret < 0)
hid_dbg(hdev,
@@ -429,6 +438,9 @@ static void steelseries_headset_battery_timer_tick(struct work_struct *work)
steelseries_headset_fetch_battery(hdev);
}
+#define STEELSERIES_PREFIX "SteelSeries "
+#define STEELSERIES_PREFIX_LEN strlen(STEELSERIES_PREFIX)
+
static int steelseries_headset_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -437,13 +449,24 @@ static int steelseries_headset_battery_get_property(struct power_supply *psy,
int ret = 0;
switch (psp) {
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = sd->hdev->name;
+ while (!strncmp(val->strval, STEELSERIES_PREFIX, STEELSERIES_PREFIX_LEN))
+ val->strval += STEELSERIES_PREFIX_LEN;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "SteelSeries";
+ break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = 1;
break;
case POWER_SUPPLY_PROP_STATUS:
- val->intval = sd->headset_connected ?
- POWER_SUPPLY_STATUS_DISCHARGING :
- POWER_SUPPLY_STATUS_UNKNOWN;
+ if (sd->headset_connected) {
+ val->intval = sd->battery_charging ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
break;
case POWER_SUPPLY_PROP_SCOPE:
val->intval = POWER_SUPPLY_SCOPE_DEVICE;
@@ -477,6 +500,8 @@ steelseries_headset_set_wireless_status(struct hid_device *hdev,
}
static enum power_supply_property steelseries_headset_battery_props[] = {
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_SCOPE,
@@ -505,6 +530,7 @@ static int steelseries_headset_battery_register(struct steelseries_device *sd)
/* avoid the warning of 0% battery while waiting for the first info */
steelseries_headset_set_wireless_status(sd->hdev, false);
sd->battery_capacity = 100;
+ sd->battery_charging = false;
sd->battery = devm_power_supply_register(&sd->hdev->dev,
&sd->battery_desc, &battery_cfg);
@@ -520,9 +546,22 @@ static int steelseries_headset_battery_register(struct steelseries_device *sd)
INIT_DELAYED_WORK(&sd->battery_work, steelseries_headset_battery_timer_tick);
steelseries_headset_fetch_battery(sd->hdev);
+ if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ /* The first fetch_battery request can remain unanswered in some cases */
+ schedule_delayed_work(&sd->battery_work,
+ msecs_to_jiffies(STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS));
+ }
+
return 0;
}
+static bool steelseries_is_vendor_usage_page(struct hid_device *hdev, uint8_t usage_page)
+{
+ return hdev->rdesc[0] == 0x06 &&
+ hdev->rdesc[1] == usage_page &&
+ hdev->rdesc[2] == 0xff;
+}
+
static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct steelseries_device *sd;
@@ -548,12 +587,20 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
if (ret)
return ret;
+ if (sd->quirks & STEELSERIES_ARCTIS_9 &&
+ !steelseries_is_vendor_usage_page(hdev, 0xc0))
+ return -ENODEV;
+
spin_lock_init(&sd->lock);
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
return ret;
+ ret = hid_hw_open(hdev);
+ if (ret)
+ return ret;
+
if (steelseries_headset_battery_register(sd) < 0)
hid_err(sd->hdev,
"Failed to register battery for headset\n");
@@ -580,6 +627,7 @@ static void steelseries_remove(struct hid_device *hdev)
cancel_delayed_work_sync(&sd->battery_work);
+ hid_hw_close(hdev);
hid_hw_stop(hdev);
}
@@ -599,6 +647,15 @@ static const __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev,
return rdesc;
}
+static uint8_t steelseries_headset_map_capacity(uint8_t capacity, uint8_t min_in, uint8_t max_in)
+{
+ if (capacity >= max_in)
+ return 100;
+ if (capacity <= min_in)
+ return 0;
+ return (capacity - min_in) * 100 / (max_in - min_in);
+}
+
static int steelseries_headset_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *read_buf,
int size)
@@ -606,6 +663,7 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
struct steelseries_device *sd = hid_get_drvdata(hdev);
int capacity = sd->battery_capacity;
bool connected = sd->headset_connected;
+ bool charging = sd->battery_charging;
unsigned long flags;
/* Not a headset */
@@ -630,6 +688,34 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
}
}
+ if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ hid_dbg(sd->hdev,
+ "Parsing raw event for Arctis 9 headset (%*ph)\n", size, read_buf);
+ if (size < ARCTIS_9_BATTERY_RESPONSE_LEN) {
+ if (!delayed_work_pending(&sd->battery_work))
+ goto request_battery;
+ return 0;
+ }
+
+ if (read_buf[0] == 0xaa && read_buf[1] == 0x01) {
+ connected = true;
+ charging = read_buf[4] == 0x01;
+
+ /*
+ * No official documentation exists for the min and max raw
+ * values; they were determined by testing.
+ */
+ capacity = steelseries_headset_map_capacity(read_buf[3], 0x68, 0x9d);
+ } else {
+ /*
+ * The device is off and sends its last known status
+ * (read_buf[1] == 0x03), or no status is known at all
+ * (read_buf[0] == 0x55).
+ */
+ connected = false;
+ charging = false;
+ }
+ }
+
if (connected != sd->headset_connected) {
hid_dbg(sd->hdev,
"Connected status changed from %sconnected to %sconnected\n",
@@ -647,6 +733,15 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
power_supply_changed(sd->battery);
}
+ if (charging != sd->battery_charging) {
+ hid_dbg(sd->hdev,
+ "Battery charging status changed from %scharging to %scharging\n",
+ sd->battery_charging ? "" : "not ",
+ charging ? "" : "not ");
+ sd->battery_charging = charging;
+ power_supply_changed(sd->battery);
+ }
+
request_battery:
spin_lock_irqsave(&sd->lock, flags);
if (!sd->removed)
@@ -665,6 +760,10 @@ static const struct hid_device_id steelseries_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6),
.driver_data = STEELSERIES_ARCTIS_1 },
+ { /* SteelSeries Arctis 9 Wireless for XBox */
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12c2),
+ .driver_data = STEELSERIES_ARCTIS_9 },
+
{ }
};
MODULE_DEVICE_TABLE(hid, steelseries_devices);
@@ -683,3 +782,4 @@ MODULE_DESCRIPTION("HID driver for Steelseries devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_AUTHOR("Simon Wood <simon@mungewell.org>");
+MODULE_AUTHOR("Christian Mayer <git@mayer-bgk.de>");
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index cf1679b0d4fb..6c3e758bbb09 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -170,6 +170,14 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
ep = &usbif->cur_altsetting->endpoint[1];
b_ep = ep->desc.bEndpointAddress;
+ /* Are the expected endpoints present? */
+ u8 ep_addr[2] = {b_ep, 0};
+
+ if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ hid_err(hdev, "Unexpected non-int endpoint\n");
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);
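(Aside: the endpoint list handed to usb_check_int_endpoints() must be zero-terminated, which is why ep_addr above carries a trailing 0. A userspace analog — hypothetical names, not the kernel helper — showing why a missing terminator would walk off the end of the array:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool check_int_endpoints(const uint8_t *ep_addrs,
				const uint8_t *int_eps, int n_int)
{
	for (; *ep_addrs; ep_addrs++) {	/* stop at the 0 terminator */
		bool found = false;

		for (int i = 0; i < n_int; i++)
			if (int_eps[i] == *ep_addrs)
				found = true;
		if (!found)
			return false;
	}
	return true;
}

int main(void)
{
	uint8_t int_eps[] = { 0x81, 0x01 };	/* interrupt endpoints */
	uint8_t ep_addr[2] = { 0x81, 0 };	/* zero-terminated list */

	printf("0x81 is interrupt: %d\n",
	       check_int_endpoints(ep_addr, int_eps, 2));
	return 0;
}

Without the terminating 0, the loop over ep_addrs would read past the array, which is exactly the stack out-of-bounds the check above must avoid.)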
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index ef26c7defcf6..a6044996abf2 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -842,7 +842,7 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
__u8 *params_ptr = NULL;
size_t params_len = 0;
/* Parameters string descriptor of a model with touch ring (HS610) */
- const __u8 touch_ring_model_params_buf[] = {
+ static const __u8 touch_ring_model_params_buf[] = {
0x13, 0x03, 0x70, 0xC6, 0x00, 0x06, 0x7C, 0x00,
0xFF, 0x1F, 0xD8, 0x13, 0x03, 0x0D, 0x10, 0x01,
0x04, 0x3C, 0x3E
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4e87380d3edd..75544448c239 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -51,6 +51,7 @@
#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
#define I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME BIT(6)
+#define I2C_HID_QUIRK_RE_POWER_ON BIT(7)
/* Command opcodes */
#define I2C_HID_OPCODE_RESET 0x01
@@ -136,6 +137,11 @@ static const struct i2c_hid_quirks {
{ I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
/*
+ * Without additional power on command, at least some QTEC devices send garbage
+ */
+ { I2C_VENDOR_ID_QTEC, HID_ANY_ID,
+ I2C_HID_QUIRK_RE_POWER_ON },
+ /*
* Sending the wakeup after reset actually break ELAN touchscreen controller
*/
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
@@ -1073,7 +1079,11 @@ static int i2c_hid_core_register_hid(struct i2c_hid *ihid)
return ret;
}
- return 0;
+ /* At least some QTEC devices need this after initialization */
+ if (ihid->quirks & I2C_HID_QUIRK_RE_POWER_ON)
+ ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
+
+ return ret;
}
static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid)
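(Aside: the quirk table above is consulted at probe time by matching vendor and product, with HID_ANY_ID acting as a wildcard; the driver's i2c_hid_lookup_quirk() behaves essentially like this standalone sketch, illustrative only:

#include <stdint.h>
#include <stdio.h>

#define HID_ANY_ID		0xffff
#define QUIRK_RE_POWER_ON	(1u << 7)

struct quirk_entry { uint16_t vendor, product; unsigned int quirks; };

static const struct quirk_entry quirk_table[] = {
	{ 0x6243 /* QTEC */, HID_ANY_ID, QUIRK_RE_POWER_ON },
	{ 0, 0, 0 }	/* sentinel */
};

static unsigned int lookup_quirk(uint16_t vendor, uint16_t product)
{
	for (const struct quirk_entry *q = quirk_table; q->vendor; q++)
		if (q->vendor == vendor &&
		    (q->product == HID_ANY_ID || q->product == product))
			return q->quirks;
	return 0;
}

int main(void)
{
	printf("quirks for 6243:1234 = %#x\n", lookup_quirk(0x6243, 0x1234));
	return 0;
}

Using HID_ANY_ID for the product, as the QTEC entry does, applies the re-power-on quirk to every device from that vendor.)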
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index 513d7a4a1b8a..97f4026b1627 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -252,27 +252,6 @@ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
/**
- * ishtp_cl_tx_empty() -test whether client device tx buffer is empty
- * @cl: Pointer to client device instance
- *
- * Look client device tx buffer list, and check whether this list is empty
- *
- * Return: true if client tx buffer list is empty else false
- */
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
-{
- int tx_list_empty;
- unsigned long tx_flags;
-
- spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
- tx_list_empty = list_empty(&cl->tx_list.list);
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
-
- return !!tx_list_empty;
-}
-EXPORT_SYMBOL(ishtp_cl_tx_empty);
-
-/**
* ishtp_cl_rx_get_rb() -Get a rb from client device rx buffer list
* @cl: Pointer to client device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index e61b01e9902e..21a2c0773cc2 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -14,25 +14,6 @@
#include "hbm.h"
#include "client.h"
-int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
-{
- unsigned long tx_free_flags;
- int size;
-
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
-
- return size;
-}
-EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
-
-int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
-{
- return cl->tx_ring_free_size;
-}
-EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
-
/**
* ishtp_read_list_flush() - Flush read queue
* @cl: ishtp client instance
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index d9d398fadcf7..0efd49dd2530 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -120,8 +120,6 @@ int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
-int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
-int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
/* DMA I/F functions */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
index 07fdd52e4c5e..26bf9045a8de 100644
--- a/drivers/hid/intel-ish-hid/ishtp/init.c
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -15,36 +15,6 @@
#include "loader.h"
/**
- * ishtp_dev_state_str() -Convert to string format
- * @state: state to convert
- *
- * Convert state to string for prints
- *
- * Return: character pointer to converted string
- */
-const char *ishtp_dev_state_str(int state)
-{
- switch (state) {
- case ISHTP_DEV_INITIALIZING:
- return "INITIALIZING";
- case ISHTP_DEV_INIT_CLIENTS:
- return "INIT_CLIENTS";
- case ISHTP_DEV_ENABLED:
- return "ENABLED";
- case ISHTP_DEV_RESETTING:
- return "RESETTING";
- case ISHTP_DEV_DISABLED:
- return "DISABLED";
- case ISHTP_DEV_POWER_DOWN:
- return "POWER_DOWN";
- case ISHTP_DEV_POWER_UP:
- return "POWER_UP";
- default:
- return "unknown";
- }
-}
-
-/**
* ishtp_device_init() - ishtp device init
* @dev: ISHTP device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index effbb442c727..44eddc411e97 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -57,7 +57,6 @@ enum ishtp_dev_state {
ISHTP_DEV_POWER_DOWN,
ISHTP_DEV_POWER_UP
};
-const char *ishtp_dev_state_str(int state);
struct ishtp_cl;
diff --git a/drivers/hid/intel-thc-hid/Kconfig b/drivers/hid/intel-thc-hid/Kconfig
new file mode 100644
index 000000000000..91ec84902db8
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/Kconfig
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2024, Intel Corporation.
+
+menu "Intel THC HID Support"
+ depends on X86_64 && PCI
+
+config INTEL_THC_HID
+ tristate "Intel Touch Host Controller"
+ depends on ACPI
+ select HID
+ help
+ THC (Touch Host Controller) is the name of the IP block in PCH that
+ interfaces with touch devices (e.g. touchscreens, touchpads). It
+ comprises three key functional blocks: a natively half-duplex
+ Quad I/O capable SPI master; a low latency I2C interface to support
+ HIDI2C compliant devices; a hardware sequencer with Read/Write DMA
+ capability to system memory.
+
+ Say Y/M here if you want to support Intel THC. If unsure, say N.
+
+config INTEL_QUICKSPI
+ tristate "Intel QuickSPI driver based on Intel Touch Host Controller"
+ depends on INTEL_THC_HID
+ help
+ Intel QuickSPI, based on the Touch Host Controller (THC), implements
+ the HIDSPI (HID over SPI) protocol. It configures THC to work in SPI
+ mode, and controls the THC hardware sequencer to accelerate HIDSPI
+ transaction flow.
+
+ Say Y/M here if you want to support Intel QuickSPI. If unsure, say N.
+
+config INTEL_QUICKI2C
+ tristate "Intel QuickI2C driver based on Intel Touch Host Controller"
+ depends on INTEL_THC_HID
+ help
+ Intel QuickI2C, based on Touch Host Controller (THC) hardware,
+ implements the HIDI2C (HID over I2C) protocol. It configures THC to
+ work in I2C mode, and controls the THC hardware sequencer to
+ accelerate the HIDI2C transaction flow.
+
+ Say Y/M here if you want to support Intel QuickI2C. If unsure, say N.
+
+endmenu
diff --git a/drivers/hid/intel-thc-hid/Makefile b/drivers/hid/intel-thc-hid/Makefile
new file mode 100644
index 000000000000..6f762d87af07
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel Touch Host Controller (THC) drivers
+# Copyright (c) 2024, Intel Corporation.
+#
+#
+
+obj-$(CONFIG_INTEL_THC_HID) += intel-thc.o
+intel-thc-objs += intel-thc/intel-thc-dev.o
+intel-thc-objs += intel-thc/intel-thc-dma.o
+
+obj-$(CONFIG_INTEL_QUICKSPI) += intel-quickspi.o
+intel-quickspi-objs += intel-quickspi/pci-quickspi.o
+intel-quickspi-objs += intel-quickspi/quickspi-hid.o
+intel-quickspi-objs += intel-quickspi/quickspi-protocol.o
+
+obj-$(CONFIG_INTEL_QUICKI2C) += intel-quicki2c.o
+intel-quicki2c-objs += intel-quicki2c/pci-quicki2c.o
+intel-quicki2c-objs += intel-quicki2c/quicki2c-hid.o
+intel-quicki2c-objs += intel-quicki2c/quicki2c-protocol.o
+
+ccflags-y += -I $(src)/intel-thc
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
new file mode 100644
index 000000000000..2de93f4a25ca
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+/* THC QuickI2C ACPI method to get device properties */
+/* HIDI2C device method */
+static guid_t i2c_hid_guid =
+ GUID_INIT(0x3cdff6f7, 0x4267, 0x4555, 0xad, 0x05, 0xb3, 0x0a, 0x3d, 0x89, 0x38, 0xde);
+
+/* platform method */
+static guid_t thc_platform_guid =
+ GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30, 0xf7, 0x87, 0xa1, 0x38);
+
+/**
+ * quicki2c_acpi_get_dsm_property - Query device ACPI DSM parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @guid: the ACPI method's guid
+ * @rev: the ACPI method's revision
+ * @func: the ACPI method's function number
+ * @type: the ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI DSM parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int quicki2c_acpi_get_dsm_property(struct acpi_device *adev, const guid_t *guid,
+ u64 rev, u64 func, acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm_typed(handle, guid, rev, func, NULL, type);
+ if (!obj) {
+ acpi_handle_err(handle,
+ "Error _DSM call failed, rev: %d, func: %d, type: %d\n",
+ (int)rev, (int)func, (int)type);
+ return -ENODEV;
+ }
+
+ if (type == ACPI_TYPE_INTEGER)
+ *(u32 *)prop_buf = (u32)obj->integer.value;
+ else if (type == ACPI_TYPE_BUFFER)
+ memcpy(prop_buf, obj->buffer.pointer, obj->buffer.length);
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * quicki2c_acpi_get_dsd_property - Query device ACPI DSD parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @dsd_method_name: the ACPI method's property name
+ * @type: the ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI DSD parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string dsd_method_name,
+ acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object obj = { .type = type };
+ struct acpi_object_list arg_list = {
+ .count = 1,
+ .pointer = &obj,
+ };
+ union acpi_object *ret_obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, dsd_method_name, &arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle,
+ "Can't evaluate %s method: %d\n", dsd_method_name, status);
+ return -ENODEV;
+ }
+
+ ret_obj = buffer.pointer;
+
+ memcpy(prop_buf, ret_obj->buffer.pointer, ret_obj->buffer.length);
+
+ return 0;
+}
+
+/**
+ * quicki2c_get_acpi_resources - Query all quicki2c devices' ACPI parameters
+ *
+ * @qcdev: pointer to the quicki2c device
+ *
+ * This function gets all quicki2c devices' ACPI resources.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quicki2c_get_acpi_resources(struct quicki2c_device *qcdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(qcdev->dev);
+ struct quicki2c_subip_acpi_parameter i2c_param;
+ struct quicki2c_subip_acpi_config i2c_config;
+ u32 hid_desc_addr;
+ int ret = -EINVAL;
+
+ if (!adev) {
+ dev_err(qcdev->dev, "Invalid acpi device pointer\n");
+ return ret;
+ }
+
+ qcdev->acpi_dev = adev;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &i2c_hid_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_HID_DESC_ADDR,
+ ACPI_TYPE_INTEGER,
+ &hid_desc_addr);
+ if (ret)
+ return ret;
+
+ qcdev->hid_desc_addr = (u16)hid_desc_addr;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &thc_platform_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_ACTIVE_LTR_VAL,
+ ACPI_TYPE_INTEGER,
+ &qcdev->active_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &thc_platform_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_LP_LTR_VAL,
+ ACPI_TYPE_INTEGER,
+ &qcdev->low_power_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = quicki2c_acpi_get_dsd_property(adev, QUICKI2C_ACPI_METHOD_NAME_ICRS,
+ ACPI_TYPE_BUFFER, &i2c_param);
+ if (ret)
+ return ret;
+
+ if (i2c_param.addressing_mode != HIDI2C_ADDRESSING_MODE_7BIT)
+ return -EOPNOTSUPP;
+
+ qcdev->i2c_slave_addr = i2c_param.device_address;
+
+ ret = quicki2c_acpi_get_dsd_property(adev, QUICKI2C_ACPI_METHOD_NAME_ISUB,
+ ACPI_TYPE_BUFFER, &i2c_config);
+ if (ret)
+ return ret;
+
+ if (i2c_param.connection_speed > 0 &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_STANDARD;
+ qcdev->i2c_clock_hcnt = i2c_config.SMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.SMLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_FAST_AND_PLUS;
+ qcdev->i2c_clock_hcnt = i2c_config.FMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.FMLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_FAST_AND_PLUS;
+ qcdev->i2c_clock_hcnt = i2c_config.FPHX;
+ qcdev->i2c_clock_lcnt = i2c_config.FPLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_HIGH_SPEED_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_HIGH_SPEED;
+ qcdev->i2c_clock_hcnt = i2c_config.HMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.HMLX;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
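For reference, a worked view of the speed binning above (a sketch, derived
from the QUICKI2C_SUBIP_*_MAX_SPEED limits defined in quicki2c-dev.h):

	/*
	 * connection_speed (Hz)   i2c_speed_mode          clock counts
	 * ---------------------   ---------------------   ------------
	 * (0, 100000]             THC_I2C_STANDARD        SMHX/SMLX
	 * (100000, 400000]        THC_I2C_FAST_AND_PLUS   FMHX/FMLX
	 * (400000, 1000000]       THC_I2C_FAST_AND_PLUS   FPHX/FPLX
	 * (1000000, 3400000]      THC_I2C_HIGH_SPEED      HMHX/HMLX
	 * otherwise               -EOPNOTSUPP
	 */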
+
+/**
+ * quicki2c_irq_quick_handler - The ISR of the quicki2c driver
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: IRQ_WAKE_THREAD if further processing is needed, otherwise IRQ_HANDLED.
+ */
+static irqreturn_t quicki2c_irq_quick_handler(int irq, void *dev_id)
+{
+ struct quicki2c_device *qcdev = dev_id;
+
+ if (qcdev->state == QUICKI2C_DISABLED)
+ return IRQ_HANDLED;
+
+ /* Disable the THC interrupt before the current interrupt is handled */
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * try_recover - Try to recover THC and the touch device
+ * @qcdev: pointer to quicki2c device
+ *
+ * This function is an error handler, called when a fatal error happens.
+ * It tries to reset the touch device and re-configure THC to recover
+ * data transfer between the device and THC.
+ *
+ * Return: 0 if successful or error code on failure
+ */
+static int try_recover(struct quicki2c_device *qcdev)
+{
+ int ret;
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Reconfig DMA failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int handle_input_report(struct quicki2c_device *qcdev)
+{
+ struct hidi2c_report_packet *pkt = (struct hidi2c_report_packet *)qcdev->input_buf;
+ int rx_dma_finished = 0;
+ size_t report_len;
+ int ret;
+
+ while (!rx_dma_finished) {
+ ret = thc_rxdma_read(qcdev->thc_hw, THC_RXDMA2,
+ (u8 *)pkt, &report_len,
+ &rx_dma_finished);
+ if (ret)
+ return ret;
+
+ if (!pkt->len) {
+ if (qcdev->state == QUICKI2C_RESETING) {
+ qcdev->reset_ack = true;
+ wake_up(&qcdev->reset_ack_wq);
+
+ qcdev->state = QUICKI2C_RESETED;
+ } else {
+ dev_warn(qcdev->dev, "unexpected DIR received\n");
+ }
+
+ continue;
+ }
+
+ /* discard samples until driver probe completes */
+ if (qcdev->state != QUICKI2C_ENABLED)
+ continue;
+
+ quicki2c_hid_send_report(qcdev, pkt->data,
+ HIDI2C_DATA_LEN(le16_to_cpu(pkt->len)));
+ }
+
+ return 0;
+}
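For orientation, a minimal mock-up of the packet consumed above; the
authoritative definition of struct hidi2c_report_packet lives in
<linux/hid-over-i2c.h>, so this sketch only reflects how the fields are
used here:

	/* Hypothetical mirror of the HIDI2C input report packet (sketch). */
	struct example_hidi2c_report_packet {
		__le16 len;	/* total length including this field;
				 * zero signals a reset acknowledgment */
		u8 data[];	/* payload; data[0] carries the report ID */
	};

	/* HIDI2C_DATA_LEN(le16_to_cpu(pkt->len)) strips the length field to
	 * yield the payload size handed to quicki2c_hid_send_report(). */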
+
+/**
+ * quicki2c_irq_thread_handler - IRQ thread handler of quicki2c driver
+ *
+ * @irq: The IRQ number
+ * @dev_id: pointer to the quicki2c device structure
+ *
+ * Return: IRQ_HANDLED to indicate this interrupt has been handled.
+ */
+static irqreturn_t quicki2c_irq_thread_handler(int irq, void *dev_id)
+{
+ struct quicki2c_device *qcdev = dev_id;
+ int err_recover = 0;
+ int int_mask;
+ int ret;
+
+ if (qcdev->state == QUICKI2C_DISABLED)
+ return IRQ_HANDLED;
+
+ ret = pm_runtime_resume_and_get(qcdev->dev);
+ if (ret)
+ return IRQ_HANDLED;
+
+ int_mask = thc_interrupt_handler(qcdev->thc_hw);
+
+ if (int_mask & BIT(THC_FATAL_ERR_INT) || int_mask & BIT(THC_TXN_ERR_INT) ||
+ int_mask & BIT(THC_UNKNOWN_INT)) {
+ err_recover = 1;
+ goto exit;
+ }
+
+ if (int_mask & BIT(THC_RXDMA2_INT)) {
+ err_recover = handle_input_report(qcdev);
+ if (err_recover)
+ goto exit;
+ }
+
+exit:
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ if (err_recover)
+ if (try_recover(qcdev))
+ qcdev->state = QUICKI2C_DISABLED;
+
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * quicki2c_dev_init - Initialize quicki2c device
+ *
+ * @pdev: pointer to the thc pci device
+ * @mem_addr: pointer to the MMIO memory address
+ *
+ * Allocate the quicki2c device structure, initialize the THC device,
+ * then configure THC to HIDI2C mode.
+ *
+ * If successful, enable the THC hardware interrupt.
+ *
+ * Return: pointer to the quicki2c device structure on success
+ * or ERR_PTR on failure.
+ */
+static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __iomem *mem_addr)
+{
+ struct device *dev = &pdev->dev;
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = devm_kzalloc(dev, sizeof(struct quicki2c_device), GFP_KERNEL);
+ if (!qcdev)
+ return ERR_PTR(-ENOMEM);
+
+ qcdev->pdev = pdev;
+ qcdev->dev = dev;
+ qcdev->mem_addr = mem_addr;
+ qcdev->state = QUICKI2C_DISABLED;
+
+ init_waitqueue_head(&qcdev->reset_ack_wq);
+
+ /* thc hw init */
+ qcdev->thc_hw = thc_dev_init(qcdev->dev, qcdev->mem_addr);
+ if (IS_ERR(qcdev->thc_hw)) {
+ ret = PTR_ERR(qcdev->thc_hw);
+ dev_err_once(dev, "Failed to initialize THC device context, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = quicki2c_get_acpi_resources(qcdev);
+ if (ret) {
+ dev_err_once(dev, "Get ACPI resources failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret) {
+ dev_err_once(dev, "Failed to select THC port, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_i2c_subip_init(qcdev->thc_hw, qcdev->i2c_slave_addr,
+ qcdev->i2c_speed_mode,
+ qcdev->i2c_clock_hcnt,
+ qcdev->i2c_clock_lcnt);
+ if (ret)
+ return ERR_PTR(ret);
+
+ thc_int_trigger_type_select(qcdev->thc_hw, false);
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ qcdev->state = QUICKI2C_INITED;
+
+ return qcdev;
+}
+
+/**
+ * quicki2c_dev_deinit - De-initialize quicki2c device
+ *
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Disable the THC interrupt and de-initialize THC.
+ */
+static void quicki2c_dev_deinit(struct quicki2c_device *qcdev)
+{
+ thc_interrupt_enable(qcdev->thc_hw, false);
+ thc_ltr_unconfig(qcdev->thc_hw);
+
+ qcdev->state = QUICKI2C_DISABLED;
+}
+
+/**
+ * quicki2c_dma_init - Configure THC DMA for quicki2c device
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * This function uses the TIC's parameters (such as max input length and
+ * max output length) to allocate THC DMA buffers and configure THC DMA
+ * engines.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quicki2c_dma_init(struct quicki2c_device *qcdev)
+{
+ size_t swdma_max_len;
+ int ret;
+
+ swdma_max_len = max(le16_to_cpu(qcdev->dev_desc.max_input_len),
+ le16_to_cpu(qcdev->dev_desc.report_desc_len));
+
+ ret = thc_dma_set_max_packet_sizes(qcdev->thc_hw, 0,
+ le16_to_cpu(qcdev->dev_desc.max_input_len),
+ le16_to_cpu(qcdev->dev_desc.max_output_len),
+ swdma_max_len);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_allocate(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Allocate THC DMA buffer failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* Enable RxDMA */
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Configure THC DMA failed, ret = %d\n", ret);
+ thc_dma_unconfigure(qcdev->thc_hw);
+ thc_dma_release(qcdev->thc_hw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * quicki2c_dma_deinit - Release THC DMA for quicki2c device
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Stop THC DMA engines and release all DMA buffers.
+ *
+ */
+static void quicki2c_dma_deinit(struct quicki2c_device *qcdev)
+{
+ thc_dma_unconfigure(qcdev->thc_hw);
+ thc_dma_release(qcdev->thc_hw);
+}
+
+/**
+ * quicki2c_alloc_report_buf - Alloc report buffers
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Allocate the report descriptor buffer; it will be used to store the
+ * TIC's HID report descriptor.
+ *
+ * Allocate the input report buffer; it will be used to receive HID input
+ * report data from the TIC.
+ *
+ * Allocate the output report buffer; it will be used to store HID output
+ * reports, such as set feature.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quicki2c_alloc_report_buf(struct quicki2c_device *qcdev)
+{
+ size_t max_report_len;
+
+ qcdev->report_descriptor = devm_kzalloc(qcdev->dev,
+ le16_to_cpu(qcdev->dev_desc.report_desc_len),
+ GFP_KERNEL);
+ if (!qcdev->report_descriptor)
+ return -ENOMEM;
+
+ /*
+ * Some HIDI2C devices don't declare their input/output max length
+ * correctly; give a default 4K buffer to avoid DMA buffer overrun.
+ */
+ max_report_len = max(le16_to_cpu(qcdev->dev_desc.max_input_len), SZ_4K);
+
+ qcdev->input_buf = devm_kzalloc(qcdev->dev, max_report_len, GFP_KERNEL);
+ if (!qcdev->input_buf)
+ return -ENOMEM;
+
+ if (!le16_to_cpu(qcdev->dev_desc.max_output_len))
+ qcdev->dev_desc.max_output_len = cpu_to_le16(SZ_4K);
+
+ max_report_len = max(le16_to_cpu(qcdev->dev_desc.max_output_len),
+ max_report_len);
+
+ qcdev->report_buf = devm_kzalloc(qcdev->dev, max_report_len, GFP_KERNEL);
+ if (!qcdev->report_buf)
+ return -ENOMEM;
+
+ qcdev->report_len = max_report_len;
+
+ return 0;
+}
+
+/*
+ * quicki2c_probe: Quicki2c driver probe function
+ *
+ * @pdev: pointer to the PCI device
+ * @id: pointer to the pci_device_id structure
+ *
+ * This function initializes the THC and the HIDI2C device; the flow is:
+ * - do THC pci device initialization
+ * - query HIDI2C ACPI parameters
+ * - configure THC to HIDI2C mode
+ * - go through HIDI2C enumeration flow
+ * |- read device descriptor
+ * |- reset HIDI2C device
+ * - enable THC interrupt and DMA
+ * - read report descriptor
+ * - register HID device
+ * - enable runtime power management
+ *
+ * Return 0 on success or error code on failure.
+ */
+static int quicki2c_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct quicki2c_device *qcdev;
+ void __iomem *mem_addr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err_once(&pdev->dev, "Failed to enable PCI device, ret = %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (ret) {
+ dev_err_once(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
+ goto disable_pci_device;
+ }
+
+ mem_addr = pcim_iomap_table(pdev)[0];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err_once(&pdev->dev, "No usable DMA configuration %d\n", ret);
+ goto unmap_io_region;
+ }
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err_once(&pdev->dev,
+ "Failed to allocate IRQ vectors. ret = %d\n", ret);
+ goto unmap_io_region;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ qcdev = quicki2c_dev_init(pdev, mem_addr);
+ if (IS_ERR(qcdev)) {
+ dev_err_once(&pdev->dev, "QuickI2C device init failed\n");
+ ret = PTR_ERR(qcdev);
+ goto unmap_io_region;
+ }
+
+ pci_set_drvdata(pdev, qcdev);
+
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ quicki2c_irq_quick_handler,
+ quicki2c_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME,
+ qcdev);
+ if (ret) {
+ dev_err_once(&pdev->dev,
+ "Failed to request threaded IRQ, irq = %d.\n", pdev->irq);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_get_device_descriptor(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get device descriptor failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_alloc_report_buf(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Alloc report buffers failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_dma_init(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Setup THC DMA failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ goto dev_deinit;
+
+ ret = quicki2c_set_power(qcdev, HIDI2C_ON);
+ if (ret) {
+ dev_err(&pdev->dev, "Set Power On command failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_reset(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Reset HIDI2C device failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_get_report_descriptor(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get report descriptor failed, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ ret = quicki2c_hid_probe(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HID device, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ qcdev->state = QUICKI2C_ENABLED;
+
+ /* Enable runtime power management */
+ pm_runtime_use_autosuspend(qcdev->dev);
+ pm_runtime_set_autosuspend_delay(qcdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_noidle(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ dev_dbg(&pdev->dev, "QuickI2C probe success\n");
+
+ return 0;
+
+dma_deinit:
+ quicki2c_dma_deinit(qcdev);
+dev_deinit:
+ quicki2c_dev_deinit(qcdev);
+unmap_io_region:
+ pcim_iounmap_regions(pdev, BIT(0));
+disable_pci_device:
+ pci_clear_master(pdev);
+
+ return ret;
+}
+
+/**
+ * quicki2c_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void quicki2c_remove(struct pci_dev *pdev)
+{
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return;
+
+ quicki2c_hid_remove(qcdev);
+ quicki2c_dma_deinit(qcdev);
+
+ pm_runtime_get_noresume(qcdev->dev);
+
+ quicki2c_dev_deinit(qcdev);
+
+ pcim_iounmap_regions(pdev, BIT(0));
+ pci_clear_master(pdev);
+}
+
+/**
+ * quicki2c_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called from the reboot notifier. It's a simplified
+ * version of remove, so we go down faster.
+ */
+static void quicki2c_shutdown(struct pci_dev *pdev)
+{
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return;
+
+ /* Must stop DMA before reboot to avoid DMA entering an unknown state */
+ quicki2c_dma_deinit(qcdev);
+
+ quicki2c_dev_deinit(qcdev);
+}
+
+static int quicki2c_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ /*
+ * As the THC I2C sub-IP has no register auto save/restore support,
+ * the driver needs to do that explicitly for every D3 case.
+ */
+ ret = thc_i2c_subip_regs_save(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ return 0;
+}
+
+static int quicki2c_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret)
+ return ret;
+
+ ret = thc_i2c_subip_regs_restore(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quicki2c_freeze(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ return 0;
+}
+
+static int quicki2c_thaw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quicki2c_poweroff(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_ltr_unconfig(qcdev->thc_hw);
+
+ quicki2c_dma_deinit(qcdev);
+
+ return 0;
+}
+
+static int quicki2c_restore(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ /* Re-configure THC HW when coming back from hibernation */
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret)
+ return ret;
+
+ ret = thc_i2c_subip_init(qcdev->thc_hw, qcdev->i2c_slave_addr,
+ qcdev->i2c_speed_mode,
+ qcdev->i2c_clock_hcnt,
+ qcdev->i2c_clock_lcnt);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_ltr_config(qcdev->thc_hw,
+ qcdev->active_ltr_val,
+ qcdev->low_power_ltr_val);
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static int quicki2c_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_LP);
+
+ pci_save_state(pdev);
+
+ return 0;
+}
+
+static int quicki2c_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops quicki2c_pm_ops = {
+ .suspend = quicki2c_suspend,
+ .resume = quicki2c_resume,
+ .freeze = quicki2c_freeze,
+ .thaw = quicki2c_thaw,
+ .poweroff = quicki2c_poweroff,
+ .restore = quicki2c_restore,
+ .runtime_suspend = quicki2c_runtime_suspend,
+ .runtime_resume = quicki2c_runtime_resume,
+ .runtime_idle = NULL,
+};
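As a quick reference, a sketch of when the kernel invokes each callback
wired up above (standard dev_pm_ops semantics, not driver-specific
behavior):

	/*
	 * suspend/resume   - system sleep (s2idle/S3) entry and exit
	 * freeze/thaw      - hibernation image creation and its completion
	 * poweroff/restore - hibernation power-down and resume-from-image boot
	 * runtime_suspend/runtime_resume - per-device idle PM; here they only
	 *                    switch between low power and active LTR modes
	 */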
+
+static const struct pci_device_id quicki2c_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT2), },
+ {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2), },
+ {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2), },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
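Each PCI_VDEVICE() entry above is shorthand; as a sketch, the first entry
expands roughly to:

	/* { .vendor = PCI_VENDOR_ID_INTEL,
	 *   .device = THC_LNL_DEVICE_ID_I2C_PORT1,
	 *   .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID } */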
+
+static struct pci_driver quicki2c_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = quicki2c_pci_tbl,
+ .probe = quicki2c_probe,
+ .remove = quicki2c_remove,
+ .shutdown = quicki2c_shutdown,
+ .driver.pm = &quicki2c_pm_ops,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+};
+
+module_pci_driver(quicki2c_driver);
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) QuickI2C Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
new file mode 100644
index 000000000000..6ddb584bd611
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_DEV_H_
+#define _QUICKI2C_DEV_H_
+
+#include <linux/hid-over-i2c.h>
+#include <linux/workqueue.h>
+
+#define THC_LNL_DEVICE_ID_I2C_PORT1 0xA848
+#define THC_LNL_DEVICE_ID_I2C_PORT2 0xA84A
+#define THC_PTL_H_DEVICE_ID_I2C_PORT1 0xE348
+#define THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
+#define THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
+#define THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+
+/* Packet size value, the unit is 16 bytes */
+#define MAX_PACKET_SIZE_VALUE_LNL 256
+
+/* HIDI2C special ACPI parameters DSD name */
+#define QUICKI2C_ACPI_METHOD_NAME_ICRS "ICRS"
+#define QUICKI2C_ACPI_METHOD_NAME_ISUB "ISUB"
+
+/* HIDI2C special ACPI parameters DSM methods */
+#define QUICKI2C_ACPI_REVISION_NUM 1
+#define QUICKI2C_ACPI_FUNC_NUM_HID_DESC_ADDR 1
+#define QUICKI2C_ACPI_FUNC_NUM_ACTIVE_LTR_VAL 1
+#define QUICKI2C_ACPI_FUNC_NUM_LP_LTR_VAL 2
+
+#define QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED 100000
+#define QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED 400000
+#define QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED 1000000
+#define QUICKI2C_SUBIP_HIGH_SPEED_MODE_MAX_SPEED 3400000
+
+#define QUICKI2C_DEFAULT_ACTIVE_LTR_VALUE 5
+#define QUICKI2C_DEFAULT_LP_LTR_VALUE 500
+#define QUICKI2C_RPM_TIMEOUT_MS 500
+
+/*
+ * THC uses runtime auto suspend to dynamically switch between the THC active
+ * LTR and the low power LTR to save CPU power.
+ * The default value is 5000 ms, which means that if no touch event occurs
+ * within this time, THC will switch to low power LTR mode.
+ */
+#define DEFAULT_AUTO_SUSPEND_DELAY_MS 5000
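As a sketch of how this delay is consumed (the same calls appear in
quicki2c_probe() and the IRQ thread handler in pci-quicki2c.c):

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
	/* later, after each completed transaction: */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);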
+
+enum quicki2c_dev_state {
+ QUICKI2C_NONE,
+ QUICKI2C_RESETING,
+ QUICKI2C_RESETED,
+ QUICKI2C_INITED,
+ QUICKI2C_ENABLED,
+ QUICKI2C_DISABLED,
+};
+
+enum {
+ HIDI2C_ADDRESSING_MODE_7BIT,
+ HIDI2C_ADDRESSING_MODE_10BIT,
+};
+
+/**
+ * struct quicki2c_subip_acpi_parameter - QuickI2C ACPI DSD parameters
+ * @device_address: I2C device slave address
+ * @connection_speed: I2C device expected connection speed
+ * @addressing_mode: I2C device slave address mode, 7-bit or 10-bit
+ *
+ * These properties are retrieved from the QUICKI2C_ACPI_METHOD_NAME_ICRS
+ * method and are used as bus parameters.
+ */
+struct quicki2c_subip_acpi_parameter {
+ u16 device_address;
+ u64 connection_speed;
+ u8 addressing_mode;
+} __packed;
+
+/**
+ * struct quicki2c_subip_acpi_config - QuickI2C ACPI DSD parameters
+ * @SMHX: Standard Mode (100 kbit/s) Serial Clock Line HIGH Period
+ * @SMLX: Standard Mode (100 kbit/s) Serial Clock Line LOW Period
+ * @SMTD: Standard Mode (100 kbit/s) Serial Data Line Transmit Hold Period
+ * @SMRD: Standard Mode (100 kbit/s) Serial Data Receive Hold Period
+ * @FMHX: Fast Mode (400 kbit/s) Serial Clock Line HIGH Period
+ * @FMLX: Fast Mode (400 kbit/s) Serial Clock Line LOW Period
+ * @FMTD: Fast Mode (400 kbit/s) Serial Data Line Transmit Hold Period
+ * @FMRD: Fast Mode (400 kbit/s) Serial Data Line Receive Hold Period
+ * @FMSL: Maximum length (in ic_clk_cycles) of suppressed spikes
+ * in Standard Mode, Fast Mode and Fast Mode Plus
+ * @FPHX: Fast Mode Plus (1Mbit/sec) Serial Clock Line HIGH Period
+ * @FPLX: Fast Mode Plus (1Mbit/sec) Serial Clock Line LOW Period
+ * @FPTD: Fast Mode Plus (1Mbit/sec) Serial Data Line Transmit HOLD Period
+ * @FPRD: Fast Mode Plus (1Mbit/sec) Serial Data Line Receive HOLD Period
+ * @HMHX: High Speed Mode (3.4Mbits/sec) Serial Clock Line HIGH Period
+ * @HMLX: High Speed Mode (3.4Mbits/sec) Serial Clock Line LOW Period
+ * @HMTD: High Speed Mode (3.4Mbits/sec) Serial Data Line Transmit HOLD Period
+ * @HMRD: High Speed Mode (3.4Mbits/sec) Serial Data Line Receive HOLD Period
+ * @HMSL: Maximum length (in ic_clk_cycles) of suppressed spikes in High Speed Mode
+ *
+ * These properties are retrieved from the QUICKI2C_ACPI_METHOD_NAME_ISUB
+ * method and are used for I2C timing configuration.
+ */
+struct quicki2c_subip_acpi_config {
+ u64 SMHX;
+ u64 SMLX;
+ u64 SMTD;
+ u64 SMRD;
+
+ u64 FMHX;
+ u64 FMLX;
+ u64 FMTD;
+ u64 FMRD;
+ u64 FMSL;
+
+ u64 FPHX;
+ u64 FPLX;
+ u64 FPTD;
+ u64 FPRD;
+
+ u64 HMHX;
+ u64 HMLX;
+ u64 HMTD;
+ u64 HMRD;
+ u64 HMSL;
+};
+
+struct device;
+struct pci_dev;
+struct thc_device;
+struct hid_device;
+struct acpi_device;
+
+/**
+ * struct quicki2c_device - THC QuickI2C device struct
+ * @dev: pointer to the kernel device
+ * @pdev: pointer to the PCI device
+ * @thc_hw: pointer to the THC device
+ * @hid_dev: pointer to the HID device
+ * @acpi_dev: pointer to the ACPI device
+ * @state: THC I2C device state
+ * @mem_addr: MMIO memory address
+ * @dev_desc: device descriptor for the HIDI2C protocol
+ * @i2c_slave_addr: HIDI2C device slave address
+ * @hid_desc_addr: register address used to retrieve the HID device descriptor
+ * @active_ltr_val: THC active LTR value
+ * @low_power_ltr_val: THC low power LTR value
+ * @i2c_speed_mode: 0 - standard mode, 1 - fast and fast plus modes,
+ *                  2 - high speed mode
+ * @i2c_clock_hcnt: I2C CLK high period time (unit in cycle count)
+ * @i2c_clock_lcnt: I2C CLK low period time (unit in cycle count)
+ * @report_descriptor: stores a copy of the device report descriptor
+ * @input_buf: stores a copy of the latest input report data
+ * @report_buf: stores a copy of the latest input/output report packet from set/get feature
+ * @report_len: the length of the input/output report packet
+ * @reset_ack_wq: wait queue for waiting for the reset response from the device
+ * @reset_ack: indicates whether the reset response has been received
+ */
+struct quicki2c_device {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct thc_device *thc_hw;
+ struct hid_device *hid_dev;
+ struct acpi_device *acpi_dev;
+ enum quicki2c_dev_state state;
+
+ void __iomem *mem_addr;
+
+ struct hidi2c_dev_descriptor dev_desc;
+ u8 i2c_slave_addr;
+ u16 hid_desc_addr;
+
+ u32 active_ltr_val;
+ u32 low_power_ltr_val;
+
+ u32 i2c_speed_mode;
+ u32 i2c_clock_hcnt;
+ u32 i2c_clock_lcnt;
+
+ u8 *report_descriptor;
+ u8 *input_buf;
+ u8 *report_buf;
+ u32 report_len;
+
+ wait_queue_head_t reset_ack_wq;
+ bool reset_ack;
+};
+
+#endif /* _QUICKI2C_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
new file mode 100644
index 000000000000..5c3ec95bb3fd
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/pm_runtime.h>
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+/**
+ * quicki2c_hid_parse() - HID core parse() callback
+ *
+ * @hid: HID device instance
+ *
+ * This function gets called during the call to hid_add_device().
+ *
+ * Return: 0 on success and non-zero on error.
+ */
+static int quicki2c_hid_parse(struct hid_device *hid)
+{
+ struct quicki2c_device *qcdev = hid->driver_data;
+
+ if (qcdev->report_descriptor)
+ return hid_parse_report(hid, qcdev->report_descriptor,
+ le16_to_cpu(qcdev->dev_desc.report_desc_len));
+
+ dev_err_once(qcdev->dev, "invalid report descriptor\n");
+ return -EINVAL;
+}
+
+static int quicki2c_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quicki2c_hid_stop(struct hid_device *hid)
+{
+}
+
+static int quicki2c_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quicki2c_hid_close(struct hid_device *hid)
+{
+}
+
+static int quicki2c_hid_raw_request(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct quicki2c_device *qcdev = hid->driver_data;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(qcdev->dev);
+ if (ret)
+ return ret;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ ret = quicki2c_get_report(qcdev, rtype, reportnum, buf, len);
+ break;
+ case HID_REQ_SET_REPORT:
+ ret = quicki2c_set_report(qcdev, rtype, reportnum, buf, len);
+ break;
+ default:
+ dev_err(qcdev->dev, "Unsupported request type %d\n", reqtype);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ return ret;
+}
+
+static int quicki2c_hid_power(struct hid_device *hid, int lvl)
+{
+ return 0;
+}
+
+static struct hid_ll_driver quicki2c_hid_ll_driver = {
+ .parse = quicki2c_hid_parse,
+ .start = quicki2c_hid_start,
+ .stop = quicki2c_hid_stop,
+ .open = quicki2c_hid_open,
+ .close = quicki2c_hid_close,
+ .power = quicki2c_hid_power,
+ .raw_request = quicki2c_hid_raw_request,
+};
+
+/**
+ * quicki2c_hid_probe() - Register HID low level driver
+ *
+ * @qcdev: pointer to the quicki2c device
+ *
+ * This function is used to allocate and add a HID device.
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quicki2c_hid_probe(struct quicki2c_device *qcdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &quicki2c_hid_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->dev.parent = qcdev->dev;
+ hid->driver_data = qcdev;
+ hid->version = le16_to_cpu(qcdev->dev_desc.version_id);
+ hid->vendor = le16_to_cpu(qcdev->dev_desc.vendor_id);
+ hid->product = le16_to_cpu(qcdev->dev_desc.product_id);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quicki2c-hid",
+ hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ qcdev->hid_dev = hid;
+
+ return 0;
+}
+
+/**
+ * quicki2c_hid_remove() - Destroy HID device
+ *
+ * @qcdev: pointer to the quicki2c device
+ */
+void quicki2c_hid_remove(struct quicki2c_device *qcdev)
+{
+ hid_destroy_device(qcdev->hid_dev);
+}
+
+/**
+ * quicki2c_hid_send_report() - Send HID input report data to HID core
+ *
+ * @qcdev: pointer to the quicki2c device
+ * @data: pointer to the input report data buffer
+ * @data_len: the length of the input report data
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quicki2c_hid_send_report(struct quicki2c_device *qcdev,
+ void *data, size_t data_len)
+{
+ int ret;
+
+ ret = hid_input_report(qcdev->hid_dev, HID_INPUT_REPORT, data, data_len, 1);
+ if (ret)
+ dev_err(qcdev->dev, "Failed to send HID input report, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h
new file mode 100644
index 000000000000..e80df5f339fe
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_HID_H_
+#define _QUICKI2C_HID_H_
+
+struct quicki2c_device;
+
+int quicki2c_hid_send_report(struct quicki2c_device *qcdev,
+ void *data, size_t data_size);
+int quicki2c_hid_probe(struct quicki2c_device *qcdev);
+void quicki2c_hid_remove(struct quicki2c_device *qcdev);
+
+#endif /* _QUICKI2C_HID_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
new file mode 100644
index 000000000000..f493df0d5dc4
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/bitfield.h>
+#include <linux/hid.h>
+#include <linux/hid-over-i2c.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+static int quicki2c_init_write_buf(struct quicki2c_device *qcdev, u32 cmd, int cmd_len,
+ bool append_data_reg, u8 *data, int data_len,
+ u8 *write_buf, int write_buf_len)
+{
+ int buf_len, offset = 0;
+
+ buf_len = HIDI2C_REG_LEN + cmd_len;
+
+ if (append_data_reg)
+ buf_len += HIDI2C_REG_LEN;
+
+ if (data && data_len)
+ buf_len += data_len + HIDI2C_LENGTH_LEN;
+
+ if (buf_len > write_buf_len)
+ return -EINVAL;
+
+ memcpy(write_buf, &qcdev->dev_desc.cmd_reg, HIDI2C_REG_LEN);
+ offset += HIDI2C_REG_LEN;
+ memcpy(write_buf + offset, &cmd, cmd_len);
+ offset += cmd_len;
+
+ if (append_data_reg) {
+ memcpy(write_buf + offset, &qcdev->dev_desc.data_reg, HIDI2C_REG_LEN);
+ offset += HIDI2C_REG_LEN;
+ }
+
+ if (data && data_len) {
+ __le16 len = cpu_to_le16(data_len + HIDI2C_LENGTH_LEN);
+
+ memcpy(write_buf + offset, &len, HIDI2C_LENGTH_LEN);
+ offset += HIDI2C_LENGTH_LEN;
+ memcpy(write_buf + offset, data, data_len);
+ }
+
+ return buf_len;
+}
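A worked byte-layout example for the helper above, as a sketch: assuming
the usual 16-bit HIDI2C registers (HIDI2C_REG_LEN == 2, HIDI2C_LENGTH_LEN
== 2) and hypothetical register addresses cmd_reg = 0x0022 and
data_reg = 0x0023, a 2-byte command with 3 bytes of data produces:

	/*
	 * 22 00 | c0 c1 | 23 00 | 05 00 | d0 d1 d2
	 * cmd_reg  cmd   data_reg  len     data
	 *
	 * len = data_len + HIDI2C_LENGTH_LEN = 5, little endian.
	 */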
+
+static int quicki2c_encode_cmd(struct quicki2c_device *qcdev, u32 *cmd_buf,
+ u8 opcode, u8 report_type, u8 report_id)
+{
+ int cmd_len;
+
+ *cmd_buf = FIELD_PREP(HIDI2C_CMD_OPCODE, opcode) |
+ FIELD_PREP(HIDI2C_CMD_REPORT_TYPE, report_type);
+
+ if (report_id < HIDI2C_CMD_MAX_RI) {
+ *cmd_buf |= FIELD_PREP(HIDI2C_CMD_REPORT_ID, report_id);
+ cmd_len = HIDI2C_CMD_LEN;
+ } else {
+ *cmd_buf |= FIELD_PREP(HIDI2C_CMD_REPORT_ID, HIDI2C_CMD_MAX_RI) |
+ FIELD_PREP(HIDI2C_CMD_3RD_BYTE, report_id);
+ cmd_len = HIDI2C_CMD_LEN_OPT;
+ }
+
+ return cmd_len;
+}
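A worked example of the report ID encoding above (sketch, assuming
HIDI2C_CMD_MAX_RI is the HID-over-I2C sentinel value 15):

	/*
	 * report_id = 3:  fits in the ID field, cmd_len = HIDI2C_CMD_LEN.
	 * report_id = 42: the ID field holds the sentinel and the real ID
	 *                 travels in the optional third byte, so
	 *                 cmd_len = HIDI2C_CMD_LEN_OPT.
	 */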
+
+static int write_cmd_to_txdma(struct quicki2c_device *qcdev, int opcode,
+ int report_type, int report_id, u8 *buf, int buf_len)
+{
+ size_t write_buf_len;
+ int cmd_len, ret;
+ u32 cmd;
+
+ cmd_len = quicki2c_encode_cmd(qcdev, &cmd, opcode, report_type, report_id);
+
+ ret = quicki2c_init_write_buf(qcdev, cmd, cmd_len, buf ? true : false, buf,
+ buf_len, qcdev->report_buf, qcdev->report_len);
+ if (ret < 0)
+ return ret;
+
+ write_buf_len = ret;
+
+ return thc_dma_write(qcdev->thc_hw, qcdev->report_buf, write_buf_len);
+}
+
+int quicki2c_set_power(struct quicki2c_device *qcdev, enum hidi2c_power_state power_state)
+{
+ return write_cmd_to_txdma(qcdev, HIDI2C_SET_POWER, HIDI2C_RESERVED, power_state, NULL, 0);
+}
+
+int quicki2c_get_device_descriptor(struct quicki2c_device *qcdev)
+{
+ u32 read_len = 0;
+ int ret;
+
+ ret = thc_tic_pio_write_and_read(qcdev->thc_hw, qcdev->hid_desc_addr,
+ HIDI2C_REG_LEN, NULL, HIDI2C_DEV_DESC_LEN,
+ &read_len, (u32 *)&qcdev->dev_desc);
+ if (ret || HIDI2C_DEV_DESC_LEN != read_len) {
+ dev_err_once(qcdev->dev, "Get device descriptor failed, ret %d, read len %u\n",
+ ret, read_len);
+ return -EIO;
+ }
+
+ if (le16_to_cpu(qcdev->dev_desc.bcd_ver) != HIDI2C_HID_DESC_BCDVERSION)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int quicki2c_get_report_descriptor(struct quicki2c_device *qcdev)
+{
+ u16 desc_reg = le16_to_cpu(qcdev->dev_desc.report_desc_reg);
+ size_t read_len = le16_to_cpu(qcdev->dev_desc.report_desc_len);
+ u32 prd_len = read_len;
+
+ return thc_swdma_read(qcdev->thc_hw, (u8 *)&desc_reg, HIDI2C_REG_LEN,
+ &prd_len, qcdev->report_descriptor, &read_len);
+}
+
+int quicki2c_get_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len)
+{
+ struct hidi2c_report_packet *rpt;
+ size_t write_buf_len, read_len = 0;
+ int cmd_len, rep_type;
+ u32 cmd;
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT) {
+ rep_type = HIDI2C_INPUT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = HIDI2C_FEATURE;
+ } else {
+ dev_err(qcdev->dev, "Unsupported report type for GET REPORT: %d\n", report_type);
+ return -EINVAL;
+ }
+
+ cmd_len = quicki2c_encode_cmd(qcdev, &cmd, HIDI2C_GET_REPORT, rep_type, reportnum);
+
+ ret = quicki2c_init_write_buf(qcdev, cmd, cmd_len, true, NULL, 0,
+ qcdev->report_buf, qcdev->report_len);
+ if (ret < 0)
+ return ret;
+
+ write_buf_len = ret;
+
+ rpt = (struct hidi2c_report_packet *)qcdev->input_buf;
+
+ ret = thc_swdma_read(qcdev->thc_hw, qcdev->report_buf, write_buf_len,
+ NULL, rpt, &read_len);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Get report failed, ret %d, read len (%zu vs %d)\n",
+ ret, read_len, buf_len);
+ return ret;
+ }
+
+ if (HIDI2C_DATA_LEN(le16_to_cpu(rpt->len)) != buf_len || rpt->data[0] != reportnum) {
+ dev_err_once(qcdev->dev, "Invalid packet, len (%d vs %d) report id (%d vs %d)\n",
+ le16_to_cpu(rpt->len), buf_len, rpt->data[0], reportnum);
+ return -EINVAL;
+ }
+
+ memcpy(buf, rpt->data, buf_len);
+
+ return buf_len;
+}
+
+int quicki2c_set_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT) {
+ rep_type = HIDI2C_OUTPUT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = HIDI2C_FEATURE;
+ } else {
+ dev_err(qcdev->dev, "Unsupported report type for SET REPORT: %d\n", report_type);
+ return -EINVAL;
+ }
+
+ ret = write_cmd_to_txdma(qcdev, HIDI2C_SET_REPORT, rep_type, reportnum, buf, buf_len);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Set Report failed, ret %d\n", ret);
+ return ret;
+ }
+
+ return buf_len;
+}
+
+#define HIDI2C_RESET_TIMEOUT 5
+
+int quicki2c_reset(struct quicki2c_device *qcdev)
+{
+ int ret;
+
+ qcdev->reset_ack = false;
+ qcdev->state = QUICKI2C_RESETING;
+
+ ret = write_cmd_to_txdma(qcdev, HIDI2C_RESET, HIDI2C_RESERVED, 0, NULL, 0);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Send reset command failed, ret %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qcdev->reset_ack_wq, qcdev->reset_ack,
+ HIDI2C_RESET_TIMEOUT * HZ);
+ if (ret <= 0 || !qcdev->reset_ack) {
+ dev_err_once(qcdev->dev,
+ "Waiting for reset response timed out, ret: %d, timeout: %ds\n",
+ ret, HIDI2C_RESET_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
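The reset flow above pairs with handle_input_report() in pci-quicki2c.c;
as a sketch of the handshake:

	/*
	 *   driver                            device
	 *     |--- HIDI2C_RESET via TxDMA ---->|
	 *     |<--- zero-length input report --|
	 *
	 * handle_input_report() sees len == 0, sets reset_ack and wakes
	 * reset_ack_wq, bounded here by the 5 second timeout.
	 */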
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h
new file mode 100644
index 000000000000..bf4908cce59c
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_PROTOCOL_H_
+#define _QUICKI2C_PROTOCOL_H_
+
+#include <linux/hid-over-i2c.h>
+
+struct quicki2c_device;
+
+int quicki2c_set_power(struct quicki2c_device *qcdev, enum hidi2c_power_state power_state);
+int quicki2c_get_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len);
+int quicki2c_set_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len);
+int quicki2c_get_device_descriptor(struct quicki2c_device *qcdev);
+int quicki2c_get_report_descriptor(struct quicki2c_device *qcdev);
+int quicki2c_reset(struct quicki2c_device *qcdev);
+
+#endif /* _QUICKI2C_PROTOCOL_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
new file mode 100644
index 000000000000..4641e818dfa4
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -0,0 +1,987 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+#include "quickspi-protocol.h"
+
+struct quickspi_driver_data mtl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_MTL,
+};
+
+struct quickspi_driver_data lnl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL,
+};
+
+struct quickspi_driver_data ptl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL,
+};
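These per-SoC structures are expected to be hooked up through the PCI ID
table's .driver_data field, which quickspi_dev_init() reads back via
id->driver_data. A hypothetical entry (the device ID macro is assumed for
illustration) would look like:

	/* { PCI_VDEVICE(INTEL, THC_MTL_DEVICE_ID_SPI_PORT1),
	 *   (kernel_ulong_t)&mtl },  -- sketch only */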
+
+/* THC QuickSPI ACPI method to get device properties */
+/* HIDSPI Method: {6e2ac436-0fcf-41af-a265-b32a220dcfab} */
+static guid_t hidspi_guid =
+ GUID_INIT(0x6e2ac436, 0x0fcf, 0x41af, 0xa2, 0x65, 0xb3, 0x2a,
+ 0x22, 0x0d, 0xcf, 0xab);
+
+/* QuickSPI Method: {300d35b7-ac20-413e-8e9c-92e4dafd0afe} */
+static guid_t thc_quickspi_guid =
+ GUID_INIT(0x300d35b7, 0xac20, 0x413e, 0x8e, 0x9c, 0x92, 0xe4,
+ 0xda, 0xfd, 0x0a, 0xfe);
+
+/* Platform Method: {84005682-5b71-41a4-8d66-8130f787a138} */
+static guid_t thc_platform_guid =
+ GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30,
+ 0xf7, 0x87, 0xa1, 0x38);
+
+/**
+ * thc_acpi_get_property - Query device ACPI parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @guid: the ACPI method's guid
+ * @rev: the ACPI method's revision
+ * @func: the ACPI method's function number
+ * @type: the ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI parameters.
+ *
+ * Return: 0 if successful or -ENODEV on failure.
+ */
+static int thc_acpi_get_property(struct acpi_device *adev, const guid_t *guid,
+ u64 rev, u64 func, acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm_typed(handle, guid, rev, func, NULL, type);
+ if (!obj) {
+ acpi_handle_err(handle,
+ "Error _DSM call failed, rev: %llu, func: %llu, type: %u\n",
+ rev, func, type);
+ return -ENODEV;
+ }
+
+ if (type == ACPI_TYPE_INTEGER)
+ *(u32 *)prop_buf = (u32)obj->integer.value;
+ else if (type == ACPI_TYPE_BUFFER)
+ memcpy(prop_buf, obj->buffer.pointer, obj->buffer.length);
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * quickspi_get_acpi_resources - Query all quickspi devices' ACPI parameters
+ *
+ * @qsdev: pointer to the quickspi device
+ *
+ * This function gets all quickspi devices' ACPI resources.
+ *
+ * Return: 0 if successful or error code on failure.
+ */
+static int quickspi_get_acpi_resources(struct quickspi_device *qsdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(qsdev->dev);
+ int ret = -EINVAL;
+
+ if (!adev) {
+ dev_err(qsdev->dev, "no valid ACPI companion\n");
+ return ret;
+ }
+
+ qsdev->acpi_dev = adev;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_HDR_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->input_report_hdr_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_BDY_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->input_report_bdy_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_OUTPUT_REP_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->output_report_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_READ_OPCODE,
+ ACPI_TYPE_BUFFER,
+ &qsdev->spi_read_opcode);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_WRITE_OPCODE,
+ ACPI_TYPE_BUFFER,
+ &qsdev->spi_write_opcode);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_IO_MODE,
+ ACPI_TYPE_INTEGER,
+ &qsdev->spi_read_io_mode);
+ if (ret)
+ return ret;
+
+ if (qsdev->spi_read_io_mode & SPI_WRITE_IO_MODE)
+ qsdev->spi_write_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, qsdev->spi_read_io_mode);
+ else
+ qsdev->spi_write_io_mode = THC_SINGLE_IO;
+
+ qsdev->spi_read_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, qsdev->spi_read_io_mode);
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_CONNECTION_SPEED,
+ ACPI_TYPE_INTEGER,
+ &qsdev->spi_freq_val);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_LIMIT_PACKET_SIZE,
+ ACPI_TYPE_INTEGER,
+ &qsdev->limit_packet_size);
+ if (ret)
+ return ret;
+
+ if (qsdev->limit_packet_size || !qsdev->driver_data)
+ qsdev->spi_packet_size = DEFAULT_MIN_PACKET_SIZE_VALUE;
+ else
+ qsdev->spi_packet_size = qsdev->driver_data->max_packet_size_value;
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_PERFORMANCE_LIMIT,
+ ACPI_TYPE_INTEGER,
+ &qsdev->performance_limit);
+ if (ret)
+ return ret;
+
+ qsdev->performance_limit = FIELD_GET(PERFORMANCE_LIMITATION, qsdev->performance_limit);
+
+ ret = thc_acpi_get_property(adev, &thc_platform_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_ACTIVE_LTR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->active_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &thc_platform_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_LP_LTR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->low_power_ltr_val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
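A commented sketch of the read/write IO-mode split performed above; the
bit layout lives in SPI_WRITE_IO_MODE and SPI_IO_MODE_OPCODE in
quickspi-dev.h (not shown in this hunk):

	u32 raw = qsdev->spi_read_io_mode;	/* raw dword from ACPI */

	/* one flag bit says whether writes may use multi-IO at all... */
	if (raw & SPI_WRITE_IO_MODE)
		qsdev->spi_write_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, raw);
	else
		qsdev->spi_write_io_mode = THC_SINGLE_IO;

	/* ...while reads always take the opcode field */
	qsdev->spi_read_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, raw);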
+
+/**
+ * quickspi_irq_quick_handler - The ISR of the quickspi driver
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: IRQ_WAKE_THREAD if further processing is needed, otherwise IRQ_HANDLED.
+ */
+static irqreturn_t quickspi_irq_quick_handler(int irq, void *dev_id)
+{
+ struct quickspi_device *qsdev = dev_id;
+
+ if (qsdev->state == QUICKSPI_DISABLED)
+ return IRQ_HANDLED;
+
+ /* Disable the THC interrupt before the current interrupt is handled */
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * try_recover - Try to recover THC and the touch device
+ * @qsdev: pointer to quickspi device
+ *
+ * This function is an error handler, called when a fatal error happens.
+ * It tries to reset the touch device and re-configure THC to recover
+ * data transfer between the device and THC.
+ *
+ * Return: 0 if successful or error code on failure.
+ */
+static int try_recover(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = reset_tic(qsdev);
+ if (ret) {
+ dev_err(qsdev->dev, "Reset touch device failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Re-configure THC DMA failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * quickspi_irq_thread_handler - IRQ thread handler of quickspi driver
+ *
+ * @irq: The IRQ number
+ * @dev_id: pointer to the quickspi device structure
+ *
+ * Return: IRQ_HANDLED to indicate this interrupt has been handled.
+ */
+static irqreturn_t quickspi_irq_thread_handler(int irq, void *dev_id)
+{
+ struct quickspi_device *qsdev = dev_id;
+ size_t input_len;
+ int read_finished = 0;
+ int err_recover = 0;
+ int int_mask;
+ int ret;
+
+ if (qsdev->state == QUICKSPI_DISABLED)
+ return IRQ_HANDLED;
+
+ ret = pm_runtime_resume_and_get(qsdev->dev);
+ if (ret)
+ return IRQ_HANDLED;
+
+ int_mask = thc_interrupt_handler(qsdev->thc_hw);
+
+ if (int_mask & BIT(THC_FATAL_ERR_INT) || int_mask & BIT(THC_TXN_ERR_INT)) {
+ err_recover = 1;
+ goto end;
+ }
+
+ if (int_mask & BIT(THC_NONDMA_INT)) {
+ if (qsdev->state == QUICKSPI_RESETING) {
+ qsdev->reset_ack = true;
+ wake_up_interruptible(&qsdev->reset_ack_wq);
+ } else {
+ qsdev->nondma_int_received = true;
+ wake_up_interruptible(&qsdev->nondma_int_received_wq);
+ }
+ }
+
+ if (int_mask & BIT(THC_RXDMA2_INT)) {
+ while (!read_finished) {
+ ret = thc_rxdma_read(qsdev->thc_hw, THC_RXDMA2, qsdev->input_buf,
+ &input_len, &read_finished);
+ if (ret) {
+ err_recover = 1;
+ goto end;
+ }
+
+ quickspi_handle_input_data(qsdev, input_len);
+ }
+ }
+
+end:
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ if (err_recover)
+ if (try_recover(qsdev))
+ qsdev->state = QUICKSPI_DISABLED;
+
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * quickspi_dev_init - Initialize quickspi device
+ *
+ * @pdev: pointer to the thc pci device
+ * @mem_addr: pointer to the MMIO memory address
+ * @id: pointer to the pci_device_id structure
+ *
+ * Allocate the quickspi device structure, initialize the THC device,
+ * then configure THC to HIDSPI mode.
+ *
+ * If successful, enable the THC hardware interrupt.
+ *
+ * Return: pointer to the quickspi device structure on success
+ * or ERR_PTR on failure.
+ */
+static struct quickspi_device *quickspi_dev_init(struct pci_dev *pdev, void __iomem *mem_addr,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = devm_kzalloc(dev, sizeof(struct quickspi_device), GFP_KERNEL);
+ if (!qsdev)
+ return ERR_PTR(-ENOMEM);
+
+ qsdev->pdev = pdev;
+ qsdev->dev = dev;
+ qsdev->mem_addr = mem_addr;
+ qsdev->state = QUICKSPI_DISABLED;
+ qsdev->driver_data = (struct quickspi_driver_data *)id->driver_data;
+
+ init_waitqueue_head(&qsdev->reset_ack_wq);
+ init_waitqueue_head(&qsdev->nondma_int_received_wq);
+ init_waitqueue_head(&qsdev->report_desc_got_wq);
+ init_waitqueue_head(&qsdev->get_report_cmpl_wq);
+ init_waitqueue_head(&qsdev->set_report_cmpl_wq);
+
+ /* thc hw init */
+ qsdev->thc_hw = thc_dev_init(qsdev->dev, qsdev->mem_addr);
+ if (IS_ERR(qsdev->thc_hw)) {
+ ret = PTR_ERR(qsdev->thc_hw);
+ dev_err(dev, "Failed to initialize THC device context, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret) {
+ dev_err(dev, "Failed to select THC port, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = quickspi_get_acpi_resources(qsdev);
+ if (ret) {
+ dev_err(dev, "Get ACPI resources failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* THC config for input/output address */
+ thc_spi_input_output_address_config(qsdev->thc_hw,
+ qsdev->input_report_hdr_addr,
+ qsdev->input_report_bdy_addr,
+ qsdev->output_report_addr);
+
+ /* THC config for spi read operation */
+ ret = thc_spi_read_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_read_io_mode,
+ qsdev->spi_read_opcode,
+ qsdev->spi_packet_size);
+ if (ret) {
+ dev_err(dev, "thc_spi_read_config failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* THC config for spi write operation */
+ ret = thc_spi_write_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_write_io_mode,
+ qsdev->spi_write_opcode,
+ qsdev->spi_packet_size,
+ qsdev->performance_limit);
+ if (ret) {
+ dev_err(dev, "thc_spi_write_config failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ thc_ltr_config(qsdev->thc_hw,
+ qsdev->active_ltr_val,
+ qsdev->low_power_ltr_val);
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ qsdev->state = QUICKSPI_INITED;
+
+ return qsdev;
+}
+
+/**
+ * quickspi_dev_deinit - De-initialize quickspi device
+ *
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Disable the THC interrupt and de-initialize THC.
+ */
+static void quickspi_dev_deinit(struct quickspi_device *qsdev)
+{
+ thc_interrupt_enable(qsdev->thc_hw, false);
+ thc_ltr_unconfig(qsdev->thc_hw);
+
+ qsdev->state = QUICKSPI_DISABLED;
+}
+
+/**
+ * quickspi_dma_init - Configure THC DMA for quickspi device
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * This function uses the TIC's parameters (such as max input length and max
+ * output length) to allocate THC DMA buffers and configure THC DMA engines.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_dma_init(struct quickspi_device *qsdev)
+{
+ int ret;
+
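+ /* Max packet sizes come from the HIDSPI device descriptor read during enumeration */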
+ ret = thc_dma_set_max_packet_sizes(qsdev->thc_hw, 0,
+ le16_to_cpu(qsdev->dev_desc.max_input_len),
+ le16_to_cpu(qsdev->dev_desc.max_output_len),
+ 0);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_allocate(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Allocate THC DMA buffer failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* Enable RxDMA */
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Configure THC DMA failed, ret = %d\n", ret);
+ thc_dma_unconfigure(qsdev->thc_hw);
+ thc_dma_release(qsdev->thc_hw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * quickspi_dma_deinit - Release THC DMA for quickspi device
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Stop THC DMA engines and release all DMA buffers.
+ */
+static void quickspi_dma_deinit(struct quickspi_device *qsdev)
+{
+ thc_dma_unconfigure(qsdev->thc_hw);
+ thc_dma_release(qsdev->thc_hw);
+}
+
+/**
+ * quickspi_alloc_report_buf - Alloc report buffers
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Allocate the report descriptor buffer; it is used to store the TIC HID
+ * report descriptor.
+ *
+ * Allocate the input report buffer; it is used to receive HID input report
+ * data from the TIC.
+ *
+ * Allocate the output report buffer; it is used to store HID output reports,
+ * such as set feature.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_alloc_report_buf(struct quickspi_device *qsdev)
+{
+ size_t max_report_len;
+ size_t max_input_len;
+
+ qsdev->report_descriptor = devm_kzalloc(qsdev->dev,
+ le16_to_cpu(qsdev->dev_desc.rep_desc_len),
+ GFP_KERNEL);
+ if (!qsdev->report_descriptor)
+ return -ENOMEM;
+
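+ /*
+ * The input buffer also receives the report descriptor response,
+ * so size it for the larger of the two.
+ */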
+ max_input_len = max(le16_to_cpu(qsdev->dev_desc.rep_desc_len),
+ le16_to_cpu(qsdev->dev_desc.max_input_len));
+
+ qsdev->input_buf = devm_kzalloc(qsdev->dev, max_input_len, GFP_KERNEL);
+ if (!qsdev->input_buf)
+ return -ENOMEM;
+
+ max_report_len = max(le16_to_cpu(qsdev->dev_desc.max_output_len),
+ le16_to_cpu(qsdev->dev_desc.max_input_len));
+
+ qsdev->report_buf = devm_kzalloc(qsdev->dev, max_report_len, GFP_KERNEL);
+ if (!qsdev->report_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * quickspi_probe - Quickspi driver probe function
+ *
+ * @pdev: pointer to PCI device
+ * @id: pointer to pci_device_id structure
+ *
+ * This function initializes THC and HIDSPI device, the flow is:
+ * - do THC pci device initialization
+ * - query HIDSPI ACPI parameters
+ * - configure THC to HIDSPI mode
+ * - go through HIDSPI enumeration flow
+ * |- reset HIDSPI device
+ * |- read device descriptor
+ * - enable THC interrupt and DMA
+ * - read report descriptor
+ * - register HID device
+ * - enable runtime power management
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct quickspi_device *qsdev;
+ void __iomem *mem_addr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PCI device, ret = %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
+ goto disable_pci_device;
+ }
+
+ mem_addr = pcim_iomap_table(pdev)[0];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration %d\n", ret);
+ goto unmap_io_region;
+ }
+ }
+
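+ /* One vector is enough; PCI_IRQ_ALL_TYPES falls back from MSI-X/MSI to legacy INTx */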
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Failed to allocate IRQ vectors. ret = %d\n", ret);
+ goto unmap_io_region;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ qsdev = quickspi_dev_init(pdev, mem_addr, id);
+ if (IS_ERR(qsdev)) {
+ dev_err(&pdev->dev, "QuickSPI device init failed\n");
+ ret = PTR_ERR(qsdev);
+ goto unmap_io_region;
+ }
+
+ pci_set_drvdata(pdev, qsdev);
+
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ quickspi_irq_quick_handler,
+ quickspi_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME,
+ qsdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request threaded IRQ, irq = %d.\n", pdev->irq);
+ goto dev_deinit;
+ }
+
+ ret = reset_tic(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Reset Touch Device failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_alloc_report_buf(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Alloc report buffers failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_dma_init(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Setup THC DMA failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_get_report_descriptor(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get report descriptor failed, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ ret = quickspi_hid_probe(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HID device, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ qsdev->state = QUICKSPI_ENABLED;
+
+ /* Enable runtime power management */
+ pm_runtime_use_autosuspend(qsdev->dev);
+ pm_runtime_set_autosuspend_delay(qsdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_noidle(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ dev_dbg(&pdev->dev, "QuickSPI probe success\n");
+
+ return 0;
+
+dma_deinit:
+ quickspi_dma_deinit(qsdev);
+dev_deinit:
+ quickspi_dev_deinit(qsdev);
+unmap_io_region:
+ pcim_iounmap_regions(pdev, BIT(0));
+disable_pci_device:
+ pci_clear_master(pdev);
+
+ return ret;
+}
+
+/**
+ * quickspi_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void quickspi_remove(struct pci_dev *pdev)
+{
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return;
+
+ quickspi_hid_remove(qsdev);
+ quickspi_dma_deinit(qsdev);
+
+ pm_runtime_get_noresume(qsdev->dev);
+
+ quickspi_dev_deinit(qsdev);
+
+ pcim_iounmap_regions(pdev, BIT(0));
+ pci_clear_master(pdev);
+}
+
+/**
+ * quickspi_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called from the reboot notifier; it's a
+ * simplified version of remove, so we go down faster.
+ */
+static void quickspi_shutdown(struct pci_dev *pdev)
+{
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return;
+
+ /* Must stop DMA before reboot to avoid DMA entering into unknown state */
+ quickspi_dma_deinit(qsdev);
+
+ quickspi_dev_deinit(qsdev);
+}
+
+static int quickspi_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = quickspi_set_power(qsdev, HIDSPI_SLEEP);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ return 0;
+}
+
+static int quickspi_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = quickspi_set_power(qsdev, HIDSPI_ON);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quickspi_freeze(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ return 0;
+}
+
+static int quickspi_thaw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quickspi_poweroff(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_ltr_unconfig(qsdev->thc_hw);
+
+ quickspi_dma_deinit(qsdev);
+
+ return 0;
+}
+
+static int quickspi_restore(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ /* Reconfig THC HW when back from hibernate */
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret)
+ return ret;
+
+ thc_spi_input_output_address_config(qsdev->thc_hw,
+ qsdev->input_report_hdr_addr,
+ qsdev->input_report_bdy_addr,
+ qsdev->output_report_addr);
+
+ ret = thc_spi_read_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_read_io_mode,
+ qsdev->spi_read_opcode,
+ qsdev->spi_packet_size);
+ if (ret)
+ return ret;
+
+ ret = thc_spi_write_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_write_io_mode,
+ qsdev->spi_write_opcode,
+ qsdev->spi_packet_size,
+ qsdev->performance_limit);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ /* TIC may lose power, so it needs to go through the reset flow */
+ ret = reset_tic(qsdev);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_ltr_config(qsdev->thc_hw,
+ qsdev->active_ltr_val,
+ qsdev->low_power_ltr_val);
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static int quickspi_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
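+ /* Switch to the low power LTR value so the platform can enter deeper idle states */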
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_LP);
+
+ pci_save_state(pdev);
+
+ return 0;
+}
+
+static int quickspi_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops quickspi_pm_ops = {
+ .suspend = quickspi_suspend,
+ .resume = quickspi_resume,
+ .freeze = quickspi_freeze,
+ .thaw = quickspi_thaw,
+ .poweroff = quickspi_poweroff,
+ .restore = quickspi_restore,
+ .runtime_suspend = quickspi_runtime_suspend,
+ .runtime_resume = quickspi_runtime_resume,
+ .runtime_idle = NULL,
+};
+
+static const struct pci_device_id quickspi_pci_tbl[] = {
+ {PCI_DEVICE_DATA(INTEL, THC_MTL_DEVICE_ID_SPI_PORT1, &mtl), },
+ {PCI_DEVICE_DATA(INTEL, THC_MTL_DEVICE_ID_SPI_PORT2, &mtl), },
+ {PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_SPI_PORT1, &lnl), },
+ {PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_SPI_PORT2, &lnl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
+
+static struct pci_driver quickspi_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = quickspi_pci_tbl,
+ .probe = quickspi_probe,
+ .remove = quickspi_remove,
+ .shutdown = quickspi_shutdown,
+ .driver.pm = &quickspi_pm_ops,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+};
+
+module_pci_driver(quickspi_driver);
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) QuickSPI Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
new file mode 100644
index 000000000000..75179bb26767
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_DEV_H_
+#define _QUICKSPI_DEV_H_
+
+#include <linux/bits.h>
+#include <linux/hid-over-spi.h>
+#include <linux/sizes.h>
+#include <linux/wait.h>
+
+#include "quickspi-protocol.h"
+
+#define PCI_DEVICE_ID_INTEL_THC_MTL_DEVICE_ID_SPI_PORT1 0x7E49
+#define PCI_DEVICE_ID_INTEL_THC_MTL_DEVICE_ID_SPI_PORT2 0x7E4B
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_SPI_PORT1 0xA849
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_SPI_PORT2 0xA84B
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT1 0xE349
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
+
+/* HIDSPI special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_REVISION_NUM 2
+#define ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_HDR_ADDR 1
+#define ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_BDY_ADDR 2
+#define ACPI_QUICKSPI_FUNC_NUM_OUTPUT_REP_ADDR 3
+#define ACPI_QUICKSPI_FUNC_NUM_READ_OPCODE 4
+#define ACPI_QUICKSPI_FUNC_NUM_WRITE_OPCODE 5
+#define ACPI_QUICKSPI_FUNC_NUM_IO_MODE 6
+
+/* QuickSPI device special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_FUNC_NUM_CONNECTION_SPEED 1
+#define ACPI_QUICKSPI_FUNC_NUM_LIMIT_PACKET_SIZE 2
+#define ACPI_QUICKSPI_FUNC_NUM_PERFORMANCE_LIMIT 3
+
+/* Platform special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_FUNC_NUM_ACTIVE_LTR 1
+#define ACPI_QUICKSPI_FUNC_NUM_LP_LTR 2
+
+#define SPI_WRITE_IO_MODE BIT(13)
+#define SPI_IO_MODE_OPCODE GENMASK(15, 14)
+#define PERFORMANCE_LIMITATION GENMASK(15, 0)
+
+/* Packet size value, the unit is 16 bytes */
+#define DEFAULT_MIN_PACKET_SIZE_VALUE 4
+#define MAX_PACKET_SIZE_VALUE_MTL 128
+#define MAX_PACKET_SIZE_VALUE_LNL 256
+
+/*
+ * THC uses runtime auto suspend to dynamically switch between THC active LTR
+ * and low power LTR to save CPU power.
+ * Default value is 5000ms, that means if no touch event in this time, THC will
+ * change to low power LTR mode.
+ */
+#define DEFAULT_AUTO_SUSPEND_DELAY_MS 5000
+
+enum quickspi_dev_state {
+ QUICKSPI_NONE,
+ QUICKSPI_RESETING,
+ QUICKSPI_RESETED,
+ QUICKSPI_INITED,
+ QUICKSPI_ENABLED,
+ QUICKSPI_DISABLED,
+};
+
+/**
+ * struct quickspi_driver_data - Driver specific data for quickspi device
+ * @max_packet_size_value: identify max packet size, unit is 16 bytes
+ */
+struct quickspi_driver_data {
+ u32 max_packet_size_value;
+};
+
+struct device;
+struct pci_dev;
+struct thc_device;
+struct hid_device;
+struct acpi_device;
+
+/**
+ * struct quickspi_device - THC QuickSpi device struct
+ * @dev: pointer to kernel device
+ * @pdev: pointer to PCI device
+ * @thc_hw: pointer to THC device
+ * @hid_dev: pointer to HID device
+ * @acpi_dev: pointer to ACPI device
+ * @driver_data: pointer to quickspi specific driver data
+ * @state: THC SPI device state
+ * @mem_addr: MMIO memory address
+ * @dev_desc: device descriptor for HIDSPI protocol
+ * @input_report_hdr_addr: device input report header address
+ * @input_report_bdy_addr: device input report body address
+ * @output_report_addr: device output report address
+ * @spi_freq_val: device supported max SPI frequency, in Hz
+ * @spi_read_io_mode: device supported SPI read io mode
+ * @spi_write_io_mode: device supported SPI write io mode
+ * @spi_read_opcode: device read opcode
+ * @spi_write_opcode: device write opcode
+ * @limit_packet_size: 1 - limit read/write packets to 64 bytes
+ *                     0 - no read/write packet size limitation
+ * @spi_packet_size: SPI read/write packet size
+ * @performance_limit: delay time, in ms.
+ *                     If the device has a performance limitation, a delay must
+ *                     be inserted before a write operation that follows a read
+ *                     operation.
+ * @active_ltr_val: THC active LTR value
+ * @low_power_ltr_val: THC low power LTR value
+ * @report_descriptor: store a copy of device report descriptor
+ * @input_buf: store a copy of latest input report data
+ * @report_buf: store a copy of latest input/output report packet from set/get feature
+ * @report_len: the length of input/output report packet
+ * @reset_ack_wq: wait queue for waiting reset response from device
+ * @reset_ack: indicate reset response received or not
+ * @nondma_int_received_wq: wait queue for waiting THC non-DMA interrupt
+ * @nondma_int_received: indicate THC non-DMA interrupt received or not
+ * @report_desc_got_wq: wait queue for waiting device report descriptor
+ * @report_desc_got: indicate device report descriptor received or not
+ * @get_report_cmpl_wq: wait queue for waiting get report response from device
+ * @get_report_cmpl: indicate get report response received or not
+ * @set_report_cmpl_wq: wait queue for waiting set report completion
+ * @set_report_cmpl: indicate set report send complete or not
+ */
+struct quickspi_device {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct thc_device *thc_hw;
+ struct hid_device *hid_dev;
+ struct acpi_device *acpi_dev;
+ struct quickspi_driver_data *driver_data;
+ enum quickspi_dev_state state;
+
+ void __iomem *mem_addr;
+
+ struct hidspi_dev_descriptor dev_desc;
+ u32 input_report_hdr_addr;
+ u32 input_report_bdy_addr;
+ u32 output_report_addr;
+ u32 spi_freq_val;
+ u32 spi_read_io_mode;
+ u32 spi_write_io_mode;
+ u32 spi_read_opcode;
+ u32 spi_write_opcode;
+ u32 limit_packet_size;
+ u32 spi_packet_size;
+ u32 performance_limit;
+
+ u32 active_ltr_val;
+ u32 low_power_ltr_val;
+
+ u8 *report_descriptor;
+ u8 *input_buf;
+ u8 *report_buf;
+ u32 report_len;
+
+ wait_queue_head_t reset_ack_wq;
+ bool reset_ack;
+
+ wait_queue_head_t nondma_int_received_wq;
+ bool nondma_int_received;
+
+ wait_queue_head_t report_desc_got_wq;
+ bool report_desc_got;
+
+ wait_queue_head_t get_report_cmpl_wq;
+ bool get_report_cmpl;
+
+ wait_queue_head_t set_report_cmpl_wq;
+ bool set_report_cmpl;
+};
+
+#endif /* _QUICKSPI_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
new file mode 100644
index 000000000000..ad52e402c28a
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/pm_runtime.h>
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+
+/**
+ * quickspi_hid_parse() - HID core parse() callback
+ *
+ * @hid: HID device instance
+ *
+ * This function is called by HID core during hid_add_device()
+ *
+ * Return: 0 on success and non-zero on error.
+ */
+static int quickspi_hid_parse(struct hid_device *hid)
+{
+ struct quickspi_device *qsdev = hid->driver_data;
+
+ if (qsdev->report_descriptor)
+ return hid_parse_report(hid, qsdev->report_descriptor,
+ le16_to_cpu(qsdev->dev_desc.rep_desc_len));
+
+ dev_err(qsdev->dev, "invalid report descriptor\n");
+ return -EINVAL;
+}
+
+static int quickspi_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quickspi_hid_stop(struct hid_device *hid)
+{
+}
+
+static int quickspi_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quickspi_hid_close(struct hid_device *hid)
+{
+}
+
+static int quickspi_hid_raw_request(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct quickspi_device *qsdev = hid->driver_data;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(qsdev->dev);
+ if (ret)
+ return ret;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ ret = quickspi_get_report(qsdev, rtype, reportnum, buf);
+ break;
+ case HID_REQ_SET_REPORT:
+ ret = quickspi_set_report(qsdev, rtype, reportnum, buf, len);
+ break;
+ default:
+ dev_err_once(qsdev->dev, "Not supported request type %d\n", reqtype);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ return ret;
+}
+
+static int quickspi_hid_power(struct hid_device *hid, int lvl)
+{
+ return 0;
+}
+
+static struct hid_ll_driver quickspi_hid_ll_driver = {
+ .parse = quickspi_hid_parse,
+ .start = quickspi_hid_start,
+ .stop = quickspi_hid_stop,
+ .open = quickspi_hid_open,
+ .close = quickspi_hid_close,
+ .power = quickspi_hid_power,
+ .raw_request = quickspi_hid_raw_request,
+};
+
+/**
+ * quickspi_hid_probe() - Register HID low level driver
+ *
+ * @qsdev: pointer to quickspi device
+ *
+ * Allocate and register a HID device.
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quickspi_hid_probe(struct quickspi_device *qsdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &quickspi_hid_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->dev.parent = qsdev->dev;
+ hid->driver_data = qsdev;
+ hid->version = le16_to_cpu(qsdev->dev_desc.version_id);
+ hid->vendor = le16_to_cpu(qsdev->dev_desc.vendor_id);
+ hid->product = le16_to_cpu(qsdev->dev_desc.product_id);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quickspi-hid",
+ hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ qsdev->hid_dev = hid;
+
+ return 0;
+}
+
+/**
+ * quickspi_hid_remove() - Destroy HID device
+ *
+ * @qsdev: pointer to quickspi device
+ */
+void quickspi_hid_remove(struct quickspi_device *qsdev)
+{
+ hid_destroy_device(qsdev->hid_dev);
+}
+
+/**
+ * quickspi_hid_send_report() - Send HID input report data to HID core
+ *
+ * @qsdev: pointer to quickspi device
+ * @data: pointer to input report data buffer
+ * @data_len: the length of input report data
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quickspi_hid_send_report(struct quickspi_device *qsdev,
+ void *data, size_t data_len)
+{
+ int ret;
+
+ ret = hid_input_report(qsdev->hid_dev, HID_INPUT_REPORT, data, data_len, 1);
+ if (ret)
+ dev_err(qsdev->dev, "Failed to send HID input report, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h
new file mode 100644
index 000000000000..f640fa876a40
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_HID_H_
+#define _QUICKSPI_HID_H_
+
+struct quickspi_device;
+
+int quickspi_hid_send_report(struct quickspi_device *qsdev,
+ void *data, size_t data_size);
+int quickspi_hid_probe(struct quickspi_device *qsdev);
+void quickspi_hid_remove(struct quickspi_device *qsdev);
+
+#endif /* _QUICKSPI_HID_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
new file mode 100644
index 000000000000..7373238ceb18
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/hid.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+#include "quickspi-protocol.h"
+
+/*
+ * THC uses HW to accelerate the HID over SPI protocol. The
+ * THC_M_PRT_DEV_INT_CAUSE register is used to store the message header and
+ * body header; the definition below lets the driver retrieve the needed data
+ * field from THC_M_PRT_DEV_INT_CAUSE more easily.
+ */
+#define HIDSPI_IN_REP_BDY_HDR_REP_TYPE GENMASK(7, 0)
+
+static int write_cmd_to_txdma(struct quickspi_device *qsdev,
+ int report_type, int report_id,
+ u8 *report_buf, const int report_buf_len)
+{
+ struct output_report *write_buf;
+ int write_buf_len;
+ int ret;
+
+ write_buf = (struct output_report *)qsdev->report_buf;
+
+ write_buf->output_hdr.report_type = report_type;
+ write_buf->output_hdr.content_len = cpu_to_le16(report_buf_len);
+ write_buf->output_hdr.content_id = report_id;
+
+ if (report_buf && report_buf_len > 0)
+ memcpy(write_buf->content, report_buf, report_buf_len);
+
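+ /* Total DMA write length covers the output header plus the content payload */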
+ write_buf_len = HIDSPI_OUTPUT_REPORT_SIZE(report_buf_len);
+
+ ret = thc_dma_write(qsdev->thc_hw, write_buf, write_buf_len);
+ if (ret)
+ dev_err_once(qsdev->dev, "DMA write failed, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int quickspi_get_device_descriptor(struct quickspi_device *qsdev)
+{
+ u8 read_buf[HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE];
+ struct output_report output_rep;
+ u32 input_len, read_len = 0;
+ u32 int_cause_val;
+ u8 input_rep_type;
+ int ret;
+
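+ /* DMA is not configured at this point, so fetch the device descriptor over PIO */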
+ output_rep.output_hdr.report_type = DEVICE_DESCRIPTOR;
+ output_rep.output_hdr.content_len = 0;
+ output_rep.output_hdr.content_id = 0;
+
+ qsdev->nondma_int_received = false;
+
+ ret = thc_tic_pio_write(qsdev->thc_hw, qsdev->output_report_addr,
+ HIDSPI_OUTPUT_REPORT_SIZE(0), (u32 *)&output_rep);
+ if (ret) {
+ dev_err_once(qsdev->dev,
+ "Write DEVICE_DESCRIPTOR command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->nondma_int_received_wq,
+ qsdev->nondma_int_received,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->nondma_int_received) {
+ dev_err_once(qsdev->dev, "Wait DEVICE_DESCRIPTOR timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->nondma_int_received = false;
+
+ int_cause_val = thc_int_cause_read(qsdev->thc_hw);
+ input_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, int_cause_val);
+
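+ /* The header length field counts 4-byte words, convert it to bytes */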
+ input_len = input_len * sizeof(u32);
+ if (input_len != HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE) {
+ dev_err_once(qsdev->dev, "Receive wrong DEVICE_DESCRIPTOR length, len = %u\n",
+ input_len);
+ return -EINVAL;
+ }
+
+ ret = thc_tic_pio_read(qsdev->thc_hw, qsdev->input_report_bdy_addr,
+ input_len, &read_len, (u32 *)read_buf);
+ if (ret || read_len != input_len) {
+ dev_err_once(qsdev->dev, "Read DEVICE_DESCRIPTOR failed, ret = %d\n", ret);
+ dev_err_once(qsdev->dev, "DEVICE_DESCRIPTOR expected len = %u, actual read = %u\n",
+ input_len, read_len);
+ return ret ? ret : -EINVAL;
+ }
+
+ input_rep_type = ((struct input_report_body_header *)read_buf)->input_report_type;
+
+ if (input_rep_type == DEVICE_DESCRIPTOR_RESPONSE) {
+ memcpy(&qsdev->dev_desc,
+ read_buf + HIDSPI_INPUT_BODY_HEADER_SIZE,
+ HIDSPI_DEVICE_DESCRIPTOR_SIZE);
+
+ return 0;
+ }
+
+ dev_err_once(qsdev->dev, "Unexpected intput report type: %d\n", input_rep_type);
+ return -EINVAL;
+}
+
+int quickspi_get_report_descriptor(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = write_cmd_to_txdma(qsdev, REPORT_DESCRIPTOR, 0, NULL, 0);
+ if (ret) {
+ dev_err_once(qsdev->dev,
+ "Write REPORT_DESCRIPTOR command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->report_desc_got_wq,
+ qsdev->report_desc_got,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->report_desc_got) {
+ dev_err_once(qsdev->dev, "Wait Report Descriptor timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->report_desc_got = false;
+
+ return 0;
+}
+
+int quickspi_set_power(struct quickspi_device *qsdev,
+ enum hidspi_power_state power_state)
+{
+ u8 cmd_content = power_state;
+ int ret;
+
+ ret = write_cmd_to_txdma(qsdev, COMMAND_CONTENT,
+ HIDSPI_SET_POWER_CMD_ID,
+ &cmd_content,
+ sizeof(cmd_content));
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write SET_POWER command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void quickspi_handle_input_data(struct quickspi_device *qsdev, u32 buf_len)
+{
+ struct input_report_body_header *body_hdr;
+ struct input_report_body *input_body;
+ u8 *input_report;
+ u32 input_len;
+ int ret = 0;
+
+ input_body = (struct input_report_body *)qsdev->input_buf;
+ body_hdr = &input_body->body_hdr;
+ input_len = le16_to_cpu(body_hdr->content_len);
+
+ if (HIDSPI_INPUT_BODY_SIZE(input_len) > buf_len) {
+ dev_err_once(qsdev->dev, "Wrong input report length: %u",
+ input_len);
+ return;
+ }
+
+ switch (body_hdr->input_report_type) {
+ case REPORT_DESCRIPTOR_RESPONSE:
+ if (input_len != le16_to_cpu(qsdev->dev_desc.rep_desc_len)) {
+ dev_err_once(qsdev->dev, "Unexpected report descriptor length: %u\n",
+ input_len);
+ return;
+ }
+
+ memcpy(qsdev->report_descriptor, input_body->content, input_len);
+
+ qsdev->report_desc_got = true;
+ wake_up_interruptible(&qsdev->report_desc_got_wq);
+
+ break;
+
+ case COMMAND_RESPONSE:
+ if (body_hdr->content_id == HIDSPI_SET_POWER_CMD_ID) {
+ dev_dbg(qsdev->dev, "Receive set power on response\n");
+ } else {
+ dev_err_once(qsdev->dev, "Unknown command response type: %u\n",
+ body_hdr->content_id);
+ }
+
+ break;
+
+ case RESET_RESPONSE:
+ if (qsdev->state == QUICKSPI_RESETING) {
+ qsdev->reset_ack = true;
+ wake_up_interruptible(&qsdev->reset_ack_wq);
+ dev_dbg(qsdev->dev, "Receive HIR reset response\n");
+ } else {
+ dev_info(qsdev->dev, "Receive DIR\n");
+ }
+ break;
+
+ case GET_FEATURE_RESPONSE:
+ case GET_INPUT_REPORT_RESPONSE:
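+ /*
+ * Step back over content_id so the report ID directly precedes the
+ * report data, which is the layout HID core expects.
+ */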
+ qsdev->report_len = sizeof(body_hdr->content_id) + input_len;
+ input_report = input_body->content - sizeof(body_hdr->content_id);
+
+ memcpy(qsdev->report_buf, input_report, qsdev->report_len);
+
+ qsdev->get_report_cmpl = true;
+ wake_up_interruptible(&qsdev->get_report_cmpl_wq);
+
+ break;
+
+ case SET_FEATURE_RESPONSE:
+ case OUTPUT_REPORT_RESPONSE:
+ qsdev->set_report_cmpl = true;
+ wake_up_interruptible(&qsdev->set_report_cmpl_wq);
+
+ break;
+
+ case DATA:
+ if (qsdev->state != QUICKSPI_ENABLED)
+ return;
+
+ if (input_len > le16_to_cpu(qsdev->dev_desc.max_input_len)) {
+ dev_err_once(qsdev->dev, "Unexpected too large input report length: %u\n",
+ input_len);
+ return;
+ }
+
+ input_len = sizeof(body_hdr->content_id) + input_len;
+ input_report = input_body->content - sizeof(body_hdr->content_id);
+
+ ret = quickspi_hid_send_report(qsdev, input_report, input_len);
+ if (ret)
+ dev_err_once(qsdev->dev, "Failed to send HID input report: %d\n", ret);
+
+ break;
+
+ default:
+ dev_err_once(qsdev->dev, "Unsupported input report type: %u\n",
+ body_hdr->input_report_type);
+ break;
+ }
+}
+
+static int acpi_tic_reset(struct quickspi_device *qsdev)
+{
+ acpi_status status = 0;
+ acpi_handle handle;
+
+ if (!qsdev->acpi_dev)
+ return -ENODEV;
+
+ handle = acpi_device_handle(qsdev->acpi_dev);
+ status = acpi_execute_simple_method(handle, "_RST", 0);
+ if (ACPI_FAILURE(status)) {
+ dev_err_once(qsdev->dev,
+ "Failed to reset device through ACPI method, ret = %d\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int reset_tic(struct quickspi_device *qsdev)
+{
+ u32 actual_read_len, read_len = 0;
+ u32 input_report_len, reset_response, int_cause_val;
+ u8 input_rep_type;
+ int ret;
+
+ qsdev->state = QUICKSPI_RESETING;
+
+ qsdev->reset_ack = false;
+
+ /* First interrupt uses level trigger to avoid missing interrupt */
+ thc_int_trigger_type_select(qsdev->thc_hw, false);
+
+ ret = acpi_tic_reset(qsdev);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = wait_event_interruptible_timeout(qsdev->reset_ack_wq,
+ qsdev->reset_ack,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->reset_ack) {
+ dev_err_once(qsdev->dev, "Wait RESET_RESPONSE timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+
+ int_cause_val = thc_int_cause_read(qsdev->thc_hw);
+ input_report_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, int_cause_val);
+
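+ /* The length field is in 4-byte words, convert it to bytes */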
+ read_len = input_report_len * sizeof(u32);
+ if (read_len != HIDSPI_INPUT_BODY_SIZE(0)) {
+ dev_err_once(qsdev->dev, "Receive wrong RESET_RESPONSE, len = %u\n",
+ read_len);
+ return -EINVAL;
+ }
+
+ /* Switch to edge trigger matching with HIDSPI protocol definition */
+ thc_int_trigger_type_select(qsdev->thc_hw, true);
+
+ ret = thc_tic_pio_read(qsdev->thc_hw, qsdev->input_report_bdy_addr,
+ read_len, &actual_read_len,
+ (u32 *)&reset_response);
+ if (ret || actual_read_len != read_len) {
+ dev_err_once(qsdev->dev, "Read RESET_RESPONSE body failed, ret = %d\n", ret);
+ dev_err_once(qsdev->dev, "RESET_RESPONSE body expected len = %u, actual = %u\n",
+ read_len, actual_read_len);
+ return ret ? ret : -EINVAL;
+ }
+
+ input_rep_type = FIELD_GET(HIDSPI_IN_REP_BDY_HDR_REP_TYPE, reset_response);
+
+ if (input_rep_type == RESET_RESPONSE) {
+ dev_dbg(qsdev->dev, "RESET_RESPONSE received\n");
+ } else {
+ dev_err_once(qsdev->dev,
+ "Unexpected input report type: %d, expect RESET_RESPONSE\n",
+ input_rep_type);
+ return -EINVAL;
+ }
+
+ qsdev->state = QUICKSPI_RESETED;
+
+ ret = quickspi_get_device_descriptor(qsdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int quickspi_get_report(struct quickspi_device *qsdev,
+ u8 report_type, unsigned int report_id, void *buf)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT) {
+ rep_type = GET_INPUT_REPORT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = GET_FEATURE;
+ } else {
+ dev_err_once(qsdev->dev, "Unsupported report type for GET REPORT: %d\n",
+ report_type);
+ return -EINVAL;
+ }
+
+ ret = write_cmd_to_txdma(qsdev, rep_type, report_id, NULL, 0);
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write GET_REPORT command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->get_report_cmpl_wq,
+ qsdev->get_report_cmpl,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->get_report_cmpl) {
+ dev_err_once(qsdev->dev, "Wait Get Report Response timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->get_report_cmpl = false;
+
+ memcpy(buf, qsdev->report_buf, qsdev->report_len);
+
+ return qsdev->report_len;
+}
+
+int quickspi_set_report(struct quickspi_device *qsdev,
+ u8 report_type, unsigned int report_id,
+ void *buf, u32 buf_len)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT) {
+ rep_type = OUTPUT_REPORT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = SET_FEATURE;
+ } else {
+ dev_err_once(qsdev->dev, "Unsupported report type for SET REPORT: %d\n",
+ report_type);
+ return -EINVAL;
+ }
+
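+ /* buf[0] holds the report ID, which travels in content_id, so skip it here */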
+ ret = write_cmd_to_txdma(qsdev, rep_type, report_id, buf + 1, buf_len - 1);
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write SET_REPORT command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->set_report_cmpl_wq,
+ qsdev->set_report_cmpl,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->set_report_cmpl) {
+ dev_err_once(qsdev->dev, "Wait Set Report Response timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->set_report_cmpl = false;
+
+ return buf_len;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h
new file mode 100644
index 000000000000..775e29c1ed13
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_PROTOCOL_H_
+#define _QUICKSPI_PROTOCOL_H_
+
+#include <linux/hid-over-spi.h>
+
+#define QUICKSPI_ACK_WAIT_TIMEOUT 5
+
+struct quickspi_device;
+
+void quickspi_handle_input_data(struct quickspi_device *qsdev, u32 buf_len);
+int quickspi_get_report(struct quickspi_device *qsdev, u8 report_type,
+ unsigned int report_id, void *buf);
+int quickspi_set_report(struct quickspi_device *qsdev, u8 report_type,
+ unsigned int report_id, void *buf, u32 buf_len);
+int quickspi_get_report_descriptor(struct quickspi_device *qsdev);
+
+int quickspi_set_power(struct quickspi_device *qsdev,
+ enum hidspi_power_state power_state);
+
+int reset_tic(struct quickspi_device *qsdev);
+
+#endif /* _QUICKSPI_PROTOCOL_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
new file mode 100644
index 000000000000..4fc78b5a04b5
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -0,0 +1,1578 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+static int thc_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct thc_device *thc_ctx = context;
+ void __iomem *base = thc_ctx->mmio_addr;
+
+ *val = ioread32(base + reg);
+ return 0;
+}
+
+static int thc_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct thc_device *thc_ctx = context;
+ void __iomem *base = thc_ctx->mmio_addr;
+
+ iowrite32(val, base + reg);
+ return 0;
+}
+
+static const struct regmap_range thc_rw_ranges[] = {
+ regmap_reg_range(0x10, 0x14),
+ regmap_reg_range(0x1000, 0x1320),
+};
+
+static const struct regmap_access_table thc_rw_table = {
+ .yes_ranges = thc_rw_ranges,
+ .n_yes_ranges = ARRAY_SIZE(thc_rw_ranges),
+};
+
+static const struct regmap_config thc_regmap_cfg = {
+ .name = "thc_regmap_common",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1320,
+ .reg_read = thc_regmap_read,
+ .reg_write = thc_regmap_write,
+ .cache_type = REGCACHE_NONE,
+ .fast_io = true,
+ .rd_table = &thc_rw_table,
+ .wr_table = &thc_rw_table,
+ .volatile_table = &thc_rw_table,
+};
+
+/**
+ * thc_clear_state - Clear THC hardware state
+ *
+ * @dev: The pointer of THC device structure
+ */
+static void thc_clear_state(const struct thc_device *dev)
+{
+ u32 val;
+
+ /* Clear interrupt cause register */
+ val = THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY |
+ THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR |
+ THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR |
+ THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, val, val);
+
+ /* Clear interrupt error state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS,
+ THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS,
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS);
+
+ val = THC_M_PRT_INT_EN_TXN_ERR_INT_EN |
+ THC_M_PRT_INT_EN_FATAL_ERR_INT_EN |
+ THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET, val, val);
+
+ val = THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, val, val);
+
+ /* Clear RxDMA state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_EOF, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_EOF, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS);
+
+ /* Clear TxDMA state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL);
+
+ val = THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS |
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS |
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, val, val);
+
+ /* Reset all DMAs count */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DB_CNT_1_OFFSET,
+ THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST,
+ THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CNT_OFFSET,
+ THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST,
+ THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+
+ /* Reset THC hardware sequence state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRAME_DROP_CNT_1_OFFSET,
+ THC_M_PRT_FRAME_DROP_CNT_1_RFDC,
+ THC_M_PRT_FRAME_DROP_CNT_1_RFDC);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRAME_DROP_CNT_2_OFFSET,
+ THC_M_PRT_FRAME_DROP_CNT_2_RFDC,
+ THC_M_PRT_FRAME_DROP_CNT_2_RFDC);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRM_CNT_1_OFFSET,
+ THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST,
+ THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRM_CNT_2_OFFSET,
+ THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST,
+ THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RXDMA_PKT_CNT_1_OFFSET,
+ THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST,
+ THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RXDMA_PKT_CNT_2_OFFSET,
+ THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST,
+ THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SWINT_CNT_1_OFFSET,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TX_FRM_CNT_OFFSET,
+ THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST,
+ THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TXDMA_PKT_CNT_OFFSET,
+ THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST,
+ THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_UFRM_CNT_1_OFFSET,
+ THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST,
+ THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_UFRM_CNT_2_OFFSET,
+ THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST,
+ THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_PRD_EMPTY_CNT_1_OFFSET,
+ THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC,
+ THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_PRD_EMPTY_CNT_2_OFFSET,
+ THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC,
+ THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC);
+}
+
+/**
+ * thc_dev_init - Allocate and initialize the THC device structure
+ *
+ * @device: The pointer of device structure
+ * @mem_addr: The pointer of MMIO memory address
+ *
+ * Return: The thc_device pointer on success, an ERR_PTR on failure.
+ */
+struct thc_device *thc_dev_init(struct device *device, void __iomem *mem_addr)
+{
+ struct thc_device *thc_dev;
+ int ret;
+
+ thc_dev = devm_kzalloc(device, sizeof(*thc_dev), GFP_KERNEL);
+ if (!thc_dev)
+ return ERR_PTR(-ENOMEM);
+
+ thc_dev->dev = device;
+ thc_dev->mmio_addr = mem_addr;
+ thc_dev->thc_regmap = devm_regmap_init(device, NULL, thc_dev, &thc_regmap_cfg);
+ if (IS_ERR(thc_dev->thc_regmap)) {
+ ret = PTR_ERR(thc_dev->thc_regmap);
+ dev_err_once(device, "Failed to init thc_regmap: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ thc_clear_state(thc_dev);
+
+ mutex_init(&thc_dev->thc_bus_lock);
+ init_waitqueue_head(&thc_dev->write_complete_wait);
+ init_waitqueue_head(&thc_dev->swdma_complete_wait);
+
+ thc_dev->dma_ctx = thc_dma_init(thc_dev);
+ if (!thc_dev->dma_ctx) {
+ dev_err_once(device, "DMA context init failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return thc_dev;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dev_init, "INTEL_THC");
+
+static int prepare_pio(const struct thc_device *dev, const u8 pio_op,
+ const u32 address, const u32 size)
+{
+ u32 sts, ctrl, addr, mask;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);
+
+ /* Check if THC previous PIO still in progress */
+ if (sts & THC_M_PRT_SW_SEQ_STS_THC_SS_CIP) {
+ dev_err_once(dev->dev, "THC PIO is still busy!\n");
+ return -EBUSY;
+ }
+
+ /* Clear error bit and complete bit in state register */
+ sts |= THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts);
+
+ /* Set PIO data size, opcode and interrupt capability */
+ ctrl = FIELD_PREP(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC, size) |
+ FIELD_PREP(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD, pio_op);
+ if (dev->pio_int_supported)
+ ctrl |= THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE;
+
+ mask = THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC |
+ THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD |
+ THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET, mask, ctrl);
+
+ /* Set PIO target address */
+ addr = FIELD_PREP(THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR, address);
+ mask = THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, mask, addr);
+ return 0;
+}
+
+static void pio_start(const struct thc_device *dev,
+ u32 size_in_bytes, const u32 *buffer)
+{
+ if (size_in_bytes && buffer)
+ regmap_bulk_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET,
+ buffer, size_in_bytes / sizeof(u32));
+
+ /* Enable Start bit */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET,
+ THC_M_PRT_SW_SEQ_CNTRL_TSSGO,
+ THC_M_PRT_SW_SEQ_CNTRL_TSSGO);
+}
+
+static int pio_complete(const struct thc_device *dev,
+ u32 *buffer, u32 *size)
+{
+ u32 sts, ctrl;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);
+ if (sts & THC_M_PRT_SW_SEQ_STS_THC_SS_ERR) {
+ dev_err_once(dev->dev, "PIO operation error\n");
+ return -EBUSY;
+ }
+
+ if (buffer && size) {
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_CNTRL_OFFSET, &ctrl);
+ *size = FIELD_GET(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC, ctrl);
+
+ regmap_bulk_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET,
+ buffer, *size / sizeof(u32));
+ }
+
+ sts |= THC_M_PRT_SW_SEQ_STS_THC_SS_ERR | THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts);
+ return 0;
+}
+
+static int pio_wait(const struct thc_device *dev)
+{
+ u32 sts = 0;
+ int ret;
+
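+ /* Poll until the command-in-progress bit clears and TSSDONE is set */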
+ ret = regmap_read_poll_timeout(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts,
+ !(sts & THC_M_PRT_SW_SEQ_STS_THC_SS_CIP ||
+ !(sts & THC_M_PRT_SW_SEQ_STS_TSSDONE)),
+ THC_REGMAP_POLLING_INTERVAL_US, THC_PIO_DONE_TIMEOUT_US);
+ if (ret)
+ dev_err_once(dev->dev, "Timeout while polling PIO operation done\n");
+
+ return ret;
+}
+
+/**
+ * thc_tic_pio_read - Read data from touch device by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @size: Expected read data size
+ * @actual_size: The pointer of the actual data size read from touch device
+ * @buffer: The pointer of data buffer to store the data read from touch device
+ *
+ * Return: 0 on success, other error codes on failed.
+ */
+int thc_tic_pio_read(struct thc_device *dev, const u32 address,
+ const u32 size, u32 *actual_size, u32 *buffer)
+{
+ u8 opcode;
+ int ret;
+
+ if (size <= 0 || !actual_size || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, actual_size %p, buffer %p\n",
+ size, actual_size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ opcode = (dev->port_type == THC_PORT_TYPE_SPI) ?
+ THC_PIO_OP_SPI_TIC_READ : THC_PIO_OP_I2C_TIC_READ;
+
+ ret = prepare_pio(dev, opcode, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, 0, NULL);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, buffer, actual_size);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_read, "INTEL_THC");
+
+/**
+ * thc_tic_pio_write - Write data to touch device by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @size: PIO write data size
+ * @buffer: The pointer of the write data buffer
+ *
+ * Return: 0 on success, other error codes on failed.
+ */
+int thc_tic_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer)
+{
+ u8 opcode;
+ int ret;
+
+ if (size <= 0 || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ opcode = (dev->port_type == THC_PORT_TYPE_SPI) ?
+ THC_PIO_OP_SPI_TIC_WRITE : THC_PIO_OP_I2C_TIC_WRITE;
+
+ ret = prepare_pio(dev, opcode, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, size, buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, NULL, NULL);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_write, "INTEL_THC");
+
+/**
+ * thc_tic_pio_write_and_read - Write data followed by read data by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @write_size: PIO write data size
+ * @write_buffer: The pointer of the write data buffer
+ * @read_size: Expected PIO read data size
+ * @actual_size: The pointer of the actual read data size
+ * @read_buffer: The pointer of PIO read data buffer
+ *
+ * Return: 0 on success, other error codes on failed.
+ */
+int thc_tic_pio_write_and_read(struct thc_device *dev, const u32 address,
+ const u32 write_size, const u32 *write_buffer,
+ const u32 read_size, u32 *actual_size, u32 *read_buffer)
+{
+ u32 i2c_ctrl, mask;
+ int ret;
+
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ dev_err(dev->dev, "SPI port type doesn't support pio write and read!");
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ /* Config i2c PIO write and read sequence */
+ i2c_ctrl = FIELD_PREP(THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC, write_size);
+ mask = THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET,
+ mask, i2c_ctrl);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET,
+ THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN,
+ THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN);
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_TIC_WRITE_AND_READ, address, read_size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, write_size, write_buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, read_buffer, actual_size);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_write_and_read, "INTEL_THC");
+
+/**
+ * thc_interrupt_config - Configure THC interrupts
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_interrupt_config(struct thc_device *dev)
+{
+ u32 mbits, mask, r_dma_ctrl_1;
+
+ /* Clear Error reporting interrupt status bits */
+ mbits = THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS |
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_INT_STATUS_OFFSET,
+ mbits, mbits);
+
+ /* Enable Error Reporting Interrupts */
+ mbits = THC_M_PRT_INT_EN_TXN_ERR_INT_EN |
+ THC_M_PRT_INT_EN_FATAL_ERR_INT_EN |
+ THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_INT_EN_OFFSET,
+ mbits, mbits);
+
+ /* Clear PIO Interrupt status bits */
+ mbits = THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_STS_OFFSET,
+ mbits, mbits);
+
+ /* Read Interrupts */
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ &r_dma_ctrl_1);
+ /* Disable RxDMA1 */
+ r_dma_ctrl_1 &= ~THC_M_PRT_READ_DMA_CNTRL_IE_EOF;
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ r_dma_ctrl_1);
+
+ /* Ack EOF Interrupt RxDMA1 */
+ mbits = THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS;
+ /* Ack NonDMA Interrupt */
+ mbits |= THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ mbits, mbits);
+
+ /* Ack EOF Interrupt RxDMA2 */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+
+ /* Write Interrupts */
+ /* Disable TxDMA */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL,
+ 0);
+
+ /* Clear TxDMA interrupt status bits */
+ mbits = THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS;
+ mbits |= THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET,
+ mbits, mbits);
+
+ /* Enable Non-DMA device inband interrupt */
+ r_dma_ctrl_1 |= THC_M_PRT_READ_DMA_CNTRL_IE_NDDI;
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ r_dma_ctrl_1);
+
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ /* Edge triggered interrupt */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN);
+ } else {
+ /* Level triggered interrupt */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN, 0);
+
+ mbits = THC_M_PRT_INT_EN_THC_I2C_IC_MST_ON_HOLD_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_SCL_STUCK_AT_LOW_DET_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_TX_ABRT_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_TX_OVER_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_FULL_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_OVER_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_UNDER_INT_EN;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET,
+ mbits, mbits);
+ }
+
+ thc_set_pio_interrupt_support(dev, false);
+
+ /* HIDSPI specific settings */
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET,
+ THC_BIT_OFFSET_INTERRUPT_TYPE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN,
+ THC_BIT_LENGTH_INTERRUPT_TYPE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET,
+ THC_BIT_OFFSET_LAST_FRAGMENT_FLAG) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
+ THC_BITMASK_INVALID_TYPE_DATA);
+ mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CFG_1_OFFSET,
+ mask, mbits);
+
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET,
+ THC_BIT_OFFSET_MICROFRAME_SIZE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN,
+ THC_BIT_LENGTH_MICROFRAME_SIZE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT,
+ THC_UNIT_MICROFRAME_SIZE) |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL;
+ mask = THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CFG_2_OFFSET,
+ mask, mbits);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_config, "INTEL_THC");
+
+/**
+ * thc_int_trigger_type_select - Select THC interrupt trigger type
+ *
+ * @dev: the pointer of THC private device context
+ * @edge_trigger: whether the interrupt is edge triggered or level triggered
+ */
+void thc_int_trigger_type_select(struct thc_device *dev, bool edge_trigger)
+{
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN,
+ edge_trigger ? THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN : 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_int_trigger_type_select, "INTEL_THC");
+
+/**
+ * thc_interrupt_enable - Enable or disable THC interrupt
+ *
+ * @dev: the pointer of THC private device context
+ * @int_enable: true to enable the THC interrupt, false to disable it
+ */
+void thc_interrupt_enable(struct thc_device *dev, bool int_enable)
+{
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET,
+ THC_M_PRT_INT_EN_GBL_INT_EN,
+ int_enable ? THC_M_PRT_INT_EN_GBL_INT_EN : 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_enable, "INTEL_THC");
+
+/**
+ * thc_interrupt_quiesce - Quiesce or unquiesce external touch device interrupt
+ *
+ * @dev: the pointer of THC private device context
+ * @int_quiesce: true to quiesce the device interrupt, false to unquiesce it
+ *
+ * Return: 0 on success, other error codes on failure
+ */
+int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce)
+{
+ u32 ctrl;
+ int ret;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);
+ if (!(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN) && !int_quiesce) {
+ dev_warn(dev->dev, "THC interrupt already unquiesced\n");
+ return 0;
+ }
+
+ if ((ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN) && int_quiesce) {
+ dev_warn(dev->dev, "THC interrupt already quiesced\n");
+ return 0;
+ }
+
+ /* Quiesce device interrupt - Set quiesce bit and wait for THC HW to ACK */
+ if (int_quiesce)
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN);
+
+ ret = regmap_read_poll_timeout(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, ctrl,
+ ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS,
+ THC_REGMAP_POLLING_INTERVAL_US, THC_QUIESCE_EN_TIMEOUT_US);
+ if (ret) {
+ dev_err_once(dev->dev,
+ "Timeout while waiting THC idle, target quiesce state = %s\n",
+ int_quiesce ? "true" : "false");
+ return ret;
+ }
+
+ /* Unquiesce device interrupt - Clear the quiesce bit */
+ if (!int_quiesce)
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_quiesce, "INTEL_THC");
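+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a caller that
+ * needs the touch device interrupt silenced around a bus sequence could
+ * bracket it like this, assuming "tdev" is a valid struct thc_device
+ * pointer. The SWDMA read path in intel-thc-dma.c follows this pattern.
+ *
+ * ret = thc_interrupt_quiesce(tdev, true);
+ * if (ret)
+ * return ret;
+ * (perform PIO or SWDMA sequence here)
+ * ret = thc_interrupt_quiesce(tdev, false);
+ */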
+
+/**
+ * thc_set_pio_interrupt_support - Set whether PIO interrupt is supported
+ *
+ * @dev: The pointer of THC private device context
+ * @supported: The flag indicating whether PIO interrupt is supported
+ */
+void thc_set_pio_interrupt_support(struct thc_device *dev, bool supported)
+{
+ dev->pio_int_supported = supported;
+}
+EXPORT_SYMBOL_NS_GPL(thc_set_pio_interrupt_support, "INTEL_THC");
+
+/**
+ * thc_ltr_config - Configure THC Latency Tolerance Reporting (LTR) settings
+ *
+ * @dev: The pointer of THC private device context
+ * @active_ltr_us: active LTR value, unit is us
+ * @lp_ltr_us: low power LTR value, unit is us
+ */
+void thc_ltr_config(struct thc_device *dev, u32 active_ltr_us, u32 lp_ltr_us)
+{
+ u32 active_ltr_scale, lp_ltr_scale, ltr_ctrl, ltr_mask, orig, tmp;
+
+ if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_3 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_3) {
+ active_ltr_scale = THC_LTR_SCALE_3;
+ active_ltr_us = active_ltr_us >> 5;
+ } else if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_4 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_4) {
+ active_ltr_scale = THC_LTR_SCALE_4;
+ active_ltr_us = active_ltr_us >> 10;
+ } else if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_5 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_5) {
+ active_ltr_scale = THC_LTR_SCALE_5;
+ active_ltr_us = active_ltr_us >> 15;
+ } else {
+ active_ltr_scale = THC_LTR_SCALE_2;
+ }
+
+ if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_3 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_3) {
+ lp_ltr_scale = THC_LTR_SCALE_3;
+ lp_ltr_us = lp_ltr_us >> 5;
+ } else if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_4 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_4) {
+ lp_ltr_scale = THC_LTR_SCALE_4;
+ lp_ltr_us = lp_ltr_us >> 10;
+ } else if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_5 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_5) {
+ lp_ltr_scale = THC_LTR_SCALE_5;
+ lp_ltr_us = lp_ltr_us >> 15;
+ } else {
+ lp_ltr_scale = THC_LTR_SCALE_2;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, &orig);
+ ltr_ctrl = FIELD_PREP(THC_M_CMN_LTR_CTRL_ACT_LTR_VAL, active_ltr_us) |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE, active_ltr_scale) |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_LP_LTR_VAL, lp_ltr_us) |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_LP_LTR_SCALE, lp_ltr_scale) |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ;
+
+ ltr_mask = THC_M_CMN_LTR_CTRL_ACT_LTR_VAL |
+ THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ THC_M_CMN_LTR_CTRL_LP_LTR_VAL |
+ THC_M_CMN_LTR_CTRL_LP_LTR_SCALE |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN;
+
+ tmp = orig & ~ltr_mask;
+ tmp |= ltr_ctrl & ltr_mask;
+
+ regmap_write(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(thc_ltr_config, "INTEL_THC");
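+
+/*
+ * Worked example, derived from the code above: thc_ltr_config() picks the
+ * LTR scale whose window covers the requested microsecond value, then
+ * pre-shifts the value into that scale's units. A request that falls into
+ * the scale-3 window is encoded as the value >> 5 with THC_LTR_SCALE_3, a
+ * scale-4 match as the value >> 10, a scale-5 match as the value >> 15,
+ * and anything outside those windows falls back to THC_LTR_SCALE_2 with
+ * the value used unshifted.
+ */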
+
+/**
+ * thc_change_ltr_mode - Change THC LTR mode
+ *
+ * @dev: The pointer of THC private device context
+ * @ltr_mode: LTR mode(active or low power)
+ */
+void thc_change_ltr_mode(struct thc_device *dev, u32 ltr_mode)
+{
+ if (ltr_mode == THC_LTR_MODE_ACTIVE) {
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN);
+ return;
+ }
+
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN);
+}
+EXPORT_SYMBOL_NS_GPL(thc_change_ltr_mode, "INTEL_THC");
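+
+/*
+ * Usage sketch (illustrative only): a power-management aware caller would
+ * switch LTR mode on power state transitions, assuming "tdev" is a valid
+ * THC device context:
+ *
+ * (on resume) thc_change_ltr_mode(tdev, THC_LTR_MODE_ACTIVE);
+ * (on suspend) pass any other mode value to select low power LTR
+ */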
+
+/**
+ * thc_ltr_unconfig - Unconfigure THC Latency Tolerance Reporting (LTR) settings
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_ltr_unconfig(struct thc_device *dev)
+{
+ u32 ltr_ctrl, bits_clear;
+
+ regmap_read(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, &ltr_ctrl);
+ bits_clear = THC_M_CMN_LTR_CTRL_LP_LTR_EN |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ;
+
+ ltr_ctrl &= ~bits_clear;
+
+ regmap_write(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, ltr_ctrl);
+}
+EXPORT_SYMBOL_NS_GPL(thc_ltr_unconfig, "INTEL_THC");
+
+/**
+ * thc_int_cause_read - Read interrupt cause register value
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: The interrupt cause register value
+ */
+u32 thc_int_cause_read(struct thc_device *dev)
+{
+ u32 int_cause;
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_DEV_INT_CAUSE_REG_VAL_OFFSET, &int_cause);
+
+ return int_cause;
+}
+EXPORT_SYMBOL_NS_GPL(thc_int_cause_read, "INTEL_THC");
+
+static void thc_print_txn_error_cause(const struct thc_device *dev)
+{
+ bool known_error = false;
+ u32 cause = 0;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, &cause);
+
+ if (cause & THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR) {
+ dev_err(dev->dev, "TXN Error: Invalid PRD Entry\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR) {
+ dev_err(dev->dev, "TXN Error: THC Buffer Overrun\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR) {
+ dev_err(dev->dev, "TXN Error: Frame Babble\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY) {
+ dev_err(dev->dev, "TXN Error: Invalid Device Register Setting\n");
+ known_error = true;
+ }
+
+ /* Clear interrupt status bits */
+ regmap_write(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, cause);
+
+ if (!known_error)
+ dev_err(dev->dev, "TXN Error does not match any known value: 0x%X\n",
+ cause);
+}
+
+/**
+ * thc_interrupt_handler - Handle THC interrupts
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * THC interrupts include several types: external touch device (TIC) non-DMA
+ * interrupts, PIO completion interrupts, DMA interrupts, I2C subIP raw
+ * interrupts and error interrupts.
+ *
+ * This is a helper function for interrupt processing: it detects the
+ * interrupt type, clears the corresponding interrupt status bits and returns
+ * the interrupt type to the caller for further processing.
+ *
+ * Return: The combined flag for interrupt type
+ */
+int thc_interrupt_handler(struct thc_device *dev)
+{
+ u32 read_sts_1, read_sts_2, read_sts_sw, write_sts;
+ u32 int_sts, err_cause, seq_cntrl, seq_sts;
+ int interrupt_type = 0;
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET, &read_sts_1);
+
+ if (read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS) {
+ dev_dbg(dev->dev, "THC non-DMA device interrupt\n");
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ NONDMA_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_NONDMA_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET, &int_sts);
+
+ if (int_sts & THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS) {
+ dev_err(dev->dev, "THC transaction error, int_sts: 0x%08X\n", int_sts);
+ thc_print_txn_error_cause(dev);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ TXN_ERR_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_TXN_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, &err_cause);
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET, &read_sts_2);
+
+ if (err_cause & THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR ||
+ read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_STALL_STS ||
+ read_sts_2 & THC_M_PRT_READ_DMA_INT_STS_STALL_STS) {
+ dev_err(dev->dev, "Buffer overrun or RxDMA engine stalled!\n");
+ thc_print_txn_error_cause(dev);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_STALL_STS);
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_STALL_STS);
+ regmap_write(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET,
+ THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR);
+
+ interrupt_type |= BIT(THC_TXN_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ if (int_sts & THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS) {
+ dev_err_once(dev->dev, "THC FATAL error, int_sts: 0x%08X\n", int_sts);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ TXN_FATAL_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_FATAL_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET, &seq_cntrl);
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_STS_OFFSET, &seq_sts);
+
+ if (seq_cntrl & THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE &&
+ seq_sts & THC_M_PRT_SW_SEQ_STS_TSSDONE) {
+ dev_dbg(dev->dev, "THC_SS_CD_IE and TSSDONE are set\n");
+ interrupt_type |= BIT(THC_PIO_DONE_INT);
+ }
+
+ if (read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS) {
+ dev_dbg(dev->dev, "Got RxDMA1 Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET, read_sts_1);
+
+ interrupt_type |= BIT(THC_RXDMA1_INT);
+ }
+
+ if (read_sts_2 & THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS) {
+ dev_dbg(dev->dev, "Got RxDMA2 Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET, read_sts_2);
+
+ interrupt_type |= BIT(THC_RXDMA2_INT);
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET, &read_sts_sw);
+
+ if (read_sts_sw & THC_M_PRT_READ_DMA_INT_STS_DMACPL_STS) {
+ dev_dbg(dev->dev, "Got SwDMA Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET, read_sts_sw);
+
+ dev->swdma_done = true;
+ wake_up_interruptible(&dev->swdma_complete_wait);
+
+ interrupt_type |= BIT(THC_SWDMA_INT);
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET, &write_sts);
+
+ if (write_sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS) {
+ dev_dbg(dev->dev, "Got TxDMA Write complete Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET, write_sts);
+
+ dev->write_done = true;
+ wake_up_interruptible(&dev->write_complete_wait);
+
+ interrupt_type |= BIT(THC_TXDMA_INT);
+ }
+
+ if (int_sts & THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+
+ if (!interrupt_type)
+ interrupt_type |= BIT(THC_UNKNOWN_INT);
+
+ return interrupt_type;
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_handler, "INTEL_THC");
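+
+/*
+ * Usage sketch (illustrative only): a threaded IRQ handler would typically
+ * dispatch on the returned bitmask, assuming "tdev" is a valid THC device
+ * context:
+ *
+ * int int_mask = thc_interrupt_handler(tdev);
+ *
+ * if (int_mask & BIT(THC_RXDMA2_INT))
+ * (drain input reports, e.g. via thc_rxdma_read())
+ * if (int_mask & (BIT(THC_TXN_ERR_INT) | BIT(THC_FATAL_ERR_INT)))
+ * (reset or recover the port)
+ */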
+
+/**
+ * thc_port_select - Set THC port type
+ *
+ * @dev: The pointer of THC private device context
+ * @port_type: THC port type to use for current device
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_port_select(struct thc_device *dev, enum thc_port_type port_type)
+{
+ u32 ctrl, mask;
+
+ if (port_type == THC_PORT_TYPE_SPI) {
+ dev_dbg(dev->dev, "Set THC port type to SPI\n");
+ dev->port_type = THC_PORT_TYPE_SPI;
+
+ /* Enable delay of CS assertion and set to default value */
+ ctrl = THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN |
+ FIELD_PREP(THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL,
+ THC_CSA_CK_DELAY_VAL_DEFAULT);
+ mask = THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN |
+ THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SPI_DUTYC_CFG_OFFSET,
+ mask, ctrl);
+ } else if (port_type == THC_PORT_TYPE_I2C) {
+ dev_dbg(dev->dev, "Set THC port type to I2C\n");
+ dev->port_type = THC_PORT_TYPE_I2C;
+
+ /* Set THC transition arbitration policy to frame boundary for I2C */
+ ctrl = FIELD_PREP(THC_M_PRT_CONTROL_THC_ARB_POLICY,
+ THC_ARB_POLICY_FRAME_BOUNDARY);
+ mask = THC_M_PRT_CONTROL_THC_ARB_POLICY;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, mask, ctrl);
+ } else {
+ dev_err(dev->dev, "unsupported THC port type: %d\n", port_type);
+ return -EINVAL;
+ }
+
+ ctrl = FIELD_PREP(THC_M_PRT_CONTROL_PORT_TYPE, port_type);
+ mask = THC_M_PRT_CONTROL_PORT_TYPE;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, mask, ctrl);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_port_select, "INTEL_THC");
+
+#define THC_SPI_FREQUENCY_7M 7812500
+#define THC_SPI_FREQUENCY_15M 15625000
+#define THC_SPI_FREQUENCY_17M 17857100
+#define THC_SPI_FREQUENCY_20M 20833000
+#define THC_SPI_FREQUENCY_25M 25000000
+#define THC_SPI_FREQUENCY_31M 31250000
+#define THC_SPI_FREQUENCY_41M 41666700
+
+#define THC_SPI_LOW_FREQUENCY THC_SPI_FREQUENCY_17M
+
+static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
+{
+ int frequency[] = {
+ THC_SPI_FREQUENCY_7M,
+ THC_SPI_FREQUENCY_15M,
+ THC_SPI_FREQUENCY_17M,
+ THC_SPI_FREQUENCY_20M,
+ THC_SPI_FREQUENCY_25M,
+ THC_SPI_FREQUENCY_31M,
+ THC_SPI_FREQUENCY_41M,
+ };
+ u8 frequency_div[] = {
+ THC_SPI_FRQ_DIV_2,
+ THC_SPI_FRQ_DIV_1,
+ THC_SPI_FRQ_DIV_7,
+ THC_SPI_FRQ_DIV_6,
+ THC_SPI_FRQ_DIV_5,
+ THC_SPI_FRQ_DIV_4,
+ THC_SPI_FRQ_DIV_3,
+ };
+ int size = ARRAY_SIZE(frequency);
+ u32 closest_freq;
+ u8 freq_div;
+ int i;
+
+ for (i = size - 1; i >= 0; i--)
+ if ((int)spi_freq_val - frequency[i] >= 0)
+ break;
+
+ if (i < 0) {
+ dev_err_once(dev->dev, "Unsupported SPI frequency %d\n", spi_freq_val);
+ return THC_SPI_FRQ_RESERVED;
+ }
+
+ closest_freq = frequency[i];
+ freq_div = frequency_div[i];
+
+ dev_dbg(dev->dev,
+ "Setting SPI frequency: spi_freq_val = %u, Closest freq = %u\n",
+ spi_freq_val, closest_freq);
+
+ return freq_div;
+}
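+
+/*
+ * Worked example, derived from the tables above: the divider search walks
+ * from the highest supported frequency down and picks the largest entry
+ * that does not exceed the request. A 30 MHz request therefore maps to
+ * THC_SPI_FREQUENCY_25M / THC_SPI_FRQ_DIV_5, while any request below
+ * THC_SPI_FREQUENCY_7M has no match and yields THC_SPI_FRQ_RESERVED.
+ */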
+
+/**
+ * thc_spi_read_config - Configure SPI bus read attributes
+ *
+ * @dev: The pointer of THC private device context
+ * @spi_freq_val: SPI read frequency value
+ * @io_mode: SPI read IO mode
+ * @opcode: Read opcode
+ * @spi_rd_mps: SPI read max packet size
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_spi_read_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_rd_mps)
+{
+ bool is_low_freq = false;
+ u32 cfg, mask;
+ u8 freq_div;
+
+ freq_div = thc_get_spi_freq_div_val(dev, spi_freq_val);
+ if (freq_div == THC_SPI_FRQ_RESERVED)
+ return -EINVAL;
+
+ if (spi_freq_val < THC_SPI_LOW_FREQUENCY)
+ is_low_freq = true;
+
+ cfg = FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TCRF, freq_div) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TRMODE, io_mode) |
+ (is_low_freq ? THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN : 0) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_RD_MPS, spi_rd_mps);
+ mask = THC_M_PRT_SPI_CFG_SPI_TCRF |
+ THC_M_PRT_SPI_CFG_SPI_TRMODE |
+ THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN |
+ THC_M_PRT_SPI_CFG_SPI_RD_MPS;
+
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SPI_CFG_OFFSET, mask, cfg);
+
+ if (io_mode == THC_QUAD_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO, opcode);
+ else if (io_mode == THC_DUAL_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO, opcode);
+ else
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO, opcode);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, opcode);
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_DMARD_OPCODE_OFFSET, opcode);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_read_config, "INTEL_THC");
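+
+/*
+ * Usage sketch (illustrative only, all parameter values hypothetical):
+ * configure quad IO reads at 25 MHz with a 4 KB max packet size, assuming
+ * "tdev" is a valid THC device context and 0x0B is the touch device's read
+ * opcode:
+ *
+ * ret = thc_spi_read_config(tdev, THC_SPI_FREQUENCY_25M,
+ * THC_QUAD_IO, 0x0B, SZ_4K);
+ */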
+
+/**
+ * thc_spi_write_config - Configure SPI bus write attributes
+ *
+ * @dev: The pointer of THC private device context
+ * @spi_freq_val: SPI write frequency value
+ * @io_mode: SPI write IO mode
+ * @opcode: Write opcode
+ * @spi_wr_mps: SPI write max packet size
+ * @perf_limit: Performance limitation in unit of 10us
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_spi_write_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_wr_mps,
+ u32 perf_limit)
+{
+ bool is_low_freq = false;
+ u32 cfg, mask;
+ u8 freq_div;
+
+ freq_div = thc_get_spi_freq_div_val(dev, spi_freq_val);
+ if (freq_div == THC_SPI_FRQ_RESERVED)
+ return -EINVAL;
+
+ if (spi_freq_val < THC_SPI_LOW_FREQUENCY)
+ is_low_freq = true;
+
+ cfg = FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TCWF, freq_div) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TWMODE, io_mode) |
+ (is_low_freq ? THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN : 0) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_WR_MPS, spi_wr_mps);
+ mask = THC_M_PRT_SPI_CFG_SPI_TCWF |
+ THC_M_PRT_SPI_CFG_SPI_TWMODE |
+ THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN |
+ THC_M_PRT_SPI_CFG_SPI_WR_MPS;
+
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SPI_CFG_OFFSET, mask, cfg);
+
+ if (io_mode == THC_QUAD_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO, opcode);
+ else if (io_mode == THC_DUAL_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO, opcode);
+ else
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO, opcode);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_WR_OPCODE_OFFSET, opcode);
+
+ dev->perf_limit = perf_limit;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_write_config, "INTEL_THC");
+
+/**
+ * thc_spi_input_output_address_config - Configure SPI input and output addresses
+ *
+ * @dev: the pointer of THC private device context
+ * @input_hdr_addr: input report header address
+ * @input_bdy_addr: input report body address
+ * @output_addr: output report address
+ */
+void thc_spi_input_output_address_config(struct thc_device *dev, u32 input_hdr_addr,
+ u32 input_bdy_addr, u32 output_addr)
+{
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_DEV_INT_CAUSE_ADDR_OFFSET, input_hdr_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_RD_BULK_ADDR_1_OFFSET, input_bdy_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_RD_BULK_ADDR_2_OFFSET, input_bdy_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_WR_BULK_ADDR_OFFSET, output_addr);
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_input_output_address_config, "INTEL_THC");
+
+static int thc_i2c_subip_pio_read(struct thc_device *dev, const u32 address,
+ u32 *size, u32 *buffer)
+{
+ int ret;
+
+ if (!size || *size == 0 || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %p, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_SUBSYSTEM_READ, address, *size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, 0, NULL);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, buffer, size);
+ if (ret < 0)
+ goto end;
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (ret)
+ dev_err_once(dev->dev, "Read THC I2C SubIP register failed %d, offset %u\n",
+ ret, address);
+
+ return ret;
+}
+
+static int thc_i2c_subip_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer)
+{
+ int ret;
+
+ if (size == 0 || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_SUBSYSTEM_WRITE, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, size, buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, NULL, NULL);
+ if (ret < 0)
+ goto end;
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (ret)
+ dev_err_once(dev->dev, "Write THC I2C SubIP register failed %d, offset %u\n",
+ ret, address);
+
+ return ret;
+}
+
+#define I2C_SUBIP_CON_DEFAULT 0x663
+#define I2C_SUBIP_INT_MASK_DEFAULT 0x7FFF
+#define I2C_SUBIP_RX_TL_DEFAULT 62
+#define I2C_SUBIP_TX_TL_DEFAULT 0
+#define I2C_SUBIP_DMA_TDLR_DEFAULT 7
+#define I2C_SUBIP_DMA_RDLR_DEFAULT 7
+
+static int thc_i2c_subip_set_speed(struct thc_device *dev, const u32 speed,
+ const u32 hcnt, const u32 lcnt)
+{
+ u32 hcnt_offset, lcnt_offset;
+ u32 val;
+ int ret;
+
+ switch (speed) {
+ case THC_I2C_STANDARD:
+ hcnt_offset = THC_I2C_IC_SS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_SS_SCL_LCNT_OFFSET;
+ break;
+
+ case THC_I2C_FAST_AND_PLUS:
+ hcnt_offset = THC_I2C_IC_FS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_FS_SCL_LCNT_OFFSET;
+ break;
+
+ case THC_I2C_HIGH_SPEED:
+ hcnt_offset = THC_I2C_IC_HS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_HS_SCL_LCNT_OFFSET;
+ break;
+
+ default:
+ dev_err_once(dev->dev, "Unsupported i2c speed %d\n", speed);
+ return -EINVAL;
+ }
+
+ ret = thc_i2c_subip_pio_write(dev, hcnt_offset, sizeof(u32), &hcnt);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_write(dev, lcnt_offset, sizeof(u32), &lcnt);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_CON_DEFAULT & ~THC_I2C_IC_CON_SPEED;
+ val |= FIELD_PREP(THC_I2C_IC_CON_SPEED, speed);
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_CON_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static u32 i2c_subip_regs[] = {
+ THC_I2C_IC_CON_OFFSET,
+ THC_I2C_IC_TAR_OFFSET,
+ THC_I2C_IC_INTR_MASK_OFFSET,
+ THC_I2C_IC_RX_TL_OFFSET,
+ THC_I2C_IC_TX_TL_OFFSET,
+ THC_I2C_IC_DMA_CR_OFFSET,
+ THC_I2C_IC_DMA_TDLR_OFFSET,
+ THC_I2C_IC_DMA_RDLR_OFFSET,
+ THC_I2C_IC_SS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_SS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_FS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_FS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_HS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_HS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_ENABLE_OFFSET,
+};
+
+/**
+ * thc_i2c_subip_init - Initialize and configure THC I2C subsystem
+ *
+ * @dev: The pointer of THC private device context
+ * @target_address: Slave address of touch device (TIC)
+ * @speed: I2C bus frequency speed mode
+ * @hcnt: I2C clock SCL high count
+ * @lcnt: I2C clock SCL low count
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_init(struct thc_device *dev, const u32 target_address,
+ const u32 speed, const u32 hcnt, const u32 lcnt)
+{
+ u32 read_size = sizeof(u32);
+ u32 val;
+ int ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_ENABLE_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~THC_I2C_IC_ENABLE_ENABLE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_ENABLE_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_TAR_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~THC_I2C_IC_TAR_IC_TAR;
+ val |= FIELD_PREP(THC_I2C_IC_TAR_IC_TAR, target_address);
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_TAR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_set_speed(dev, speed, hcnt, lcnt);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_INT_MASK_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_INTR_MASK_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_RX_TL_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_RX_TL_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_TX_TL_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_TX_TL_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = THC_I2C_IC_DMA_CR_RDMAE | THC_I2C_IC_DMA_CR_TDMAE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_CR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_DMA_TDLR_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_TDLR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_DMA_RDLR_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_RDLR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_ENABLE_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val |= THC_I2C_IC_ENABLE_ENABLE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_ENABLE_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ dev->i2c_subip_regs = devm_kzalloc(dev->dev, sizeof(i2c_subip_regs), GFP_KERNEL);
+ if (!dev->i2c_subip_regs)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_init, "INTEL_THC");
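+
+/*
+ * Usage sketch (illustrative only, all values hypothetical): initialize the
+ * I2C subIP for a touch device at slave address 0x10 in Fast Mode, with
+ * board specific SCL high/low counts (normally taken from platform/ACPI
+ * data), assuming "tdev" is a valid THC device context:
+ *
+ * ret = thc_i2c_subip_init(tdev, 0x10, THC_I2C_FAST_AND_PLUS,
+ * hcnt, lcnt);
+ */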
+
+/**
+ * thc_i2c_subip_regs_save - Save THC I2C subsystem register values to THC device context
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_regs_save(struct thc_device *dev)
+{
+ int ret;
+ u32 read_size = sizeof(u32);
+
+ for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
+ ret = thc_i2c_subip_pio_read(dev, i2c_subip_regs[i],
+ &read_size, &dev->i2c_subip_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_regs_save, "INTEL_THC");
+
+/**
+ * thc_i2c_subip_regs_restore - Restore THC I2C subsystem registers from THC device context
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_regs_restore(struct thc_device *dev)
+{
+ int ret;
+ u32 write_size = sizeof(u32);
+
+ for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
+ ret = thc_i2c_subip_pio_write(dev, i2c_subip_regs[i],
+ write_size, &dev->i2c_subip_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_regs_restore, "INTEL_THC");
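+
+/*
+ * Usage sketch (illustrative only): these two helpers are intended to be
+ * paired across a power transition, assuming "tdev" is a valid THC device
+ * context:
+ *
+ * (suspend path) ret = thc_i2c_subip_regs_save(tdev);
+ * (resume path) ret = thc_i2c_subip_regs_restore(tdev);
+ */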
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) THC Hardware Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
new file mode 100644
index 000000000000..0517fee2c668
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_DEV_H_
+#define _INTEL_THC_DEV_H_
+
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "intel-thc-dma.h"
+
+#define THC_REGMAP_COMMON_OFFSET 0x10
+#define THC_REGMAP_MMIO_OFFSET 0x1000
+
+/**
+ * enum thc_port_type - THC port type
+ * @THC_PORT_TYPE_SPI: This port is used for HIDSPI
+ * @THC_PORT_TYPE_I2C: This port is used for HIDI2C
+ */
+enum thc_port_type {
+ THC_PORT_TYPE_SPI = 0,
+ THC_PORT_TYPE_I2C = 1,
+};
+
+/**
+ * enum thc_int_type - THC interrupt flag
+ * @THC_NONDMA_INT: THC non-DMA interrupt
+ * @THC_RXDMA1_INT: THC RxDMA1 interrupt
+ * @THC_RXDMA2_INT: THC RxDMA2 interrupt
+ * @THC_SWDMA_INT: THC SWDMA interrupt
+ * @THC_TXDMA_INT: THC TXDMA interrupt
+ * @THC_PIO_DONE_INT: THC PIO complete interrupt
+ * @THC_I2CSUBIP_INT: THC I2C subsystem interrupt
+ * @THC_TXN_ERR_INT: THC transfer error interrupt
+ * @THC_FATAL_ERR_INT: THC fatal error interrupt
+ * @THC_UNKNOWN_INT: THC unknown interrupt
+ */
+enum thc_int_type {
+ THC_NONDMA_INT = 0,
+ THC_RXDMA1_INT = 1,
+ THC_RXDMA2_INT = 2,
+ THC_SWDMA_INT = 3,
+ THC_TXDMA_INT = 4,
+ THC_PIO_DONE_INT = 5,
+ THC_I2CSUBIP_INT = 6,
+ THC_TXN_ERR_INT = 7,
+ THC_FATAL_ERR_INT = 8,
+ THC_UNKNOWN_INT
+};
+
+/**
+ * struct thc_device - THC private device struct
+ * @dev: The pointer of THC system device object
+ * @thc_regmap: MMIO regmap structure for accessing THC registers
+ * @mmio_addr: MMIO registers address
+ * @thc_bus_lock: mutex lock for THC config
+ * @port_type: port type of THC port instance
+ * @pio_int_supported: PIO interrupt supported flag
+ * @dma_ctx: DMA specific data
+ * @write_complete_wait: signal event for DMA write complete
+ * @swdma_complete_wait: signal event for SWDMA sequence complete
+ * @write_done: bool value that indicates if DMA write is done
+ * @swdma_done: bool value that indicates if SWDMA sequence is done
+ * @perf_limit: the delay between read operation and write operation
+ * @i2c_subip_regs: the copy of THC I2C subsystem registers for resume restore
+ */
+struct thc_device {
+ struct device *dev;
+ struct regmap *thc_regmap;
+ void __iomem *mmio_addr;
+ struct mutex thc_bus_lock;
+ enum thc_port_type port_type;
+ bool pio_int_supported;
+
+ struct thc_dma_context *dma_ctx;
+
+ wait_queue_head_t write_complete_wait;
+ wait_queue_head_t swdma_complete_wait;
+ bool write_done;
+ bool swdma_done;
+
+ u32 perf_limit;
+
+ u32 *i2c_subip_regs;
+};
+
+struct thc_device *thc_dev_init(struct device *device, void __iomem *mem_addr);
+int thc_tic_pio_read(struct thc_device *dev, const u32 address,
+ const u32 size, u32 *actual_size, u32 *buffer);
+int thc_tic_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer);
+int thc_tic_pio_write_and_read(struct thc_device *dev, const u32 address,
+ const u32 write_size, const u32 *write_buffer,
+ const u32 read_size, u32 *actual_size, u32 *read_buffer);
+void thc_interrupt_config(struct thc_device *dev);
+void thc_int_trigger_type_select(struct thc_device *dev, bool edge_trigger);
+void thc_interrupt_enable(struct thc_device *dev, bool int_enable);
+void thc_set_pio_interrupt_support(struct thc_device *dev, bool supported);
+int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce);
+void thc_ltr_config(struct thc_device *dev, u32 active_ltr_us, u32 lp_ltr_us);
+void thc_change_ltr_mode(struct thc_device *dev, u32 ltr_mode);
+void thc_ltr_unconfig(struct thc_device *dev);
+u32 thc_int_cause_read(struct thc_device *dev);
+int thc_interrupt_handler(struct thc_device *dev);
+int thc_port_select(struct thc_device *dev, enum thc_port_type port_type);
+int thc_spi_read_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_rd_mps);
+int thc_spi_write_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_wr_mps, u32 perf_limit);
+void thc_spi_input_output_address_config(struct thc_device *dev, u32 input_hdr_addr,
+ u32 input_bdy_addr, u32 output_addr);
+int thc_i2c_subip_init(struct thc_device *dev, const u32 target_address,
+ const u32 speed, const u32 hcnt, const u32 lcnt);
+int thc_i2c_subip_regs_save(struct thc_device *dev);
+int thc_i2c_subip_regs_restore(struct thc_device *dev);
+
+#endif /* _INTEL_THC_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
new file mode 100644
index 000000000000..eb23bea77686
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/overflow.h>
+#include <linux/regmap.h>
+#include <linux/scatterlist.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+#include "intel-thc-hw.h"
+
+static void dma_set_prd_base_addr(struct thc_device *dev, u64 physical_addr,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 addr_high, addr_low;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ addr_high = upper_32_bits(physical_addr);
+ addr_low = lower_32_bits(physical_addr);
+
+ regmap_write(dev->thc_regmap, dma_config->prd_base_addr_high, addr_high);
+ regmap_write(dev->thc_regmap, dma_config->prd_base_addr_low, addr_low);
+}
+
+static void dma_set_start_bit(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask, mbits, data, offset;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ switch (dma_config->dma_channel) {
+ case THC_RXDMA1:
+ case THC_RXDMA2:
+ if (dma_config->dma_channel == THC_RXDMA2) {
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
+ THC_BITMASK_INTERRUPT_TYPE_DATA);
+ mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_DEVINT_CFG_1_OFFSET, mask, mbits);
+ }
+
+ mbits = THC_M_PRT_READ_DMA_CNTRL_IE_EOF |
+ THC_M_PRT_READ_DMA_CNTRL_SOO |
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL |
+ THC_M_PRT_READ_DMA_CNTRL_IE_ERROR |
+ THC_M_PRT_READ_DMA_CNTRL_START;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
+ mask |= THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
+ offset = dma_config->dma_channel == THC_RXDMA1 ?
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET : THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
+ regmap_write_bits(dev->thc_regmap, offset, mask, ctrl);
+ break;
+
+ case THC_SWDMA:
+ mbits = THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL |
+ THC_M_PRT_READ_DMA_CNTRL_IE_IOC |
+ THC_M_PRT_READ_DMA_CNTRL_SOO |
+ THC_M_PRT_READ_DMA_CNTRL_START;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ mask, ctrl);
+ break;
+
+ case THC_TXDMA:
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET,
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS,
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS);
+
+ /* Select interrupt or polling method upon Write completion */
+ if (dev->dma_ctx->use_write_interrupts)
+ data = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL;
+ else
+ data = 0;
+
+ data |= THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL |
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ mask, data);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void dma_set_prd_control(struct thc_device *dev, u8 entry_count, u8 cb_depth,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ if (dma_config->dma_channel == THC_TXDMA) {
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
+ ctrl = FIELD_PREP(THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC, entry_count);
+ } else {
+ mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
+ ctrl = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PTEC, entry_count) |
+ FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PCD, cb_depth);
+ }
+
+ regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, ctrl);
+}
+
+static void dma_clear_prd_control(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 mask;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ if (dma_config->dma_channel == THC_TXDMA)
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
+ else
+ mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
+
+ regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, 0);
+}
+
+static u8 dma_get_read_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, read_pointer;
+
+ regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
+ read_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCRP, ctrl);
+
+ dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCRP 0x%x\n",
+ ctrl, dma_config->dma_cntrl, read_pointer);
+
+ return read_pointer;
+}
+
+static u8 dma_get_write_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, write_pointer;
+
+ regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
+ write_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCWP, ctrl);
+
+ dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCWP 0x%x\n",
+ ctrl, dma_config->dma_cntrl, write_pointer);
+
+ return write_pointer;
+}
+
+static void dma_set_write_pointer(struct thc_device *dev, u8 value,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, value);
+ regmap_write_bits(dev->thc_regmap, dma_config->dma_cntrl, mask, ctrl);
+}
+
+static size_t dma_get_max_packet_size(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ return dma_config->max_packet_size;
+}
+
+static void dma_set_max_packet_size(struct thc_device *dev, size_t size,
+ struct thc_dma_configuration *dma_config)
+{
+ if (size) {
+ dma_config->max_packet_size = ALIGN(size, SZ_4K);
+ dma_config->is_enabled = true;
+ }
+}
+
+static void thc_copy_one_sgl_to_prd(struct thc_device *dev,
+ struct thc_dma_configuration *config,
+ unsigned int ind)
+{
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ int j;
+
+ prd_tbl = &config->prd_tbls[ind];
+
+ for_each_sg(config->sgls[ind], sg, config->sgls_nent[ind], j) {
+ prd_tbl->entries[j].dest_addr =
+ sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
+ prd_tbl->entries[j].len = sg_dma_len(sg);
+ prd_tbl->entries[j].hw_status = 0;
+ prd_tbl->entries[j].end_of_prd = 0;
+ }
+
+ /* Set the end_of_prd flag in the last filled entry */
+ if (j > 0)
+ prd_tbl->entries[j - 1].end_of_prd = 1;
+}
+
+static void thc_copy_sgls_to_prd(struct thc_device *dev,
+ struct thc_dma_configuration *config)
+{
+ unsigned int i;
+
+ memset(config->prd_tbls, 0, array_size(PRD_TABLE_SIZE, config->prd_tbl_num));
+
+ for (i = 0; i < config->prd_tbl_num; i++)
+ thc_copy_one_sgl_to_prd(dev, config, i);
+}
+
+static int setup_dma_buffers(struct thc_device *dev,
+ struct thc_dma_configuration *config,
+ enum dma_data_direction dir)
+{
+ size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
+ unsigned int i, nent = PRD_ENTRIES_NUM;
+ dma_addr_t dma_handle;
+ void *cpu_addr;
+ size_t buf_sz;
+ int count;
+
+ if (!config->is_enabled)
+ return 0;
+
+ memset(config->sgls, 0, sizeof(config->sgls));
+ memset(config->sgls_nent, 0, sizeof(config->sgls_nent));
+
+ cpu_addr = dma_alloc_coherent(dev->dev, prd_tbls_size,
+ &dma_handle, GFP_KERNEL);
+ if (!cpu_addr)
+ return -ENOMEM;
+
+ config->prd_tbls = cpu_addr;
+ config->prd_tbls_dma_handle = dma_handle;
+
+ buf_sz = dma_get_max_packet_size(dev, config);
+
+ /* Allocate and map the scatter-gather lists, one for each PRD table */
+ for (i = 0; i < config->prd_tbl_num; i++) {
+ config->sgls[i] = sgl_alloc(buf_sz, GFP_KERNEL, &nent);
+ if (!config->sgls[i] || nent > PRD_ENTRIES_NUM) {
+ dev_err_once(dev->dev, "sgl_alloc (%uth) failed, nent %u\n",
+ i, nent);
+ return -ENOMEM;
+ }
+ count = dma_map_sg(dev->dev, config->sgls[i], nent, dir);
+
+ config->sgls_nent[i] = count;
+ }
+
+ thc_copy_sgls_to_prd(dev, config);
+
+ return 0;
+}
+
+static void thc_reset_dma_settings(struct thc_device *dev)
+{
+ /* Stop all DMA channels and reset DMA read pointers */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+}
+
+static void release_dma_buffers(struct thc_device *dev,
+ struct thc_dma_configuration *config)
+{
+ size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
+ unsigned int i;
+
+ if (!config->is_enabled)
+ return;
+
+ for (i = 0; i < config->prd_tbl_num; i++) {
+ if (!config->sgls[i] || !config->sgls_nent[i])
+ continue;
+
+ dma_unmap_sg(dev->dev, config->sgls[i],
+ config->sgls_nent[i],
+ config->dir);
+
+ sgl_free(config->sgls[i]);
+ config->sgls[i] = NULL;
+ }
+
+ if (config->prd_tbls) {
+ memset(config->prd_tbls, 0, prd_tbls_size);
+ dma_free_coherent(dev->dev, prd_tbls_size, config->prd_tbls,
+ config->prd_tbls_dma_handle);
+ config->prd_tbls = NULL;
+ config->prd_tbls_dma_handle = 0;
+ }
+}
+
+struct thc_dma_context *thc_dma_init(struct thc_device *dev)
+{
+ struct thc_dma_context *dma_ctx;
+
+ dma_ctx = devm_kzalloc(dev->dev, sizeof(*dma_ctx), GFP_KERNEL);
+ if (!dma_ctx)
+ return NULL;
+
+ dev->dma_ctx = dma_ctx;
+
+ dma_ctx->dma_config[THC_RXDMA1].dma_channel = THC_RXDMA1;
+ dma_ctx->dma_config[THC_RXDMA2].dma_channel = THC_RXDMA2;
+ dma_ctx->dma_config[THC_TXDMA].dma_channel = THC_TXDMA;
+ dma_ctx->dma_config[THC_SWDMA].dma_channel = THC_SWDMA;
+
+ dma_ctx->dma_config[THC_RXDMA1].dir = DMA_FROM_DEVICE;
+ dma_ctx->dma_config[THC_RXDMA2].dir = DMA_FROM_DEVICE;
+ dma_ctx->dma_config[THC_TXDMA].dir = DMA_TO_DEVICE;
+ dma_ctx->dma_config[THC_SWDMA].dir = DMA_FROM_DEVICE;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_tbl_num = PRD_TABLES_NUM;
+ dma_ctx->dma_config[THC_RXDMA2].prd_tbl_num = PRD_TABLES_NUM;
+ dma_ctx->dma_config[THC_TXDMA].prd_tbl_num = 1;
+ dma_ctx->dma_config[THC_SWDMA].prd_tbl_num = 1;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_base_addr_high = THC_M_PRT_WPRD_BA_HI_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_base_addr_low = THC_M_PRT_WPRD_BA_LOW_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_cntrl = THC_M_PRT_RPRD_CNTRL_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_cntrl = THC_M_PRT_RPRD_CNTRL_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_cntrl = THC_M_PRT_RPRD_CNTRL_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].dma_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET;
+
+ /* Enable write DMA completion interrupt by default */
+ dma_ctx->use_write_interrupts = 1;
+
+ return dma_ctx;
+}
+
+/**
+ * thc_dma_set_max_packet_sizes - Set max packet sizes for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ * @mps_read1: RxDMA1 max packet size
+ * @mps_read2: RxDMA2 max packet size
+ * @mps_write: TxDMA max packet size
+ * @mps_swdma: Software DMA max packet size
+ *
+ * If mps is not 0, it means the corresponding DMA channel is used, then set
+ * the flag to turn on this channel.
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_set_max_packet_sizes(struct thc_device *dev, size_t mps_read1,
+ size_t mps_read2, size_t mps_write,
+ size_t mps_swdma)
+{
+ if (!dev->dma_ctx) {
+ dev_err_once(dev->dev,
+ "Cannot set max packet sizes because DMA context is NULL!\n");
+ return -EINVAL;
+ }
+
+ dma_set_max_packet_size(dev, mps_read1, &dev->dma_ctx->dma_config[THC_RXDMA1]);
+ dma_set_max_packet_size(dev, mps_read2, &dev->dma_ctx->dma_config[THC_RXDMA2]);
+ dma_set_max_packet_size(dev, mps_write, &dev->dma_ctx->dma_config[THC_TXDMA]);
+ dma_set_max_packet_size(dev, mps_swdma, &dev->dma_ctx->dma_config[THC_SWDMA]);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_set_max_packet_sizes, "INTEL_THC");
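+
+/*
+ * Usage sketch (illustrative only, sizes hypothetical): enable both RxDMA
+ * engines and TxDMA with 4 KB packets while leaving SWDMA disabled by
+ * passing 0, assuming "tdev" is a valid THC device context:
+ *
+ * ret = thc_dma_set_max_packet_sizes(tdev, SZ_4K, SZ_4K, SZ_4K, 0);
+ */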
+
+/**
+ * thc_dma_allocate - Allocate DMA buffers for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_allocate(struct thc_device *dev)
+{
+ int ret, chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ ret = setup_dma_buffers(dev, &dev->dma_ctx->dma_config[chan],
+ dev->dma_ctx->dma_config[chan].dir);
+ if (ret < 0) {
+ dev_err_once(dev->dev, "DMA setup failed for DMA channel %d\n", chan);
+ goto release_bufs;
+ }
+ }
+
+ return 0;
+
+release_bufs:
+ while (chan--)
+ release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_allocate, "INTEL_THC");
+
+/**
+ * thc_dma_release - Release DMA buffers for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_dma_release(struct thc_device *dev)
+{
+ int chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++)
+ release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_release, "INTEL_THC");
+
+static int calc_prd_entries_num(struct thc_prd_table *prd_tbl,
+ size_t mes_len, u8 *nent)
+{
+ *nent = DIV_ROUND_UP(mes_len, THC_MIN_BYTES_PER_SG_LIST_ENTRY);
+ if (*nent > PRD_ENTRIES_NUM)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t calc_message_len(struct thc_prd_table *prd_tbl, u8 *nent)
+{
+ size_t mes_len = 0;
+ unsigned int j;
+
+ for (j = 0; j < PRD_ENTRIES_NUM; j++) {
+ mes_len += prd_tbl->entries[j].len;
+ if (prd_tbl->entries[j].end_of_prd)
+ break;
+ }
+
+ *nent = j + 1;
+
+ return mes_len;
+}
+
+/**
+ * thc_dma_configure - Configure DMA settings for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_configure(struct thc_device *dev)
+{
+ struct thc_dma_context *dma_ctx = dev->dma_ctx;
+ int chan;
+
+ thc_reset_dma_settings(dev);
+
+ if (!dma_ctx) {
+ dev_err_once(dev->dev, "Cannot do DMA configure because DMA context is NULL\n");
+ return -EINVAL;
+ }
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ dma_set_prd_base_addr(dev,
+ dma_ctx->dma_config[chan].prd_tbls_dma_handle,
+ &dma_ctx->dma_config[chan]);
+
+ dma_set_prd_control(dev, PRD_ENTRIES_NUM - 1,
+ dma_ctx->dma_config[chan].prd_tbl_num - 1,
+ &dma_ctx->dma_config[chan]);
+ }
+
+ /* Start read2 DMA engine */
+ dma_set_start_bit(dev, &dma_ctx->dma_config[THC_RXDMA2]);
+
+ dev_dbg(dev->dev, "DMA configured successfully!\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_configure, "INTEL_THC");
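+
+/*
+ * Usage sketch (illustrative only, packet sizes hypothetical): the expected
+ * bring-up order for the DMA layer, assuming "tdev" is a valid THC device
+ * context:
+ *
+ * if (!thc_dma_init(tdev))
+ * return -ENOMEM;
+ * ret = thc_dma_set_max_packet_sizes(tdev, SZ_4K, SZ_4K, SZ_4K, 0);
+ * ret = thc_dma_allocate(tdev);
+ * ret = thc_dma_configure(tdev);
+ */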
+
+/**
+ * thc_dma_unconfigure - Unconfigure DMA settings for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_dma_unconfigure(struct thc_device *dev)
+{
+ int chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ dma_set_prd_base_addr(dev, 0, &dev->dma_ctx->dma_config[chan]);
+ dma_clear_prd_control(dev, &dev->dma_ctx->dma_config[chan]);
+ }
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_unconfigure, "INTEL_THC");
+
+static int thc_wait_for_dma_pause(struct thc_device *dev, enum thc_dma_channel channel)
+{
+ u32 ctrl_reg, sts_reg, sts;
+ int ret;
+
+ ctrl_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_CNTRL_1_OFFSET :
+ ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_CNTRL_2_OFFSET :
+ THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET);
+
+ regmap_write_bits(dev->thc_regmap, ctrl_reg, THC_M_PRT_READ_DMA_CNTRL_START, 0);
+
+ sts_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_INT_STS_1_OFFSET :
+ ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_INT_STS_2_OFFSET :
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET);
+
+ ret = regmap_read_poll_timeout(dev->thc_regmap, sts_reg, sts,
+ !(sts & THC_M_PRT_READ_DMA_INT_STS_ACTIVE),
+ THC_DEFAULT_RXDMA_POLLING_US_INTERVAL,
+ THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT);
+
+ if (ret) {
+ dev_err_once(dev->dev,
+ "Timeout while waiting for DMA %d stop\n", channel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int read_dma_buffer(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ u8 prd_table_index, void *read_buff)
+{
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ size_t mes_len, ret;
+ u8 nent;
+
+ if (prd_table_index >= read_config->prd_tbl_num) {
+ dev_err_once(dev->dev, "PRD table index %d too big\n", prd_table_index);
+ return -EINVAL;
+ }
+
+ prd_tbl = &read_config->prd_tbls[prd_table_index];
+ mes_len = calc_message_len(prd_tbl, &nent);
+ if (mes_len > read_config->max_packet_size) {
+ dev_err(dev->dev,
+ "Message length %zu is bigger than buffer length %zu\n",
+ mes_len, read_config->max_packet_size);
+ return -EMSGSIZE;
+ }
+
+ sg = read_config->sgls[prd_table_index];
+ ret = sg_copy_to_buffer(sg, nent, read_buff, mes_len);
+ if (ret != mes_len) {
+ dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
+ ret, mes_len);
+ return -EIO;
+ }
+
+ return mes_len;
+}
+
+static void update_write_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *read_config)
+{
+ u8 write_ptr = dma_get_write_pointer(dev, read_config);
+
+ if (write_ptr + 1 == THC_WRAPAROUND_VALUE_ODD)
+ dma_set_write_pointer(dev, THC_POINTER_WRAPAROUND, read_config);
+ else if (write_ptr + 1 == THC_WRAPAROUND_VALUE_EVEN)
+ dma_set_write_pointer(dev, 0, read_config);
+ else
+ dma_set_write_pointer(dev, write_ptr + 1, read_config);
+}
+
+static int is_dma_buf_empty(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ u8 *read_ptr, u8 *write_ptr)
+{
+ *read_ptr = dma_get_read_pointer(dev, read_config);
+ *write_ptr = dma_get_write_pointer(dev, read_config);
+
+ if ((*read_ptr & THC_POINTER_MASK) == (*write_ptr & THC_POINTER_MASK))
+ if (*read_ptr != *write_ptr)
+ return true;
+
+ return false;
+}
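+
+/*
+ * Mechanical reading of the check above: the buffer is reported empty only
+ * when the masked table indices of the two pointers match while their raw
+ * values differ, i.e. they differ solely in the wraparound marker bits.
+ * Any other combination, including completely identical pointers, is
+ * treated as data pending and lets thc_dma_read() consume the PRD table
+ * at (write_ptr & THC_POINTER_MASK).
+ */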
+
+static int thc_dma_read(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ void *read_buff, size_t *read_len, int *read_finished)
+{
+ u8 read_ptr, write_ptr, prd_table_index;
+ int status;
+
+ if (!is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr)) {
+ prd_table_index = write_ptr & THC_POINTER_MASK;
+
+ status = read_dma_buffer(dev, read_config, prd_table_index, read_buff);
+ if (status <= 0) {
+ dev_err_once(dev->dev, "read DMA buffer failed %d\n", status);
+ return -EIO;
+ }
+
+ *read_len = status;
+
+ /* Clear the relevant PRD table */
+ thc_copy_one_sgl_to_prd(dev, read_config, prd_table_index);
+
+ /* Increment the write pointer to let the HW know we have processed this PRD */
+ update_write_pointer(dev, read_config);
+ }
+
+ /*
+ * This function only reads one frame from PRD table for each call, so we need to
+ * check if all DMAed data is read out and return the flag to the caller. Caller
+ * should repeatedly call thc_dma_read() until all DMAed data is handled.
+ */
+ if (read_finished)
+ *read_finished = is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr) ? 1 : 0;
+
+ return 0;
+}
+
+/**
+ * thc_rxdma_read - Read data from RXDMA buffer
+ *
+ * @dev: The pointer of THC private device context
+ * @dma_channel: The RXDMA engine of read data source
+ * @read_buff: The pointer of the read data buffer
+ * @read_len: The pointer of the read data length
+ * @read_finished: The pointer of the flag indicating if all pending data has been read out
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
+ void *read_buff, size_t *read_len, int *read_finished)
+{
+ struct thc_dma_configuration *dma_config;
+ int ret;
+
+ dma_config = &dev->dma_ctx->dma_config[dma_channel];
+
+ if (!dma_config->is_enabled) {
+ dev_err_once(dev->dev, "The DMA channel %d is not enabled", dma_channel);
+ return -EINVAL;
+ }
+
+ if (!read_buff || !read_len) {
+ dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
+ read_buff, read_len);
+ return -EINVAL;
+ }
+
+ if (dma_channel >= THC_TXDMA) {
+ dev_err(dev->dev, "Unsupported DMA channel for RxDMA read, %d\n", dma_channel);
+ return -EINVAL;
+ }
+
+ ret = thc_dma_read(dev, dma_config, read_buff, read_len, read_finished);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_rxdma_read, "INTEL_THC");
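+
+/*
+ * Usage sketch (illustrative only): since one call consumes a single frame
+ * from the PRD tables, a caller normally loops until read_finished reports
+ * the ring empty, assuming "tdev" is a valid THC device context and "buf"
+ * is at least the RxDMA max packet size:
+ *
+ * size_t len;
+ * int done = 0;
+ *
+ * do {
+ * ret = thc_rxdma_read(tdev, THC_RXDMA2, buf, &len, &done);
+ * if (ret)
+ * break;
+ * (handle len bytes in buf)
+ * } while (!done);
+ */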
+
+static int thc_swdma_read_start(struct thc_device *dev, void *write_buff,
+ size_t write_len, u32 *prd_tbl_len)
+{
+ u32 mask, val, data0 = 0, data1 = 0;
+ int ret;
+
+ ret = thc_interrupt_quiesce(dev, true);
+ if (ret)
+ return ret;
+
+ if (thc_wait_for_dma_pause(dev, THC_RXDMA1) || thc_wait_for_dma_pause(dev, THC_RXDMA2))
+ return -EIO;
+
+ thc_reset_dma_settings(dev);
+
+ mask = THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC |
+ THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN;
+ val = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC, write_len) |
+ ((!prd_tbl_len) ? THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN : 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RPRD_CNTRL_SW_OFFSET,
+ mask, val);
+
+ if (prd_tbl_len) {
+ mask = THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN;
+ val = FIELD_PREP(THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN,
+ *prd_tbl_len);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET,
+ mask, val);
+ }
+
+ if (write_len <= sizeof(u32)) {
+ for (int i = 0; i < write_len; i++)
+ data0 |= *(((u8 *)write_buff) + i) << (i * 8);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
+ } else if (write_len <= 2 * sizeof(u32)) {
+ data0 = *(u32 *)write_buff;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
+
+ for (int i = 0; i < write_len - sizeof(u32); i++)
+ data1 |= *(((u8 *)write_buff) + sizeof(u32) + i) << (i * 8);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET, data1);
+ }
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_SWDMA]);
+
+ return 0;
+}
+
+static int thc_swdma_read_completion(struct thc_device *dev)
+{
+ int ret;
+
+ ret = thc_wait_for_dma_pause(dev, THC_SWDMA);
+ if (ret)
+ return ret;
+
+ thc_reset_dma_settings(dev);
+
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_RXDMA2]);
+
+ ret = thc_interrupt_quiesce(dev, false);
+
+ return ret;
+}
+
+/**
+ * thc_swdma_read - Use software DMA to read data from touch device
+ *
+ * @dev: Pointer to the THC private device context
+ * @write_buff: Pointer to the write buffer for the SWDMA sequence
+ * @write_len: The write data length for the SWDMA sequence
+ * @prd_tbl_len: The PRD table length of the SWDMA engine, can be set to NULL
+ * @read_buff: Pointer to the read data buffer
+ * @read_len: Pointer to the read data length
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
+ u32 *prd_tbl_len, void *read_buff, size_t *read_len)
+{
+ int ret;
+
+ if (!(&dev->dma_ctx->dma_config[THC_SWDMA])->is_enabled) {
+ dev_err_once(dev->dev, "The SWDMA channel is not enabled");
+ return -EINVAL;
+ }
+
+ if (!read_buff || !read_len) {
+ dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
+ read_buff, read_len);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ dev->swdma_done = false;
+
+ ret = thc_swdma_read_start(dev, write_buff, write_len, prd_tbl_len);
+ if (ret)
+ goto end;
+
+ ret = wait_event_interruptible_timeout(dev->swdma_complete_wait, dev->swdma_done, 1 * HZ);
+ if (ret <= 0 || !dev->swdma_done) {
+ dev_err_once(dev->dev, "timeout for waiting SWDMA completion\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ ret = thc_dma_read(dev, &dev->dma_ctx->dma_config[THC_SWDMA], read_buff, read_len, NULL);
+ if (ret)
+ goto end;
+
+ ret = thc_swdma_read_completion(dev);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_swdma_read, "INTEL_THC");
+
+static int write_dma_buffer(struct thc_device *dev,
+ void *buffer, size_t buf_len)
+{
+ struct thc_dma_configuration *write_config = &dev->dma_ctx->dma_config[THC_TXDMA];
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ unsigned long len_left;
+ size_t ret;
+ u8 nent;
+ int i;
+
+ /* There is only one PRD table for write */
+ prd_tbl = &write_config->prd_tbls[0];
+
+ if (calc_prd_entries_num(prd_tbl, buf_len, &nent) < 0) {
+ dev_err(dev->dev, "Tx message length too big (%zu)\n", buf_len);
+ return -EOVERFLOW;
+ }
+
+ sg = write_config->sgls[0];
+ ret = sg_copy_from_buffer(sg, nent, buffer, buf_len);
+ if (ret != buf_len) {
+ dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
+ ret, buf_len);
+ return -EIO;
+ }
+
+ prd_tbl = &write_config->prd_tbls[0];
+ len_left = buf_len;
+
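+ /* Mirror each DMA-mapped SG segment into a PRD entry, flagging the last one */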
+ for_each_sg(write_config->sgls[0], sg, write_config->sgls_nent[0], i) {
+ if (sg_dma_address(sg) == 0 || sg_dma_len(sg) == 0) {
+ dev_err_once(dev->dev, "SGList: zero address or length\n");
+ return -EINVAL;
+ }
+
+ prd_tbl->entries[i].dest_addr =
+ sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
+
+ if (len_left < sg_dma_len(sg)) {
+ prd_tbl->entries[i].len = len_left;
+ prd_tbl->entries[i].end_of_prd = 1;
+ break;
+ }
+
+ prd_tbl->entries[i].len = sg_dma_len(sg);
+ prd_tbl->entries[i].end_of_prd = 0;
+
+ len_left -= sg_dma_len(sg);
+ }
+
+ dma_set_prd_control(dev, i, 0, write_config);
+
+ return 0;
+}
+
+static void thc_ensure_performance_limitations(struct thc_device *dev)
+{
+ /*
+ * Minimum amount of delay the THC / QUICKSPI driver must wait
+ * between end of write operation and begin of read operation.
+ * This value shall be in 10us multiples.
+ */
+ if (dev->perf_limit > 0)
+ udelay(dev->perf_limit * 10);
+}
+
+static void thc_dma_write_completion(struct thc_device *dev)
+{
+ thc_ensure_performance_limitations(dev);
+}
+
+/**
+ * thc_dma_write - Use TXDMA to write data to touch device
+ *
+ * @dev: Pointer to the THC private device context
+ * @buffer: Pointer to the write data buffer
+ * @buf_len: The write data length
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len)
+{
+ bool restore_interrupts = false;
+ u32 sts, ctrl;
+ int ret;
+
+ if (!(&dev->dma_ctx->dma_config[THC_TXDMA])->is_enabled) {
+ dev_err_once(dev->dev, "The TxDMA channel is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!buffer || !buf_len) {
+ dev_err(dev->dev, "Invalid input parameters, buffer %p, buf_len %zu\n",
+ buffer, buf_len);
+ return -EINVAL;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, &sts);
+ if (sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE) {
+ dev_err_once(dev->dev, "THC TxDMA is till active and can't start again\n");
+ return -EBUSY;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);
+
+ ret = write_dma_buffer(dev, buffer, buf_len);
+ if (ret)
+ goto end;
+
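+ /*
+ * With a performance limit configured, quiesce device interrupts for
+ * the duration of the write unless hardware already reports the
+ * quiesced state; they are restored once the write finishes.
+ */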
+ if (dev->perf_limit && !(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS)) {
+ ret = thc_interrupt_quiesce(dev, true);
+ if (ret)
+ goto end;
+
+ restore_interrupts = true;
+ }
+
+ dev->write_done = false;
+
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_TXDMA]);
+
+ ret = wait_event_interruptible_timeout(dev->write_complete_wait, dev->write_done, 1 * HZ);
+ if (ret <= 0 || !dev->write_done) {
+ dev_err_once(dev->dev, "timeout for waiting TxDMA completion\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ thc_dma_write_completion(dev);
+ ret = 0;
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ /* Restore device interrupts whether the write succeeded or failed */
+ if (restore_interrupts) {
+ int err = thc_interrupt_quiesce(dev, false);
+
+ if (!ret)
+ ret = err;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_write, "INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
new file mode 100644
index 000000000000..ca923ff2bef9
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_DMA_H_
+#define _INTEL_THC_DMA_H_
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/sizes.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+#define THC_POINTER_MASK GENMASK(6, 0)
+#define THC_POINTER_WRAPAROUND 0x80
+#define THC_WRAPAROUND_VALUE_ODD 0x10
+#define THC_WRAPAROUND_VALUE_EVEN 0x90
+#define THC_MIN_BYTES_PER_SG_LIST_ENTRY SZ_4K
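+
+/*
+ * The DMA read/write pointers appear to be 7-bit ring indices
+ * (THC_POINTER_MASK) with bit 7 (THC_POINTER_WRAPAROUND) toggling on each
+ * wrap, the classic scheme that distinguishes an empty ring (pointers
+ * identical) from a full one (indices equal, wrap bits differing).
+ */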
+
+#define THC_DEFAULT_RXDMA_POLLING_US_INTERVAL 100
+#define THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT (10 * USEC_PER_MSEC)
+
+/*
+ * THC requires 1KB-aligned addresses. dest_addr is 54 bits, not 64,
+ * so the lower 10 bits of the address need not be stored.
+ */
+#define THC_ADDRESS_SHIFT 10
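+
+/*
+ * A PRD entry therefore stores (sg_dma_address(sg) >> THC_ADDRESS_SHIFT),
+ * as write_dma_buffer() does; the hardware supplies the ten implied zero
+ * bits of the 1KB-aligned address.
+ */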
+
+/**
+ * enum thc_dma_channel - THC DMA channels
+ * @THC_RXDMA1: legacy channel, reserved for raw data reading
+ * @THC_RXDMA2: DMA to read HID data from touch device
+ * @THC_TXDMA: DMA to write to touch device
+ * @THC_SWDMA: SW triggered DMA to write and read from touch device
+ */
+enum thc_dma_channel {
+ THC_RXDMA1 = 0,
+ THC_RXDMA2 = 1,
+ THC_TXDMA = 2,
+ THC_SWDMA = 3,
+ MAX_THC_DMA_CHANNEL
+};
+
+/**
+ * struct thc_prd_entry - THC DMA Physical Memory Descriptor (PRD) entry
+ * @dest_addr: bit[53:0], destination address in system memory
+ * @int_on_completion: bit[63], if set, thc will trigger interrupt to driver
+ * @len: bit[87:64], length of this entry
+ * @end_of_prd: bit[88], if set, this entry is last one of current PRD table
+ * @hw_status: bit[90:89], hw status bits
+ */
+struct thc_prd_entry {
+ u64 dest_addr : 54;
+ u64 reserved1 : 9;
+ u64 int_on_completion : 1;
+ u64 len : 24;
+ u64 end_of_prd : 1;
+ u64 hw_status : 2;
+ u64 reserved2 : 37;
+};
+
+/*
+ * Max OS memory fragmentation will be at a 4KB boundary, thus to address 1MB
+ * of virtually contiguous memory 256 PRD entries are required for a single
+ * PRD Table. SW writes the number of PRD Entries for each PRD table in the
+ * THC_M_PRT_RPRD_CNTRL.PTEC register field. The PRD entry's length must be
+ * multiple of 4KB except for the last entry in a PRD table.
+ * This is the max possible number of entries supported by HW; in practice
+ * there will be fewer entries in each PRD table (the actual number is
+ * given by the scatter-gather list allocation).
+ */
+#define PRD_ENTRIES_NUM 16
+
+/*
+ * Number of PRD tables equals to number of data buffers.
+ * The max number of PRD tables supported by the HW is 128,
+ * but we allocate only 16.
+ */
+#define PRD_TABLES_NUM 16
+
+/* THC DMA Physical Memory Descriptor Table */
+struct thc_prd_table {
+ struct thc_prd_entry entries[PRD_ENTRIES_NUM];
+};
+
+#define PRD_TABLE_SIZE sizeof(struct thc_prd_table)
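+
+/*
+ * Illustrative sizing: a PRD table holds up to PRD_ENTRIES_NUM = 16
+ * segments, so at the 4KB minimum segment size one table covers
+ * 16 * 4KB = 64KB per DMA message.
+ */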
+
+/**
+ * struct thc_dma_configuration - THC DMA configure
+ * @dma_channel: DMA channel for current DMA configuration
+ * @prd_tbls_dma_handle: DMA buffer handle
+ * @dir: direction of DMA for this config
+ * @is_enabled: flag indicating whether the DMA channel is enabled
+ * @prd_tbls: PRD tables for current DMA
+ * @sgls: array of pointers to scatter-gather lists
+ * @sgls_nent: actual number of entries per sg list
+ * @prd_tbl_num: actual number of PRD tables
+ * @max_packet_size: size of the buffer needed for 1 DMA message (1 PRD table)
+ * @prd_base_addr_high: high 32 bits of the PRD table physical address
+ * @prd_base_addr_low: low 32 bits of the PRD table physical address
+ * @prd_cntrl: PRD control register value
+ * @dma_cntrl: DMA control register value
+ */
+struct thc_dma_configuration {
+ enum thc_dma_channel dma_channel;
+ dma_addr_t prd_tbls_dma_handle;
+ enum dma_data_direction dir;
+ bool is_enabled;
+
+ struct thc_prd_table *prd_tbls;
+ struct scatterlist *sgls[PRD_TABLES_NUM];
+ u8 sgls_nent[PRD_TABLES_NUM];
+ u8 prd_tbl_num;
+
+ size_t max_packet_size;
+ u32 prd_base_addr_high;
+ u32 prd_base_addr_low;
+ u32 prd_cntrl;
+ u32 dma_cntrl;
+};
+
+/*
+ * THC DMA context
+ * Store all THC Channel configures
+ */
+struct thc_dma_context {
+ struct thc_dma_configuration dma_config[MAX_THC_DMA_CHANNEL];
+ u8 use_write_interrupts;
+};
+
+struct thc_device;
+
+int thc_dma_set_max_packet_sizes(struct thc_device *dev,
+ size_t mps_read1, size_t mps_read2,
+ size_t mps_write, size_t mps_swdma);
+int thc_dma_allocate(struct thc_device *dev);
+int thc_dma_configure(struct thc_device *dev);
+void thc_dma_unconfigure(struct thc_device *dev);
+void thc_dma_release(struct thc_device *dev);
+int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
+ void *read_buff, size_t *read_len, int *read_finished);
+int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
+ u32 *prd_tbl_len, void *read_buff, size_t *read_len);
+int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len);
+
+struct thc_dma_context *thc_dma_init(struct thc_device *dev);
+
+#endif /* _INTEL_THC_DMA_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
new file mode 100644
index 000000000000..6729c4c25dab
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
@@ -0,0 +1,881 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_HW_H_
+#define _INTEL_THC_HW_H_
+
+#include <linux/bits.h>
+
+/* THC registers offset */
+/* Touch Host Controller Control Register */
+#define THC_M_PRT_CONTROL_OFFSET 0x1008
+/* THC SPI Bus Configuration Register */
+#define THC_M_PRT_SPI_CFG_OFFSET 0x1010
+/* THC SPI Bus Read Opcode Register */
+#define THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET 0x1014
+/* THC SPI Bus Read Opcode Register */
+#define THC_M_PRT_SPI_DMARD_OPCODE_OFFSET 0x1018
+/* THC SPI Bus Write Opcode Register */
+#define THC_M_PRT_SPI_WR_OPCODE_OFFSET 0x101C
+/* THC Interrupt Enable Register */
+#define THC_M_PRT_INT_EN_OFFSET 0x1020
+/* THC Interrupt Status Register */
+#define THC_M_PRT_INT_STATUS_OFFSET 0x1024
+/* THC Error Cause Register */
+#define THC_M_PRT_ERR_CAUSE_OFFSET 0x1028
+/* THC SW sequencing Control */
+#define THC_M_PRT_SW_SEQ_CNTRL_OFFSET 0x1040
+/* THC SW sequencing Status */
+#define THC_M_PRT_SW_SEQ_STS_OFFSET 0x1044
+/* THC SW Sequencing Data DW0 or SPI Address Register */
+#define THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET 0x1048
+/* THC SW sequencing Data DW1 */
+#define THC_M_PRT_SW_SEQ_DATA1_OFFSET 0x104C
+/* THC SW sequencing Data DW2 */
+#define THC_M_PRT_SW_SEQ_DATA2_OFFSET 0x1050
+/* THC SW sequencing Data DW3 */
+#define THC_M_PRT_SW_SEQ_DATA3_OFFSET 0x1054
+/* THC SW sequencing Data DW4 */
+#define THC_M_PRT_SW_SEQ_DATA4_OFFSET 0x1058
+/* THC SW sequencing Data DW5 */
+#define THC_M_PRT_SW_SEQ_DATA5_OFFSET 0x105C
+/* THC SW sequencing Data DW6 */
+#define THC_M_PRT_SW_SEQ_DATA6_OFFSET 0x1060
+/* THC SW sequencing Data DW7 */
+#define THC_M_PRT_SW_SEQ_DATA7_OFFSET 0x1064
+/* THC SW sequencing Data DW8 */
+#define THC_M_PRT_SW_SEQ_DATA8_OFFSET 0x1068
+/* THC SW sequencing Data DW9 */
+#define THC_M_PRT_SW_SEQ_DATA9_OFFSET 0x106C
+/* THC SW sequencing Data DW10 */
+#define THC_M_PRT_SW_SEQ_DATA10_OFFSET 0x1070
+/* THC SW sequencing Data DW11 */
+#define THC_M_PRT_SW_SEQ_DATA11_OFFSET 0x1074
+/* THC SW sequencing Data DW12 */
+#define THC_M_PRT_SW_SEQ_DATA12_OFFSET 0x1078
+/* THC SW sequencing Data DW13 */
+#define THC_M_PRT_SW_SEQ_DATA13_OFFSET 0x107C
+/* THC SW sequencing Data DW14 */
+#define THC_M_PRT_SW_SEQ_DATA14_OFFSET 0x1080
+/* THC SW sequencing Data DW15 */
+#define THC_M_PRT_SW_SEQ_DATA15_OFFSET 0x1084
+/* THC SW sequencing Data DW16 */
+#define THC_M_PRT_SW_SEQ_DATA16_OFFSET 0x1088
+/* THC Write PRD Base Address Register Low */
+#define THC_M_PRT_WPRD_BA_LOW_OFFSET 0x1090
+/* THC Write PRD Base Address Register High */
+#define THC_M_PRT_WPRD_BA_HI_OFFSET 0x1094
+/* THC Write DMA Control */
+#define THC_M_PRT_WRITE_DMA_CNTRL_OFFSET 0x1098
+/* THC Write Interrupt Status */
+#define THC_M_PRT_WRITE_INT_STS_OFFSET 0x109C
+/* THC Write DMA Error Register */
+#define THC_M_PRT_WRITE_DMA_ERR_OFFSET 0x10A0
+/* THC device address for the bulk write */
+#define THC_M_PRT_WR_BULK_ADDR_OFFSET 0x10B4
+/* THC Device Interrupt Cause Register Address */
+#define THC_M_PRT_DEV_INT_CAUSE_ADDR_OFFSET 0x10B8
+/* THC Device Interrupt Cause Register Value */
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_OFFSET 0x10BC
+/* THC TXDMA Frame Count */
+#define THC_M_PRT_TX_FRM_CNT_OFFSET 0x10E0
+/* THC TXDMA Packet Count */
+#define THC_M_PRT_TXDMA_PKT_CNT_OFFSET 0x10E4
+/* THC Device Interrupt Count on this port */
+#define THC_M_PRT_DEVINT_CNT_OFFSET 0x10E8
+/* Touch Device Interrupt Cause register Format Configuration Register 1 */
+#define THC_M_PRT_DEVINT_CFG_1_OFFSET 0x10EC
+/* Touch Device Interrupt Cause register Format Configuration Register 2 */
+#define THC_M_PRT_DEVINT_CFG_2_OFFSET 0x10F0
+/* THC Read PRD Base Address Low for the 1st RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_1_OFFSET 0x1100
+/* THC Read PRD Base Address High for the 1st RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_1_OFFSET 0x1104
+/* THC Read PRD Control for the 1st RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_1_OFFSET 0x1108
+/* THC Read DMA Control for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_1_OFFSET 0x110C
+/* THC Read Interrupt Status for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_1_OFFSET 0x1110
+/* THC Read DMA Error Register for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_ERR_1_OFFSET 0x1114
+/* Touch Sequencer GuC Tail Offset Address Low for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_LOW_1_OFFSET 0x1118
+/* Touch Sequencer GuC Tail Offset Address High for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_HI_1_OFFSET 0x111C
+/* Touch Host Controller GuC Work Queue Item Size for the 1st RXDMA */
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_1_OFFSET 0x1120
+/* Touch Host Controller GuC Control register for the 1st RXDMA */
+#define THC_M_PRT_GUC_WORKQ_SZ_1_OFFSET 0x1124
+/* Touch Sequencer Control for the 1st DMA */
+#define THC_M_PRT_TSEQ_CNTRL_1_OFFSET 0x1128
+/* Touch Sequencer GuC Doorbell Address Low for the 1st RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_LOW_1_OFFSET 0x1130
+/* Touch Sequencer GuC Doorbell Address High for the 1st RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_HI_1_OFFSET 0x1134
+/* Touch Sequencer GuC Doorbell Data */
+#define THC_M_PRT_GUC_DB_DATA_1_OFFSET 0x1138
+/* Touch Sequencer GuC Tail Offset Initial Value for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_INITVAL_1_OFFSET 0x1140
+/* THC Device Address for the bulk/touch data read for the 1st RXDMA */
+#define THC_M_PRT_RD_BULK_ADDR_1_OFFSET 0x1170
+/* THC Gfx/SW Doorbell Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_DB_CNT_1_OFFSET 0x11A0
+/* THC Frame Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_1_OFFSET 0x11A4
+/* THC Micro Frame Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_UFRM_CNT_1_OFFSET 0x11A8
+/* THC Packet Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_1_OFFSET 0x11AC
+/*
+ * THC Software Interrupt Count from the 1st Stream RXDMA
+ * on this port
+ */
+#define THC_M_PRT_SWINT_CNT_1_OFFSET 0x11B0
+/* Touch Sequencer Frame Drop Counter for the 1st RXDMA */
+#define THC_M_PRT_FRAME_DROP_CNT_1_OFFSET 0x11B4
+/* THC Coalescing 1 */
+#define THC_M_PRT_COALESCE_1_OFFSET 0x11B8
+/* THC Read PRD Base Address Low for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_2_OFFSET 0x1200
+/* THC Read PRD Base Address High for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_2_OFFSET 0x1204
+/* THC Read PRD Control for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_2_OFFSET 0x1208
+/* THC Read DMA Control for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_2_OFFSET 0x120C
+/* THC Read Interrupt Status for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_2_OFFSET 0x1210
+/* THC Read DMA Error Register for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_ERR_2_OFFSET 0x1214
+/* Touch Sequencer GuC Tail Offset Address Low for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_LOW_2_OFFSET 0x1218
+/* Touch Sequencer GuC Tail Offset Address High for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_HI_2_OFFSET 0x121C
+/* Touch Host Controller GuC Work Queue Item Size for the 2nd RXDMA */
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_2_OFFSET 0x1220
+/* Touch Host Controller GuC Control register for the 2nd RXDMA */
+#define THC_M_PRT_GUC_WORKQ_SZ_2_OFFSET 0x1224
+/* Touch Sequencer Control for the 2nd DMA */
+#define THC_M_PRT_TSEQ_CNTRL_2_OFFSET 0x1228
+/* Touch Sequencer GuC Doorbell Address Low for the 2nd RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_LOW_2_OFFSET 0x1230
+/* Touch Sequencer GuC Doorbell Address High for the 2nd RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_HI_2_OFFSET 0x1234
+/* Touch Sequencer GuC Doorbell Data for PRD2 */
+#define THC_M_PRT_GUC_DB_DATA_2_OFFSET 0x1238
+/* Touch Sequencer GuC Tail Offset Initial Value for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_INITVAL_2_OFFSET 0x1240
+/* THC Device Address for the bulk/touch data read for the 2nd RXDMA */
+#define THC_M_PRT_RD_BULK_ADDR_2_OFFSET 0x1270
+/* THC Gfx/SW Doorbell Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_DB_CNT_2_OFFSET 0x12A0
+/* THC Frame Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_2_OFFSET 0x12A4
+/* THC Micro Frame Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_UFRM_CNT_2_OFFSET 0x12A8
+/* THC Packet Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_2_OFFSET 0x12AC
+/*
+ * THC Software Interrupt Count from the 2nd Stream RXDMA
+ * on this port
+ */
+#define THC_M_PRT_SWINT_CNT_2_OFFSET 0x12B0
+/* Touch Sequencer Frame Drop Counter for the 2nd RXDMA */
+#define THC_M_PRT_FRAME_DROP_CNT_2_OFFSET 0x12B4
+/* THC Coalescing 2 */
+#define THC_M_PRT_COALESCE_2_OFFSET 0x12B8
+/* THC SPARE REGISTER */
+#define THC_M_PRT_SPARE_REG_OFFSET 0x12BC
+/* THC Read PRD Base Address Low for the SW RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_SW_OFFSET 0x12C0
+/* THC Read PRD Base Address High for the SW RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_SW_OFFSET 0x12C4
+/* THC Read PRD Control for the SW RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_SW_OFFSET 0x12C8
+/* THC Read DMA Control for the SW RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET 0x12CC
+/* THC Read Interrupt Status for the SW RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET 0x12D0
+/* Touch Sequencer Control for the SW DMA */
+#define THC_M_PRT_TSEQ_CNTRL_SW_OFFSET 0x12D4
+/* Address for the bulk read for SW DMA engine */
+#define THC_M_PRT_RD_BULK_ADDR_SW_OFFSET 0x12D8
+/* THC Frame Count from the SW RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_SW_OFFSET 0x12DC
+/* THC Packet Count from the SW RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_SW_OFFSET 0x12E0
+/* SW DMA PRD Table Length */
+#define THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET 0x12E4
+/* THC timing based Frame/Interrupt coalescing control register for 1st RXDMA */
+#define THC_M_PRT_COALESCE_CNTRL_1_OFFSET 0x12E8
+/* THC timing based Frame/Interrupt coalescing control register for 2nd RXDMA */
+#define THC_M_PRT_COALESCE_CNTRL_2_OFFSET 0x12EC
+/* Touch Sequencer PRD Table Empty Counter for the 1st RXDMA */
+#define THC_M_PRT_PRD_EMPTY_CNT_1_OFFSET 0x12F0
+/* Touch Sequencer PRD Table Empty Counter for the 2nd RXDMA */
+#define THC_M_PRT_PRD_EMPTY_CNT_2_OFFSET 0x12F4
+/* THC coalescing status to reflect the current coalescing FSM state for 1st RXDMA */
+#define THC_M_PRT_COALESCE_STS_1_OFFSET 0x12F8
+/* THC coalescing status to reflect the current coalescing FSM state for 2nd RXDMA */
+#define THC_M_PRT_COALESCE_STS_2_OFFSET 0x12FC
+/* THC Register for the SPI Port Duty Cycle Configuration */
+#define THC_M_PRT_SPI_DUTYC_CFG_OFFSET 0x1300
+/* THC Register for SW I2C Write Sequencing control */
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET 0x1304
+/* THC current Timestamp Register for RXDMA1 */
+#define THC_M_PRT_TIMESTAMP_1_OFFSET 0x1308
+/* THC current Timestamp Register for RXDMA2 */
+#define THC_M_PRT_TIMESTAMP_2_OFFSET 0x130C
+/* Current SYNC Event Timestamp Register */
+#define THC_M_PRT_SYNC_TIMESTAMP_OFFSET 0x1310
+/* THC Display Sync Register */
+#define THC_M_PRT_DISP_SYNC_OFFSET 0x1314
+/* THC Display Sync Register 2 */
+#define THC_M_PRT_DISP_SYNC_2_OFFSET 0x1318
+/* THC I2C Configuration Register */
+#define THC_M_PRT_I2C_CFG_OFFSET 0x131C
+
+/* THC register bits definition */
+#define TXN_ERR_INT_STS_BIT BIT(28)
+#define TXN_FATAL_INT_STS_BIT BIT(30)
+
+#define NONDMA_INT_STS_BIT BIT(4)
+#define EOF_INT_STS_BIT BIT(5)
+
+#define THC_CFG_DID_VID_VID GENMASK(15, 0)
+#define THC_CFG_DID_VID_DID GENMASK(31, 16)
+
+#define THC_CFG_STS_CMD_IOSE BIT(0)
+#define THC_CFG_STS_CMD_MSE BIT(1)
+#define THC_CFG_STS_CMD_BME BIT(2)
+#define THC_CFG_STS_CMD_SPCYC BIT(3)
+#define THC_CFG_STS_CMD_MWRIEN BIT(4)
+#define THC_CFG_STS_CMD_VGAPS BIT(5)
+#define THC_CFG_STS_CMD_PERRR BIT(6)
+#define THC_CFG_STS_CMD_SERREN BIT(8)
+#define THC_CFG_STS_CMD_FBTBEN BIT(9)
+#define THC_CFG_STS_CMD_INTD BIT(10)
+#define THC_CFG_STS_CMD_INTS BIT(19)
+#define THC_CFG_STS_CMD_CAPL BIT(20)
+#define THC_CFG_STS_CMD_MCAP BIT(21)
+#define THC_CFG_STS_CMD_FBTBC BIT(23)
+#define THC_CFG_STS_CMD_MDPE BIT(24)
+#define THC_CFG_STS_CMD_DEVT GENMASK(26, 25)
+#define THC_CFG_STS_CMD_STA BIT(27)
+#define THC_CFG_STS_CMD_RTA BIT(28)
+#define THC_CFG_STS_CMD_RMA BIT(29)
+#define THC_CFG_STS_CMD_SSE BIT(30)
+#define THC_CFG_STS_CMD_DPE BIT(31)
+
+#define THC_CFG_CC_RID_RID GENMASK(7, 0)
+#define THC_CFG_CC_RID_PI GENMASK(15, 8)
+#define THC_CFG_CC_RID_SCC GENMASK(23, 16)
+#define THC_CFG_CC_RID_BCC GENMASK(31, 24)
+
+#define THC_CFG_BIST_HTYPE_LT_CLS_CLSZ GENMASK(7, 0)
+#define THC_CFG_BIST_HTYPE_LT_CLS_LT GENMASK(15, 8)
+#define THC_CFG_BIST_HTYPE_LT_CLS_HTYPE GENMASK(22, 16)
+#define THC_CFG_BIST_HTYPE_LT_CLS_MFD BIT(23)
+
+#define THC_CFG_BAR0_LOW_MEMSPACE BIT(0)
+#define THC_CFG_BAR0_LOW_TYP GENMASK(2, 1)
+#define THC_CFG_BAR0_LOW_PREFETCH BIT(3)
+#define THC_CFG_BAR0_LOW_MEMSIZE GENMASK(14, 4)
+#define THC_CFG_BAR0_LOW_MEMBAR GENMASK(31, 15)
+#define THC_CFG_BAR0_HI_MEMBAR GENMASK(31, 0)
+
+#define THC_CFG_SID_SVID_SSVID GENMASK(15, 0)
+#define THC_CFG_SID_SVID_SSID GENMASK(31, 16)
+
+#define THC_CFG_CAPP_CP GENMASK(7, 0)
+
+#define THC_CFG_INT_ILINE GENMASK(7, 0)
+#define THC_CFG_INT_IPIN GENMASK(15, 8)
+
+#define THC_CFG_UR_STS_CTL_URRE BIT(0)
+#define THC_CFG_UR_STS_CTL_URD BIT(1)
+#define THC_CFG_UR_STS_CTL_FD BIT(2)
+
+#define THC_CFG_MSIMC_MSINP_MSICID_CAPID GENMASK(7, 0)
+#define THC_CFG_MSIMC_MSINP_MSICID_NXTP GENMASK(15, 8)
+#define THC_CFG_MSIMC_MSINP_MSICID_MSIE BIT(16)
+#define THC_CFG_MSIMC_MSINP_MSICID_MMC GENMASK(19, 17)
+#define THC_CFG_MSIMC_MSINP_MSICID_MMEN GENMASK(22, 20)
+#define THC_CFG_MSIMC_MSINP_MSICID_XAC BIT(23)
+#define THC_CFG_MSIMC_MSINP_MSICID_PVMC BIT(24)
+#define THC_CFG_MSIMA_MADDR GENMASK(31, 2)
+#define THC_CFG_MSIMUA_MAUDDR GENMASK(31, 0)
+#define THC_CFG_MSIMD_MDAT GENMASK(15, 0)
+
+#define THC_CFG_PMCAP_PMNP_PMCID_CAPP GENMASK(7, 0)
+#define THC_CFG_PMCAP_PMNP_PMCID_NXTP GENMASK(15, 8)
+#define THC_CFG_PMCAP_PMNP_PMCID_VER GENMASK(18, 16)
+#define THC_CFG_PMCAP_PMNP_PMCID_PMECLK BIT(19)
+#define THC_CFG_PMCAP_PMNP_PMCID_DSI BIT(21)
+#define THC_CFG_PMCAP_PMNP_PMCID_AUXC GENMASK(24, 22)
+#define THC_CFG_PMCAP_PMNP_PMCID_D1S BIT(25)
+#define THC_CFG_PMCAP_PMNP_PMCID_D2S BIT(26)
+#define THC_CFG_PMCAP_PMNP_PMCID_PMES GENMASK(31, 27)
+
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PWRST GENMASK(1, 0)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_NSR BIT(3)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PMEEN BIT(8)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_DSEL GENMASK(12, 9)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_DS GENMASK(14, 13)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PMESTS BIT(15)
+
+#define THC_CFG_DEVIDLE_CAPPID GENMASK(7, 0)
+#define THC_CFG_DEVIDLE_NCAPPP GENMASK(15, 8)
+#define THC_CFG_DEVIDLE_LENGTH GENMASK(23, 16)
+#define THC_CFG_DEVIDLE_REV GENMASK(27, 24)
+#define THC_CFG_DEVIDLE_VID GENMASK(31, 28)
+
+#define THC_CFG_VSHDR_VSECID GENMASK(15, 0)
+#define THC_CFG_VSHDR_VSECR GENMASK(19, 16)
+#define THC_CFG_VSHDR_VSECL GENMASK(31, 20)
+
+#define THC_CFG_SWLTRPTR_VALID BIT(0)
+#define THC_CFG_SWLTRPTR_BARNUM GENMASK(3, 1)
+#define THC_CFG_SWLTRPTR_SWLTRLOC GENMASK(31, 4)
+
+#define THC_CFG_DEVIDLEPTR_VALID BIT(0)
+#define THC_CFG_DEVIDLEPTR_BARNUM GENMASK(3, 1)
+#define THC_CFG_DEVIDLEPTR_DEVIDLELOC GENMASK(31, 4)
+#define THC_CFG_DEVIDLEPOL_POLV GENMASK(9, 0)
+#define THC_CFG_DEVIDLEPOL_POLS GENMASK(12, 10)
+
+#define THC_CFG_PCE_SPE BIT(0)
+#define THC_CFG_PCE_I3E BIT(1)
+#define THC_CFG_PCE_D3HE BIT(2)
+#define THC_CFG_PCE_SE BIT(3)
+#define THC_CFG_PCE_HAE BIT(5)
+
+#define THC_CFG_MANID_PROC GENMASK(7, 0)
+#define THC_CFG_MANID_MID GENMASK(15, 8)
+#define THC_CFG_MANID_MSID GENMASK(23, 16)
+#define THC_CFG_MANID_DOT GENMASK(27, 24)
+
+#define THC_M_CMN_DEVIDLECTRL_CIP BIT(0)
+#define THC_M_CMN_DEVIDLECTRL_IR BIT(1)
+#define THC_M_CMN_DEVIDLECTRL_DEVIDLE BIT(2)
+#define THC_M_CMN_DEVIDLECTRL_RR BIT(3)
+#define THC_M_CMN_DEVIDLECTRL_IRC BIT(4)
+
+#define THC_M_CMN_LTR_CTRL_OFFSET 0x14
+#define THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ BIT(0)
+#define THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN BIT(1)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_REQ BIT(2)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_EN BIT(3)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_SCALE GENMASK(6, 4)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_VAL GENMASK(16, 7)
+#define THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE GENMASK(19, 17)
+#define THC_M_CMN_LTR_CTRL_ACT_LTR_VAL GENMASK(29, 20)
+#define THC_M_CMN_LTR_CTRL_LAST_LTR_SENT GENMASK(31, 30)
+
+#define THC_M_PRT_CONTROL_TSFTRST BIT(0)
+#define THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN BIT(1)
+#define THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS BIT(2)
+#define THC_M_PRT_CONTROL_DEVRST BIT(3)
+#define THC_M_PRT_CONTROL_THC_DRV_LOCK_EN BIT(13)
+#define THC_M_PRT_CONTROL_THC_INSTANCE_INDEX GENMASK(18, 16)
+#define THC_M_PRT_CONTROL_PORT_INDEX GENMASK(22, 20)
+#define THC_M_PRT_CONTROL_THC_ARB_POLICY GENMASK(25, 24)
+#define THC_M_PRT_CONTROL_THC_BIOS_LOCK_EN BIT(27)
+#define THC_M_PRT_CONTROL_PORT_SUPPORTED BIT(28)
+#define THC_M_PRT_CONTROL_SPI_IO_RDY BIT(29)
+#define THC_M_PRT_CONTROL_PORT_TYPE GENMASK(31, 30)
+
+#define THC_M_PRT_SPI_CFG_SPI_TRDC GENMASK(1, 0)
+#define THC_M_PRT_SPI_CFG_SPI_TRMODE GENMASK(3, 2)
+#define THC_M_PRT_SPI_CFG_SPI_TCRF GENMASK(6, 4)
+#define THC_M_PRT_SPI_CFG_SPI_RD_MPS GENMASK(15, 7)
+#define THC_M_PRT_SPI_CFG_SPI_TWMODE GENMASK(19, 18)
+#define THC_M_PRT_SPI_CFG_SPI_TCWF GENMASK(22, 20)
+#define THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN BIT(23)
+#define THC_M_PRT_SPI_CFG_SPI_WR_MPS GENMASK(31, 24)
+
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO GENMASK(31, 24)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO GENMASK(23, 16)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO GENMASK(15, 8)
+
+#define THC_M_PRT_INT_EN_SIPE BIT(0)
+#define THC_M_PRT_INT_EN_SBO BIT(1)
+#define THC_M_PRT_INT_EN_SIDR BIT(2)
+#define THC_M_PRT_INT_EN_SOFB BIT(3)
+#define THC_M_PRT_INT_EN_INVLD_DEV_ENTRY_INT_EN BIT(9)
+#define THC_M_PRT_INT_EN_FRAME_BABBLE_ERR_INT_EN BIT(10)
+#define THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN BIT(12)
+#define THC_M_PRT_INT_EN_PRD_ENTRY_ERR_INT_EN BIT(13)
+#define THC_M_PRT_INT_EN_DISP_SYNC_EVT_INT_EN BIT(14)
+#define THC_M_PRT_INT_EN_DEV_RAW_INT_EN BIT(15)
+#define THC_M_PRT_INT_EN_FATAL_ERR_INT_EN BIT(16)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_UNDER_INT_EN BIT(17)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_OVER_INT_EN BIT(18)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_FULL_INT_EN BIT(19)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_OVER_INT_EN BIT(20)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_EMPTY_INT_EN BIT(21)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_ABRT_INT_EN BIT(22)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_SCL_STUCK_AT_LOW_DET_INT_EN BIT(24)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_STOP_DET_INT_EN BIT(25)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_START_DET_INT_EN BIT(26)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_MST_ON_HOLD_INT_EN BIT(27)
+#define THC_M_PRT_INT_EN_TXN_ERR_INT_EN BIT(29)
+#define THC_M_PRT_INT_EN_GBL_INT_EN BIT(31)
+
+#define THC_M_PRT_INT_STATUS_DISP_SYNC_EVT_INT_STS BIT(14)
+#define THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS BIT(15)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS BIT(17)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS BIT(18)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS BIT(19)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS BIT(20)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS BIT(21)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS BIT(22)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS BIT(23)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS BIT(24)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS BIT(25)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS BIT(26)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS BIT(27)
+#define THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS BIT(28)
+#define THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS BIT(30)
+
+#define THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY BIT(9)
+#define THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR BIT(10)
+#define THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR BIT(12)
+#define THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR BIT(13)
+#define THC_M_PRT_ERR_CAUSE_FATAL_ERR_CAUSE GENMASK(23, 16)
+
+#define THC_M_PRT_SW_SEQ_CNTRL_TSSGO BIT(0)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE BIT(1)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD GENMASK(15, 8)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC GENMASK(31, 16)
+#define THC_M_PRT_SW_SEQ_STS_TSSDONE BIT(0)
+#define THC_M_PRT_SW_SEQ_STS_THC_SS_ERR BIT(1)
+#define THC_M_PRT_SW_SEQ_STS_THC_SS_CIP BIT(3)
+#define THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR GENMASK(31, 0)
+#define THC_M_PRT_SW_SEQ_DATA1_THC_SW_SEQ_DATA1 GENMASK(31, 0)
+
+#define THC_M_PRT_WPRD_BA_LOW_THC_M_PRT_WPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_WPRD_BA_HI_THC_M_PRT_WPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START BIT(0)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_ERROR BIT(1)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC BIT(2)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL BIT(3)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_UHS BIT(23)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC GENMASK(31, 24)
+
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS BIT(0)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS BIT(1)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS BIT(2)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE BIT(3)
+
+#define THC_M_PRT_WR_BULK_ADDR_THC_M_PRT_WR_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DEV_INT_CAUSE_ADDR_THC_M_PRT_DEV_INT_CAUSE_ADDR GENMASK(31, 0)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_INTERRUPT_TYPE GENMASK(3, 0)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_MICRO_FRAME_SIZE GENMASK(23, 4)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_BEGINNING_OF_FRAME BIT(29)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_END_OF_FRAME BIT(30)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_FRAME_TYPE BIT(31)
+
+#define THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET GENMASK(4, 0)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN GENMASK(9, 5)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET GENMASK(14, 10)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_SEND_ICR_US_EN BIT(15)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL GENMASK(31, 16)
+
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET GENMASK(4, 0)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN GENMASK(9, 5)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT GENMASK(15, 12)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE BIT(16)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL BIT(17)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_RXDMA_ADDRINC_DIS BIT(24)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_TXDMA_ADDRINC_DIS BIT(25)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_RXDMA_PKT_STRM_EN BIT(26)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_TXDMA_PKT_STRM_EN BIT(27)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_DEVINT_POL BIT(28)
+
+#define THC_M_PRT_RPRD_BA_LOW_1_THC_M_PRT_RPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_RPRD_BA_HI_1_THC_M_PRT_RPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_RPRD_CNTRL_PCD GENMASK(6, 0)
+#define THC_M_PRT_RPRD_CNTRL_PTEC GENMASK(15, 8)
+#define THC_M_PRT_RPRD_CNTRL_PREFETCH_WM GENMASK(19, 16)
+
+#define THC_M_PRT_READ_DMA_CNTRL_START BIT(0)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_ERROR BIT(1)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_IOC BIT(2)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_STALL BIT(3)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_NDDI BIT(4)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_EOF BIT(5)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL BIT(7)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCRP GENMASK(15, 8)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCWP GENMASK(23, 16)
+#define THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN BIT(28)
+#define THC_M_PRT_READ_DMA_CNTRL_SOO BIT(29)
+#define THC_M_PRT_READ_DMA_CNTRL_UHS BIT(30)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCPR BIT(31)
+
+#define THC_M_PRT_READ_DMA_INT_STS_DMACPL_STS BIT(0)
+#define THC_M_PRT_READ_DMA_INT_STS_ERROR_STS BIT(1)
+#define THC_M_PRT_READ_DMA_INT_STS_IOC_STS BIT(2)
+#define THC_M_PRT_READ_DMA_INT_STS_STALL_STS BIT(3)
+#define THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS BIT(4)
+#define THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS BIT(5)
+#define THC_M_PRT_READ_DMA_INT_STS_ACTIVE BIT(8)
+
+#define THC_M_PRT_READ_DMA_ERR_1_DLERR BIT(0)
+
+#define THC_M_PRT_GUC_OFFSET_LOW_1_THC_M_PRT_GUC_OFFSET_LOW GENMASK(31, 3)
+#define THC_M_PRT_GUC_OFFSET_HI_1_THC_M_PRT_GUC_OFFSET_HI GENMASK(31, 0)
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_1_WORKQ_ITEM_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_WORKQ_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_FCD GENMASK(27, 24)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_GIC GENMASK(31, 28)
+
+#define THC_M_PRT_TSEQ_CNTRL_1_RGD BIT(2)
+#define THC_M_PRT_TSEQ_CNTRL_1_EGP BIT(3)
+#define THC_M_PRT_TSEQ_CNTRL_1_RTO BIT(4)
+#define THC_M_PRT_TSEQ_CNTRL_1_EWOG BIT(5)
+#define THC_M_PRT_TSEQ_CNTRL_1_RWOGC BIT(6)
+#define THC_M_PRT_TSEQ_CNTRL_1_RX_DATA_FIFO_WR_WM GENMASK(25, 16)
+#define THC_M_PRT_TSEQ_CNTRL_1_RESET_PREP_CHICKEN BIT(30)
+#define THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN BIT(31)
+
+#define THC_M_PRT_GUC_DB_ADDR_LOW_1_GUC_DB_ADDR_LOW GENMASK(31, 2)
+#define THC_M_PRT_GUC_DB_ADDR_HI_1_GUC_DB_ADDR_HI GENMASK(31, 0)
+#define THC_M_PRT_GUC_DB_DATA_1_GUC_DB_DATA GENMASK(31, 0)
+#define THC_M_PRT_GUC_OFFSET_INITVAL_1_THC_M_PRT_GUC_OFFSET_INITVAL GENMASK(31, 0)
+
+#define THC_M_PRT_RD_BULK_ADDR_1_THC_M_PRT_RD_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT GENMASK(30, 0)
+#define THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRAME_DROP_CNT_1_NOFD GENMASK(30, 0)
+#define THC_M_PRT_FRAME_DROP_CNT_1_RFDC BIT(31)
+
+#define THC_M_PRT_COALESCE_1_COALESCE_TIMEOUT GENMASK(6, 0)
+
+#define THC_M_PRT_RPRD_BA_LOW_2_THC_M_PRT_RPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_RPRD_BA_HI_2_THC_M_PRT_RPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_READ_DMA_ERR_2_DLERR BIT(0)
+
+#define THC_M_PRT_GUC_OFFSET_LOW_2_THC_M_PRT_GUC_OFFSET_LOW GENMASK(31, 3)
+#define THC_M_PRT_GUC_OFFSET_HI_2_THC_M_PRT_GUC_OFFSET_HI GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_2_WORKQ_ITEM_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_WORKQ_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_FCD GENMASK(27, 24)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_GIC GENMASK(31, 28)
+
+#define THC_M_PRT_TSEQ_CNTRL_2_RGD BIT(2)
+#define THC_M_PRT_TSEQ_CNTRL_2_EGP BIT(3)
+#define THC_M_PRT_TSEQ_CNTRL_2_RTO BIT(4)
+
+#define THC_M_PRT_GUC_DB_ADDR_LOW_2_GUC_DB_ADDR_LOW GENMASK(31, 2)
+#define THC_M_PRT_GUC_DB_ADDR_HI_2_GUC_DB_ADDR_HI GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_DB_DATA_2_GUC_DB_DATA GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_OFFSET_INITVAL_2_THC_M_PRT_GUC_OFFSET_INITVAL GENMASK(31, 0)
+
+#define THC_M_PRT_RD_BULK_ADDR_2_THC_M_PRT_RD_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DB_CNT_2_THC_M_PRT_DB_CNT GENMASK(30, 0)
+#define THC_M_PRT_DB_CNT_2_THC_M_PRT_DB_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_SWINT_CNT_2_THC_M_PRT_SWINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_SWINT_CNT_2_THC_M_PRT_SWINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRAME_DROP_CNT_2_NOFD GENMASK(30, 0)
+#define THC_M_PRT_FRAME_DROP_CNT_2_RFDC BIT(31)
+
+#define THC_M_PRT_COALESCE_2_COALESCE_TIMEOUT GENMASK(6, 0)
+
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN BIT(23)
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC GENMASK(31, 26)
+
+#define THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN BIT(23)
+#define THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC GENMASK(31, 26)
+
+#define THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC BIT(31)
+#define THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC BIT(31)
+
+#define THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN GENMASK(23, 0)
+
+#define THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL GENMASK(3, 0)
+#define THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN BIT(25)
+
+/* CS Assertion delay default value */
+#define THC_CSA_CK_DELAY_VAL_DEFAULT 4
+
+/* ARB policy definition */
+/* Arbiter switches on packet boundary */
+#define THC_ARB_POLICY_PACKET_BOUNDARY 0
+/* Arbiter switches on Micro Frame boundary */
+#define THC_ARB_POLICY_UFRAME_BOUNDARY 1
+/* Arbiter switches on Frame boundary */
+#define THC_ARB_POLICY_FRAME_BOUNDARY 2
+
+#define THC_REGMAP_POLLING_INTERVAL_US 10 /* 10us */
+#define THC_PIO_DONE_TIMEOUT_US USEC_PER_SEC /* 1s */
+
+/* Default configures for HIDSPI */
+#define THC_BIT_OFFSET_INTERRUPT_TYPE 4
+/* input_report_type is 4 bits for HIDSPI */
+#define THC_BIT_LENGTH_INTERRUPT_TYPE 4
+/* Last fragment indicator is bit 15 for HIDSPI */
+#define THC_BIT_OFFSET_LAST_FRAGMENT_FLAG 22
+#define THC_BIT_OFFSET_MICROFRAME_SIZE 8
+/* input_report_length is 14 bits for HIDSPI */
+#define THC_BIT_LENGTH_MICROFRAME_SIZE 14
+/* MFS unit in power of 2 */
+#define THC_UNIT_MICROFRAME_SIZE 2
+#define THC_BITMASK_INTERRUPT_TYPE_DATA 1
+#define THC_BITMASK_INVALID_TYPE_DATA 2
+
+/* Interrupt Quiesce default timeout value */
+#define THC_QUIESCE_EN_TIMEOUT_US USEC_PER_SEC /* 1s */
+
+/* LTR definition */
+/*
+ * THC uses the scale to calculate the final LTR value.
+ * The scale is a geometric progression with ratio 2^5, starting from 2^0.
+ * For example, THC_LTR_SCALE_2 (2) means 2^(5 * 2) = 1024, with ns as the unit.
+ */
+#define THC_LTR_SCALE_0 0
+#define THC_LTR_SCALE_1 1
+#define THC_LTR_SCALE_2 2
+#define THC_LTR_SCALE_3 3
+#define THC_LTR_SCALE_4 4
+#define THC_LTR_SCALE_5 5
+#define THC_LTR_MODE_ACTIVE 0
+#define THC_LTR_MODE_LP 1
+#define THC_LTR_MIN_VAL_SCALE_3 BIT(10)
+#define THC_LTR_MAX_VAL_SCALE_3 BIT(15)
+#define THC_LTR_MIN_VAL_SCALE_4 BIT(15)
+#define THC_LTR_MAX_VAL_SCALE_4 BIT(20)
+#define THC_LTR_MIN_VAL_SCALE_5 BIT(20)
+#define THC_LTR_MAX_VAL_SCALE_5 BIT(25)
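+
+/*
+ * Worked example (illustrative): with scale = THC_LTR_SCALE_3 the LTR unit
+ * is 2^(5 * 3) = 32768 ns, so an LTR value of 100 encodes a tolerable
+ * latency of 100 * 32768 ns, about 3.28 ms.
+ */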
+
+/**
+ * enum thc_pio_opcode - THC PIO opcode default values
+ * @THC_PIO_OP_SPI_TIC_READ: THC opcode for SPI PIO read
+ * @THC_PIO_OP_SPI_TIC_WRITE: THC opcode for SPI PIO write
+ * @THC_PIO_OP_I2C_SUBSYSTEM_READ: THC opcode for read I2C subsystem registers
+ * @THC_PIO_OP_I2C_SUBSYSTEM_WRITE: THC opcode for write I2C subsystem registers
+ * @THC_PIO_OP_I2C_TIC_READ: THC opcode for read I2C device
+ * @THC_PIO_OP_I2C_TIC_WRITE: THC opcode for write I2C device
+ * @THC_PIO_OP_I2C_TIC_WRITE_AND_READ: THC opcode for write followed by read I2C device
+ */
+enum thc_pio_opcode {
+ THC_PIO_OP_SPI_TIC_READ = 0x4,
+ THC_PIO_OP_SPI_TIC_WRITE = 0x6,
+ THC_PIO_OP_I2C_SUBSYSTEM_READ = 0x12,
+ THC_PIO_OP_I2C_SUBSYSTEM_WRITE = 0x13,
+ THC_PIO_OP_I2C_TIC_READ = 0x14,
+ THC_PIO_OP_I2C_TIC_WRITE = 0x18,
+ THC_PIO_OP_I2C_TIC_WRITE_AND_READ = 0x1C,
+};
+
+/**
+ * enum thc_spi_iomode - THC SPI IO mode
+ * @THC_SINGLE_IO: single IO mode, 1(opcode) - 1(address) - 1(data)
+ * @THC_DUAL_IO: dual IO mode, 1(opcode) - 2(address) - 2(data)
+ * @THC_QUAD_IO: quad IO mode, 1(opcode) - 4(address) - 4(data)
+ * @THC_QUAD_PARALLEL_IO: parallel quad IO mode, 4(opcode) - 4(address) - 4(data)
+ */
+enum thc_spi_iomode {
+ THC_SINGLE_IO = 0,
+ THC_DUAL_IO = 1,
+ THC_QUAD_IO = 2,
+ THC_QUAD_PARALLEL_IO = 3,
+};
+
+/**
+ * enum thc_spi_frq_div - THC SPI frequency divider
+ *
+ * The final DIV value is determined by the THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN bit.
+ * If THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN isn't set, THC takes the DIV value directly;
+ * if THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN is set, THC takes the DIV value multiplied by 8.
+ *
+ * For example, if THC input clock is 125MHz:
+ * When THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN isn't set, THC_SPI_FRQ_DIV_3 means DIV is 3,
+ * THC final clock is 125 / 3 = 41.667MHz;
+ * When THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN is set, THC_SPI_FRQ_DIV_3 means DIV is 3 * 8,
+ * THC final clock is 125 / (3 * 8) = 5.208MHz;
+ */
+enum thc_spi_frq_div {
+ THC_SPI_FRQ_RESERVED = 0,
+ THC_SPI_FRQ_DIV_1 = 1,
+ THC_SPI_FRQ_DIV_2 = 2,
+ THC_SPI_FRQ_DIV_3 = 3,
+ THC_SPI_FRQ_DIV_4 = 4,
+ THC_SPI_FRQ_DIV_5 = 5,
+ THC_SPI_FRQ_DIV_6 = 6,
+ THC_SPI_FRQ_DIV_7 = 7,
+};
+
+/* THC I2C sub-system registers */
+#define THC_I2C_IC_CON_OFFSET 0x0
+#define THC_I2C_IC_TAR_OFFSET 0x4
+#define THC_I2C_IC_SAR_OFFSET 0x8
+#define THC_I2C_IC_HS_MADDR_OFFSET 0xC
+#define THC_I2C_IC_DATA_CMD_OFFSET 0x10
+#define THC_I2C_IC_SS_SCL_HCNT_OFFSET 0x14
+#define THC_I2C_IC_UFM_SCL_HCNT_OFFSET 0x14
+#define THC_I2C_IC_SS_SCL_LCNT_OFFSET 0x18
+#define THC_I2C_IC_UFM_SCL_LCNT_OFFSET 0x18
+#define THC_I2C_IC_FS_SCL_HCNT_OFFSET 0x1C
+#define THC_I2C_IC_UFM_TBUF_CNT_OFFSET 0x1C
+#define THC_I2C_IC_FS_SCL_LCNT_OFFSET 0x20
+#define THC_I2C_IC_HS_SCL_HCNT_OFFSET 0x24
+#define THC_I2C_IC_HS_SCL_LCNT_OFFSET 0x28
+#define THC_I2C_IC_INTR_STAT_OFFSET 0x2C
+#define THC_I2C_IC_INTR_MASK_OFFSET 0x30
+#define THC_I2C_IC_RAW_INTR_STAT_OFFSET 0x34
+#define THC_I2C_IC_RX_TL_OFFSET 0x38
+#define THC_I2C_IC_TX_TL_OFFSET 0x3C
+#define THC_I2C_IC_CLR_INTR_OFFSET 0x40
+#define THC_I2C_IC_CLR_RX_UNDER_OFFSET 0x44
+#define THC_I2C_IC_CLR_RX_OVER_OFFSET 0x48
+#define THC_I2C_IC_CLR_TX_OVER_OFFSET 0x4C
+#define THC_I2C_IC_CLR_RD_REQ_OFFSET 0x50
+#define THC_I2C_IC_CLR_TX_ABRT_OFFSET 0x54
+#define THC_I2C_IC_CLR_RX_DONE_OFFSET 0x58
+#define THC_I2C_IC_CLR_ACTIVITY_OFFSET 0x5C
+#define THC_I2C_IC_CLR_STOP_DET_OFFSET 0x60
+#define THC_I2C_IC_CLR_START_DET_OFFSET 0x64
+#define THC_I2C_IC_CLR_GEN_CALL_OFFSET 0x68
+#define THC_I2C_IC_ENABLE_OFFSET 0x6C
+#define THC_I2C_IC_STATUS_OFFSET 0x70
+#define THC_I2C_IC_TXFLR_OFFSET 0x74
+#define THC_I2C_IC_RXFLR_OFFSET 0x78
+#define THC_I2C_IC_SDA_HOLD_OFFSET 0x7C
+#define THC_I2C_IC_TX_ABRT_SOURCE_OFFSET 0x80
+#define THC_I2C_IC_SLV_DATA_NACK_ONLY_OFFSET 0x84
+#define THC_I2C_IC_DMA_CR_OFFSET 0x88
+#define THC_I2C_IC_DMA_TDLR_OFFSET 0x8C
+#define THC_I2C_IC_DMA_RDLR_OFFSET 0x90
+#define THC_I2C_IC_SDA_SETUP_OFFSET 0x94
+#define THC_I2C_IC_ACK_GENERAL_CALL_OFFSET 0x98
+#define THC_I2C_IC_ENABLE_STATUS_OFFSET 0x9C
+#define THC_I2C_IC_FS_SPKLEN_OFFSET 0xA0
+#define THC_I2C_IC_UFM_SPKLEN_OFFSET 0xA0
+#define THC_I2C_IC_HS_SPKLEN_OFFSET 0xA4
+#define THC_I2C_IC_CLR_RESTART_DET_OFFSET 0xA8
+#define THC_I2C_IC_SCL_STUCK_AT_LOW_TIMEOUT_OFFSET 0xAC
+#define THC_I2C_IC_SDA_STUCK_AT_LOW_TIMEOUT_OFFSET 0xB0
+#define THC_I2C_IC_CLR_SCL_STUCK_DET_OFFSET 0xB4
+#define THC_I2C_IC_DEVICE_ID_OFFSET 0xB8
+#define THC_I2C_IC_SMBUS_CLK_LOW_SEXT_OFFSET 0xBC
+#define THC_I2C_IC_SMBUS_CLK_LOW_MEXT_OFFSET 0xC0
+#define THC_I2C_IC_SMBUS_THIGH_MAX_IDLE_COUNT_OFFSET 0xC4
+#define THC_I2C_IC_SMBUS_INTR_STAT_OFFSET 0xC8
+#define THC_I2C_IC_SMBUS_INTR_MASK_OFFSET 0xCC
+#define THC_I2C_IC_SMBUS_RAW_INTR_STAT_OFFSET 0xD0
+#define THC_I2C_IC_CLR_SMBUS_INTR_OFFSET 0xD4
+#define THC_I2C_IC_OPTIONAL_SAR_OFFSET 0xD8
+#define THC_I2C_IC_SMBUS_UDID_LSB_OFFSET 0xDC
+#define THC_I2C_IC_SMBUS_UDID_WORD0_OFFSET 0xDC
+#define THC_I2C_IC_SMBUS_UDID_WORD1_OFFSET 0xE0
+#define THC_I2C_IC_SMBUS_UDID_WORD2_OFFSET 0xE4
+#define THC_I2C_IC_SMBUS_UDID_WORD3_OFFSET 0xE8
+#define THC_I2C_IC_COMP_PARAM_1_OFFSET 0xF4
+#define THC_I2C_IC_COMP_VERSION_OFFSET 0xF8
+#define THC_I2C_IC_COMP_TYPE_OFFSET 0xFC
+
+/**
+ * enum THC_I2C_SPEED_MODE - THC I2C sub-system supported speed modes
+ */
+enum THC_I2C_SPEED_MODE {
+ THC_I2C_STANDARD = 1,
+ THC_I2C_FAST_AND_PLUS = 2,
+ THC_I2C_HIGH_SPEED = 3,
+};
+
+/* THC I2C sub-system register bits definition */
+#define THC_I2C_IC_ENABLE_ENABLE BIT(0)
+#define THC_I2C_IC_ENABLE_ABORT BIT(1)
+#define THC_I2C_IC_ENABLE_TX_CMD_BLOCK BIT(2)
+#define THC_I2C_IC_ENABLE_SDA_STUCK_RECOVERY_ENABLE BIT(3)
+#define THC_I2C_IC_ENABLE_SMBUS_CLK_RESET BIT(16)
+#define THC_I2C_IC_ENABLE_SMBUS_SUSPEND_EN BIT(17)
+#define THC_I2C_IC_ENABLE_SMBUS_ALERT_EN BIT(18)
+
+#define THC_I2C_IC_CON_MASTER_MODE BIT(0)
+#define THC_I2C_IC_CON_SPEED GENMASK(2, 1)
+#define THC_I2C_IC_CON_IC_10BITADDR_SLAVE BIT(3)
+#define THC_I2C_IC_CON_IC_10BITADDR_MASTER BIT(4)
+#define THC_I2C_IC_CON_IC_RESTART_EN BIT(5)
+#define THC_I2C_IC_CON_IC_SLAVE_DISABLE BIT(6)
+#define THC_I2C_IC_CON_STOP_DET_IFADDRESSED BIT(7)
+#define THC_I2C_IC_CON_TX_EMPTY_CTRL BIT(8)
+#define THC_I2C_IC_CON_RX_FIFO_FULL_HLD_CTRL BIT(9)
+#define THC_I2C_IC_CON_STOP_DET_IF_MASTER_ACTIVE BIT(10)
+#define THC_I2C_IC_CON_BUS_CLEAR_FEATURE_CTRL BIT(11)
+#define THC_I2C_IC_CON_OPTIONAL_SAR_CTRL BIT(16)
+#define THC_I2C_IC_CON_SMBUS_SLAVE_QUICK_EN BIT(17)
+#define THC_I2C_IC_CON_SMBUS_ARP_EN BIT(18)
+#define THC_I2C_IC_CON_SMBUS_PERSISTENT_SLV_ADDR_EN BIT(19)
+
+#define THC_I2C_IC_TAR_IC_TAR GENMASK(9, 0)
+#define THC_I2C_IC_TAR_GC_OR_START BIT(10)
+#define THC_I2C_IC_TAR_SPECIAL BIT(11)
+#define THC_I2C_IC_TAR_IC_10BITADDR_MASTER BIT(12)
+#define THC_I2C_IC_TAR_DEVICE_ID BIT(13)
+#define THC_I2C_IC_TAR_SMBUS_QUICK_CMD BIT(16)
+
+#define THC_I2C_IC_INTR_MASK_M_RX_UNDER BIT(0)
+#define THC_I2C_IC_INTR_MASK_M_RX_OVER BIT(1)
+#define THC_I2C_IC_INTR_MASK_M_RX_FULL BIT(2)
+#define THC_I2C_IC_INTR_MASK_M_TX_OVER BIT(3)
+#define THC_I2C_IC_INTR_MASK_M_TX_EMPTY BIT(4)
+#define THC_I2C_IC_INTR_MASK_M_RD_REQ BIT(5)
+#define THC_I2C_IC_INTR_MASK_M_TX_ABRT BIT(6)
+#define THC_I2C_IC_INTR_MASK_M_RX_DONE BIT(7)
+#define THC_I2C_IC_INTR_MASK_M_ACTIVITY BIT(8)
+#define THC_I2C_IC_INTR_MASK_M_STOP_DET BIT(9)
+#define THC_I2C_IC_INTR_MASK_M_START_DET BIT(10)
+#define THC_I2C_IC_INTR_MASK_M_GEN_CALL BIT(11)
+#define THC_I2C_IC_INTR_MASK_M_RESTART_DET BIT(12)
+#define THC_I2C_IC_INTR_MASK_M_MASTER_ON_HOLD BIT(13)
+#define THC_I2C_IC_INTR_MASK_M_SCL_STUCK_AT_LOW BIT(14)
+
+#define THC_I2C_IC_DMA_CR_RDMAE BIT(0)
+#define THC_I2C_IC_DMA_CR_TDMAE BIT(1)
+
+#endif /* _INTEL_THC_HW_H_ */
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 6f1443999d1d..1deacb4568cb 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -218,6 +218,14 @@ static inline __u32 wacom_s32tou(s32 value, __u8 n)
return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
}
+static inline u32 wacom_rescale(u32 value, u32 in_max, u32 out_max)
+{
+ if (in_max == 0 || out_max == 0)
+ return 0;
+ value = clamp(value, 0, in_max);
+ return DIV_ROUND_CLOSEST(value * out_max, in_max);
+}
+
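+/*
+ * For example, callers such as wacom_leds_brightness_get() map a raw
+ * luminance in [0, max_hlv] onto the LED core's [0, LED_FULL] range with
+ * wacom_rescale(led->hlv, wacom->led.max_hlv, LED_FULL), rounding to the
+ * nearest step rather than truncating.
+ */
+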
extern const struct hid_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 9843b52bd017..8125383932ec 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1084,6 +1084,17 @@ static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest,
mutex_lock(&wacom->lock);
*dest = value & 0x7f;
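+ /* Keep each LED group's cached llv/hlv in sync with the new value */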
+ for (unsigned int i = 0; i < wacom->led.count; i++) {
+ struct wacom_group_leds *group = &wacom->led.groups[i];
+
+ for (unsigned int j = 0; j < group->count; j++) {
+ if (dest == &wacom->led.llv)
+ group->leds[j].llv = *dest;
+ else if (dest == &wacom->led.hlv)
+ group->leds[j].hlv = *dest;
+ }
+ }
+
err = wacom_led_control(wacom);
mutex_unlock(&wacom->lock);
@@ -1302,10 +1313,10 @@ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
struct wacom *wacom = led->wacom;
if (wacom->led.max_hlv)
- return led->hlv * LED_FULL / wacom->led.max_hlv;
+ return wacom_rescale(led->hlv, wacom->led.max_hlv, LED_FULL);
if (wacom->led.max_llv)
- return led->llv * LED_FULL / wacom->led.max_llv;
+ return wacom_rescale(led->llv, wacom->led.max_llv, LED_FULL);
/* device doesn't support brightness tuning */
return LED_FULL;
@@ -1337,8 +1348,8 @@ static int wacom_led_brightness_set(struct led_classdev *cdev,
goto out;
}
- led->llv = wacom->led.llv = wacom->led.max_llv * brightness / LED_FULL;
- led->hlv = wacom->led.hlv = wacom->led.max_hlv * brightness / LED_FULL;
+ led->llv = wacom->led.llv = wacom_rescale(brightness, LED_FULL, wacom->led.max_llv);
+ led->hlv = wacom->led.hlv = wacom_rescale(brightness, LED_FULL, wacom->led.max_hlv);
wacom->led.groups[led->group].select = led->id;
@@ -1370,17 +1381,6 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
if (!name)
return -ENOMEM;
- if (!read_only) {
- led->trigger.name = name;
- error = devm_led_trigger_register(dev, &led->trigger);
- if (error) {
- hid_err(wacom->hdev,
- "failed to register LED trigger %s: %d\n",
- led->cdev.name, error);
- return error;
- }
- }
-
led->group = group;
led->id = id;
led->wacom = wacom;
@@ -1397,6 +1397,19 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
led->cdev.brightness_set = wacom_led_readonly_brightness_set;
}
+ if (!read_only) {
+ led->trigger.name = name;
+ if (id == wacom->led.groups[group].select)
+ led->trigger.brightness = wacom_leds_brightness_get(led);
+ error = devm_led_trigger_register(dev, &led->trigger);
+ if (error) {
+ hid_err(wacom->hdev,
+ "failed to register LED trigger %s: %d\n",
+ led->cdev.name, error);
+ return error;
+ }
+ }
+
error = devm_led_classdev_register(dev, &led->cdev);
if (error) {
hid_err(wacom->hdev,
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 5501a560fb07..b60bfafc6a8f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -4946,6 +4946,10 @@ static const struct wacom_features wacom_features_0x94 =
HID_DEVICE(BUS_I2C, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
.driver_data = (kernel_ulong_t)&wacom_features_##prod
+#define PCI_DEVICE_WACOM(prod) \
+ HID_DEVICE(BUS_PCI, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
+
#define USB_DEVICE_LENOVO(prod) \
HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
.driver_data = (kernel_ulong_t)&wacom_features_##prod
@@ -5115,6 +5119,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(HID_ANY_ID) },
{ I2C_DEVICE_WACOM(HID_ANY_ID) },
+ { PCI_DEVICE_WACOM(HID_ANY_ID) },
{ BT_DEVICE_WACOM(HID_ANY_ID) },
{ }
};
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index dd376602f3f1..4cbaba15d86e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,7 +324,7 @@ config SENSORS_K8TEMP
config SENSORS_K10TEMP
tristate "AMD Family 10h+ temperature sensor"
- depends on X86 && PCI && AMD_NB
+ depends on X86 && PCI && AMD_NODE
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
@@ -413,7 +413,7 @@ config SENSORS_ASPEED
will be called aspeed_pwm_tacho.
config SENSORS_ASPEED_G6
- tristate "ASPEED g6 PWM and Fan tach driver"
+ tristate "ASPEED G6 PWM and Fan tach driver"
depends on ARCH_ASPEED || COMPILE_TEST
depends on PWM
help
@@ -421,7 +421,7 @@ config SENSORS_ASPEED_G6
controllers.
This driver can also be built as a module. If so, the module
- will be called aspeed_pwm_tacho.
+ will be called aspeed_g6_pwm_tach.
config SENSORS_ATXP1
tristate "Attansic ATXP1 VID controller"
@@ -1412,7 +1412,9 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
+ depends on I3C || !I3C
select REGMAP_I2C
+ select REGMAP_I3C if I3C
help
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
@@ -1822,6 +1824,18 @@ config SENSORS_PWM_FAN
This driver can also be built as a module. If so, the module
will be called pwm-fan.
+config SENSORS_QNAP_MCU_HWMON
+ tristate "QNAP MCU hardware monitoring"
+ depends on MFD_QNAP_MCU
+ depends on THERMAL || THERMAL=n
+ help
+ Say yes here to enable support for fan and temperature sensor
+ connected to a QNAP MCU, as found in a number of QNAP network
+ attached storage devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called qnap-mcu-hwmon.
+
config SENSORS_RASPBERRYPI_HWMON
tristate "Raspberry Pi voltage monitor"
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b827b92f2a78..b7ef0f0562d3 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -189,6 +189,7 @@ obj-$(CONFIG_SENSORS_POWERZ) += powerz.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
obj-$(CONFIG_SENSORS_PT5161L) += pt5161l.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
+obj-$(CONFIG_SENSORS_QNAP_MCU_HWMON) += qnap-mcu-hwmon.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o
obj-$(CONFIG_SENSORS_SBRMI) += sbrmi.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 2f1c9d97ad21..44afb07409a4 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -84,6 +84,7 @@ struct acpi_power_meter_resource {
u64 power;
u64 cap;
u64 avg_interval;
+ bool power_alarm;
int sensors_valid;
unsigned long sensors_last_updated;
struct sensor_device_attribute sensors[NUM_SENSORS];
@@ -292,8 +293,8 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ unsigned long temp, trip_bk;
int res;
- unsigned long temp;
res = kstrtoul(buf, 10, &temp);
if (res)
@@ -301,13 +302,15 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
temp = DIV_ROUND_CLOSEST(temp, 1000);
- mutex_lock(&resource->lock);
+ guard(mutex)(&resource->lock);
+
+ trip_bk = resource->trip[attr->index - 7];
resource->trip[attr->index - 7] = temp;
res = set_acpi_trip(resource);
- mutex_unlock(&resource->lock);
-
- if (res)
+ if (res) {
+ resource->trip[attr->index - 7] = trip_bk;
return res;
+ }
return count;
}
@@ -396,6 +399,9 @@ static ssize_t show_val(struct device *dev,
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
u64 val = 0;
+ int ret;
+
+ guard(mutex)(&resource->lock);
switch (attr->index) {
case 0:
@@ -423,10 +429,17 @@ static ssize_t show_val(struct device *dev,
val = 0;
break;
case 6:
- if (resource->power > resource->cap)
- val = 1;
- else
- val = 0;
+ ret = update_meter(resource);
+ if (ret)
+ return ret;
+ /* update the cap here if the device does not notify cap changes */
+ if (!(resource->caps.flags & POWER_METER_CAN_NOTIFY)) {
+ ret = update_cap(resource);
+ if (ret)
+ return ret;
+ }
+ val = resource->power_alarm || resource->power > resource->cap;
+ resource->power_alarm = resource->power > resource->cap;
break;
case 7:
case 8:
@@ -682,7 +695,7 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
/* _PMD method is optional. */
res = read_domain_devices(resource);
- if (res != -ENODEV)
+ if (res && res != -ENODEV)
return res;
if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
@@ -847,12 +860,20 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
break;
case METER_NOTIFY_CAP:
+ mutex_lock(&resource->lock);
+ res = update_cap(resource);
+ if (res)
+ dev_err_once(&device->dev, "Failed to update the cap after the capping value changed\n");
+ mutex_unlock(&resource->lock);
sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
break;
case METER_NOTIFY_INTERVAL:
sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
break;
case METER_NOTIFY_CAPPING:
+ mutex_lock(&resource->lock);
+ resource->power_alarm = true;
+ mutex_unlock(&resource->lock);
sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
dev_info(&device->dev, "Capping in progress.\n");
break;
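The set_trip() and show_val() changes above lean on the scope-based guard(mutex)() helper from linux/cleanup.h, which drops the lock automatically on every return path. A minimal sketch of the pattern, using a hypothetical my_lock and device_refresh() stand-in:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int device_refresh(void);	/* hypothetical device access */

static int my_read(long *val)
{
	guard(mutex)(&my_lock);		/* unlocked when the scope exits */

	if (device_refresh())
		return -EIO;		/* early return, no explicit unlock */

	*val = 42;
	return 0;			/* lock released here as well */
}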
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 9555366aeaf0..43e54dc513da 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -250,6 +250,8 @@ static const struct ec_sensor_info sensors_family_amd_600[] = {
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
static const struct ec_sensor_info sensors_family_intel_300[] = {
@@ -477,6 +479,15 @@ static const struct ec_board_info board_info_zenith_ii_extreme = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
{ \
.matches = { \
@@ -538,6 +549,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_zenith_ii_extreme),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME ALPHA",
&board_info_zenith_ii_extreme),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("TUF GAMING X670E-PLUS",
+ &board_info_tuf_gaming_x670e_plus),
{},
};
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 1dc7e24fe4c5..c80350e499e9 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -17,6 +17,7 @@
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/acpi.h>
+#include <linux/string_choices.h>
#define ATK_HID "ATK0110"
@@ -441,7 +442,7 @@ static void atk_print_sensor(struct atk_data *data, union acpi_object *obj)
flags->integer.value,
name->string.pointer,
limit1->integer.value, limit2->integer.value,
- enable->integer.value ? "enabled" : "disabled");
+ str_enabled_disabled(enable->integer.value));
#endif
}
@@ -1074,8 +1075,7 @@ static int atk_ec_enabled(struct atk_data *data)
err = -EIO;
} else {
err = (buf->value != 0);
- dev_dbg(dev, "EC is %sabled\n",
- err ? "en" : "dis");
+ dev_dbg(dev, "EC is %s\n", str_enabled_disabled(err));
}
ACPI_FREE(obj);
@@ -1096,18 +1096,15 @@ static int atk_ec_ctl(struct atk_data *data, int enable)
obj = atk_sitm(data, &sitm);
if (IS_ERR(obj)) {
- dev_err(dev, "Failed to %sable the EC\n",
- enable ? "en" : "dis");
+ dev_err(dev, "Failed to %s the EC\n", str_enable_disable(enable));
return PTR_ERR(obj);
}
ec_ret = (struct atk_acpi_ret_buffer *)obj->buffer.pointer;
if (ec_ret->flags == 0) {
- dev_err(dev, "Failed to %sable the EC\n",
- enable ? "en" : "dis");
+ dev_err(dev, "Failed to %s the EC\n", str_enable_disable(enable));
err = -EIO;
} else {
- dev_info(dev, "EC %sabled\n",
- enable ? "en" : "dis");
+ dev_info(dev, "EC %s\n", str_enabled_disabled(enable));
}
ACPI_FREE(obj);
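The conversions above use the helpers from linux/string_choices.h, which replace the open-coded "%sable" tricks with full, greppable strings. A minimal sketch of the two helpers used in this hunk:

	bool on = true;

	dev_dbg(dev, "EC is %s\n", str_enabled_disabled(on));		/* "enabled" */
	dev_err(dev, "Failed to %s the EC\n", str_enable_disable(on));	/* "enable" */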
diff --git a/drivers/hwmon/chipcap2.c b/drivers/hwmon/chipcap2.c
index edf454474f11..9d071f7ca9d2 100644
--- a/drivers/hwmon/chipcap2.c
+++ b/drivers/hwmon/chipcap2.c
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@@ -556,55 +557,40 @@ static int cc2_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct cc2_data *data = dev_get_drvdata(dev);
- int ret = 0;
- mutex_lock(&data->dev_access_lock);
+ guard(mutex)(&data->dev_access_lock);
switch (type) {
case hwmon_temp:
- ret = cc2_measurement(data, type, val);
- break;
+ return cc2_measurement(data, type, val);
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
- ret = cc2_measurement(data, type, val);
- break;
+ return cc2_measurement(data, type, val);
case hwmon_humidity_min:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_L_ON, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_L_ON, val);
case hwmon_humidity_min_hyst:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_L_OFF, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_L_OFF, val);
case hwmon_humidity_max:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_H_ON, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_H_ON, val);
case hwmon_humidity_max_hyst:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_H_OFF, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_H_OFF, val);
case hwmon_humidity_min_alarm:
- ret = cc2_humidity_min_alarm_status(data, val);
- break;
+ return cc2_humidity_min_alarm_status(data, val);
case hwmon_humidity_max_alarm:
- ret = cc2_humidity_max_alarm_status(data, val);
- break;
+ return cc2_humidity_max_alarm_status(data, val);
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->dev_access_lock);
-
- return ret;
}
static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long val)
{
struct cc2_data *data = dev_get_drvdata(dev);
- int ret;
u16 arg;
u8 cmd;
@@ -614,41 +600,28 @@ static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val < 0 || val > CC2_RH_MAX)
return -EINVAL;
- mutex_lock(&data->dev_access_lock);
+ guard(mutex)(&data->dev_access_lock);
switch (attr) {
case hwmon_humidity_min:
cmd = CC2_W_ALARM_L_ON;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_min_hyst:
cmd = CC2_W_ALARM_L_OFF;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_max:
cmd = CC2_W_ALARM_H_ON;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_max_hyst:
cmd = CC2_W_ALARM_H_OFF;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->dev_access_lock);
-
- return ret;
}
static int cc2_request_ready_irq(struct cc2_data *data, struct device *dev)
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index f5bdf842040e..cd00adaad1b4 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1545,6 +1545,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
{
+ .ident = "Dell XPS 13 9370",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 13 9370"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ },
+ {
.ident = "Dell Optiplex 7000",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 2a4ec55ddb47..291d91f68646 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -194,7 +194,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[14] = ata_command;
err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
- ATA_SECT_SIZE, HZ, 5, NULL);
+ ATA_SECT_SIZE, 10 * HZ, 5, NULL);
if (err > 0)
err = -EIO;
return err;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index bbb9cc44e29f..b7c0b1e3c23b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -158,11 +158,6 @@ static umode_t hwmon_is_visible(const struct hwmon_ops *ops,
/* Thermal zone handling */
-/*
- * The complex conditional is necessary to avoid a cyclic dependency
- * between hwmon and thermal_sys modules.
- */
-#ifdef CONFIG_THERMAL_OF
static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct hwmon_thermal_data *tdata = thermal_zone_device_priv(tz);
@@ -268,6 +263,9 @@ static int hwmon_thermal_register_sensors(struct device *dev)
void *drvdata = dev_get_drvdata(dev);
int i;
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return 0;
+
for (i = 1; info[i]; i++) {
int j;
@@ -296,6 +294,9 @@ static void hwmon_thermal_notify(struct device *dev, int index)
struct hwmon_device *hwdev = to_hwmon_device(dev);
struct hwmon_thermal_data *tzdata;
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return;
+
list_for_each_entry(tzdata, &hwdev->tzdata, node) {
if (tzdata->index == index) {
thermal_zone_device_update(tzdata->tzd,
@@ -304,16 +305,6 @@ static void hwmon_thermal_notify(struct device *dev, int index)
}
}
-#else
-static int hwmon_thermal_register_sensors(struct device *dev)
-{
- return 0;
-}
-
-static void hwmon_thermal_notify(struct device *dev, int index) { }
-
-#endif /* IS_REACHABLE(CONFIG_THERMAL) && ... */
-
static int hwmon_attr_base(enum hwmon_sensor_types type)
{
if (type == hwmon_in || type == hwmon_intrusion)
@@ -1179,6 +1170,12 @@ devm_hwmon_device_register_with_info(struct device *dev, const char *name,
if (!dev)
return ERR_PTR(-EINVAL);
+ if (!name) {
+ name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(name))
+ return ERR_CAST(name);
+ }
+
ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
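The hwmon.c change replaces the #ifdef CONFIG_THERMAL_OF stubs with runtime IS_ENABLED() checks, so both branches stay visible to the compiler in every configuration while the dead branch is still optimized away. A minimal sketch of the idiom, with a hypothetical CONFIG_FOO and do_register():

/* before: the disabled copy is never compile-tested */
#ifdef CONFIG_FOO
static int foo_register(void) { return do_register(); }
#else
static int foo_register(void) { return 0; }
#endif

/* after: one copy, compile-tested everywhere */
static int foo_register(void)
{
	if (!IS_ENABLED(CONFIG_FOO))
		return 0;
	return do_register();
}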
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index f9edcfd164c2..3f9b4520b53e 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -486,7 +486,7 @@ static int isl28022_probe(struct i2c_client *client)
}
static const struct i2c_device_id isl28022_ids[] = {
- { "isl28022", 0},
+ { "isl28022" },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, isl28022_ids);
@@ -506,8 +506,7 @@ static struct i2c_driver isl28022_driver = {
.id_table = isl28022_ids,
};
-static int __init
-isl28022_init(void)
+static int __init isl28022_init(void)
{
int err;
@@ -519,15 +518,13 @@ isl28022_init(void)
debugfs_remove_recursive(isl28022_debugfs_root);
return err;
}
+module_init(isl28022_init);
-static void __exit
-isl28022_exit(void)
+static void __exit isl28022_exit(void)
{
i2c_del_driver(&isl28022_driver);
debugfs_remove_recursive(isl28022_debugfs_root);
}
-
-module_init(isl28022_init);
module_exit(isl28022_exit);
MODULE_AUTHOR("Carsten Spieß <mail@carsten-spiess.de>");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 7dc19c5d62ac..d0b4cc9a5011 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -150,6 +150,11 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
+static u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) - AMD_NODE0_PCI_SLOT;
+}
+
static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
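With asm/amd_nb.h gone, k10temp derives the node id directly from the PCI slot: AMD node devices sit at consecutive slots starting at AMD_NODE0_PCI_SLOT (0x18 in asm/amd_node.h). A worked example for the data-fabric functions of the first two nodes:

	PCI_SLOT(0x18 << 3 | 3) - 0x18 == 0	/* 00:18.3 -> node 0 */
	PCI_SLOT(0x19 << 3 | 3) - 0x18 == 1	/* 00:19.3 -> node 1 */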
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 2c2205aec7d4..d95a3c6c245c 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
+#include <linux/i3c/device.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/of.h>
@@ -38,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
max6626,
max31725,
mcp980x,
+ p3t1755,
pct2075,
stds75,
stlm75,
@@ -104,17 +106,15 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
#define LM75_REG_MAX 0x03
#define PCT2075_REG_IDLE 0x04
-/* Each client has this additional data */
struct lm75_data {
- struct i2c_client *client;
struct regmap *regmap;
- struct regulator *vs;
u16 orig_conf;
- u16 current_conf;
u8 resolution; /* In bits, 9 to 16 */
unsigned int sample_time; /* In ms */
enum lm75_type kind;
const struct lm75_params *params;
+ u8 reg_buf[1];
+ u8 val_buf[3];
};
/*-----------------------------------------------------------------------*/
@@ -222,6 +222,13 @@ static const struct lm75_params device_params[] = {
.default_resolution = 9,
.default_sample_time = MSEC_PER_SEC / 18,
},
+ [p3t1755] = {
+ .clr_mask = 1 << 1 | 1 << 7, /* disable SMBAlert and one-shot */
+ .default_resolution = 12,
+ .default_sample_time = 55,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ },
[pct2075] = {
.default_resolution = 11,
.default_sample_time = MSEC_PER_SEC / 10,
@@ -276,6 +283,7 @@ static const struct lm75_params device_params[] = {
.default_sample_time = 125,
.num_sample_times = 4,
.sample_times = (unsigned int []){ 125, 250, 1000, 4000 },
+ .alarm = true,
},
[tmp175] = {
.set_mask = 3 << 5, /* 12-bit mode */
@@ -332,41 +340,11 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
}
-static int lm75_write_config(struct lm75_data *data, u16 set_mask,
- u16 clr_mask)
-{
- unsigned int value;
-
- clr_mask |= LM75_SHUTDOWN << (8 * data->params->config_reg_16bits);
- value = data->current_conf & ~clr_mask;
- value |= set_mask;
-
- if (data->current_conf != value) {
- s32 err;
- if (data->params->config_reg_16bits)
- err = regmap_write(data->regmap, LM75_REG_CONF, value);
- else
- err = i2c_smbus_write_byte_data(data->client,
- LM75_REG_CONF,
- value);
- if (err)
- return err;
- data->current_conf = value;
- }
- return 0;
-}
-
-static int lm75_read_config(struct lm75_data *data)
+static inline int lm75_write_config(struct lm75_data *data, u16 set_mask,
+ u16 clr_mask)
{
- int ret;
- unsigned int status;
-
- if (data->params->config_reg_16bits) {
- ret = regmap_read(data->regmap, LM75_REG_CONF, &status);
- return ret ? ret : status;
- }
-
- return i2c_smbus_read_byte_data(data->client, LM75_REG_CONF);
+ return regmap_update_bits(data->regmap, LM75_REG_CONF,
+ clr_mask | LM75_SHUTDOWN, set_mask);
}
static irqreturn_t lm75_alarm_handler(int irq, void *private)
@@ -418,7 +396,8 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
if (attr == hwmon_temp_alarm) {
switch (data->kind) {
case as6200:
- *val = (regval >> 5) & 0x1;
+ case tmp112:
+ *val = (regval >> 13) & 0x1;
break;
default:
return -EINVAL;
@@ -469,7 +448,6 @@ static int lm75_write_temp(struct device *dev, u32 attr, long temp)
static int lm75_update_interval(struct device *dev, long val)
{
struct lm75_data *data = dev_get_drvdata(dev);
- unsigned int reg;
u8 index;
s32 err;
@@ -489,19 +467,14 @@ static int lm75_update_interval(struct device *dev, long val)
break;
case tmp112:
case as6200:
- err = regmap_read(data->regmap, LM75_REG_CONF, &reg);
- if (err < 0)
- return err;
- reg &= ~0x00c0;
- reg |= (3 - index) << 6;
- err = regmap_write(data->regmap, LM75_REG_CONF, reg);
+ err = regmap_update_bits(data->regmap, LM75_REG_CONF,
+ 0xc000, (3 - index) << 14);
if (err < 0)
return err;
data->sample_time = data->params->sample_times[index];
break;
case pct2075:
- err = i2c_smbus_write_byte_data(data->client, PCT2075_REG_IDLE,
- index + 1);
+ err = regmap_write(data->regmap, PCT2075_REG_IDLE, index + 1);
if (err)
return err;
data->sample_time = data->params->sample_times[index];
@@ -598,6 +571,115 @@ static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
return reg == LM75_REG_TEMP || reg == LM75_REG_CONF;
}
+static int lm75_i2c_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i2c_client *client = context;
+ struct lm75_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ if (reg == LM75_REG_CONF) {
+ if (!data->params->config_reg_16bits)
+ ret = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
+ else
+ ret = i2c_smbus_read_word_data(client, LM75_REG_CONF);
+ } else {
+ ret = i2c_smbus_read_word_swapped(client, reg);
+ }
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+}
+
+static int lm75_i2c_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *client = context;
+ struct lm75_data *data = i2c_get_clientdata(client);
+
+ if (reg == PCT2075_REG_IDLE ||
+ (reg == LM75_REG_CONF && !data->params->config_reg_16bits))
+ return i2c_smbus_write_byte_data(client, reg, val);
+ else if (reg == LM75_REG_CONF)
+ return i2c_smbus_write_word_data(client, reg, val);
+ return i2c_smbus_write_word_swapped(client, reg, val);
+}
+
+static const struct regmap_bus lm75_i2c_regmap_bus = {
+ .reg_read = lm75_i2c_reg_read,
+ .reg_write = lm75_i2c_reg_write,
+};
+
+static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i3c_device *i3cdev = context;
+ struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = 1,
+ .data.out = data->reg_buf,
+ },
+ {
+ .rnw = true,
+ .len = 2,
+ .data.in = data->val_buf,
+ },
+ };
+ int ret;
+
+ data->reg_buf[0] = reg;
+
+ if (reg == LM75_REG_CONF && !data->params->config_reg_16bits)
+ xfers[1].len--;
+
+ ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2);
+ if (ret < 0)
+ return ret;
+
+ if (reg == LM75_REG_CONF && !data->params->config_reg_16bits)
+ *val = data->val_buf[0];
+ else if (reg == LM75_REG_CONF)
+ *val = data->val_buf[0] | (data->val_buf[1] << 8);
+ else
+ *val = data->val_buf[1] | (data->val_buf[0] << 8);
+
+ return 0;
+}
+
+static int lm75_i3c_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i3c_device *i3cdev = context;
+ struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = 3,
+ .data.out = data->val_buf,
+ },
+ };
+
+ data->val_buf[0] = reg;
+
+ if (reg == PCT2075_REG_IDLE ||
+ (reg == LM75_REG_CONF && !data->params->config_reg_16bits)) {
+ xfers[0].len--;
+ data->val_buf[1] = val & 0xff;
+ } else if (reg == LM75_REG_CONF) {
+ data->val_buf[1] = val & 0xff;
+ data->val_buf[2] = (val >> 8) & 0xff;
+ } else {
+ data->val_buf[1] = (val >> 8) & 0xff;
+ data->val_buf[2] = val & 0xff;
+ }
+
+ return i3c_device_do_priv_xfers(i3cdev, xfers, 1);
+}
+
+static const struct regmap_bus lm75_i3c_regmap_bus = {
+ .reg_read = lm75_i3c_reg_read,
+ .reg_write = lm75_i3c_reg_write,
+};
+
static const struct regmap_config lm75_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
@@ -610,46 +692,33 @@ static const struct regmap_config lm75_regmap_config = {
.use_single_write = true,
};
-static void lm75_disable_regulator(void *data)
-{
- struct lm75_data *lm75 = data;
-
- regulator_disable(lm75->vs);
-}
-
static void lm75_remove(void *data)
{
struct lm75_data *lm75 = data;
- struct i2c_client *client = lm75->client;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
+ regmap_write(lm75->regmap, LM75_REG_CONF, lm75->orig_conf);
}
-static int lm75_probe(struct i2c_client *client)
+static int lm75_generic_probe(struct device *dev, const char *name,
+ enum lm75_type kind, int irq, struct regmap *regmap)
{
- struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm75_data *data;
int status, err;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
- return -EIO;
-
data = devm_kzalloc(dev, sizeof(struct lm75_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = client;
- data->kind = (uintptr_t)i2c_get_match_data(client);
+ /* needed by custom regmap callbacks */
+ dev_set_drvdata(dev, data);
- data->vs = devm_regulator_get(dev, "vs");
- if (IS_ERR(data->vs))
- return PTR_ERR(data->vs);
+ data->kind = kind;
+ data->regmap = regmap;
- data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
- if (IS_ERR(data->regmap))
- return PTR_ERR(data->regmap);
+ err = devm_regulator_get_enable(dev, "vs");
+ if (err)
+ return err;
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
@@ -661,25 +730,11 @@ static int lm75_probe(struct i2c_client *client)
data->sample_time = data->params->default_sample_time;
data->resolution = data->params->default_resolution;
- /* Enable the power */
- err = regulator_enable(data->vs);
- if (err) {
- dev_err(dev, "failed to enable regulator: %d\n", err);
- return err;
- }
-
- err = devm_add_action_or_reset(dev, lm75_disable_regulator, data);
+ /* Cache original configuration */
+ err = regmap_read(data->regmap, LM75_REG_CONF, &status);
if (err)
return err;
-
- /* Cache original configuration */
- status = lm75_read_config(data);
- if (status < 0) {
- dev_dbg(dev, "Can't read config? %d\n", status);
- return status;
- }
data->orig_conf = status;
- data->current_conf = status;
err = lm75_write_config(data, data->params->set_mask,
data->params->clr_mask);
@@ -690,20 +745,19 @@ static int lm75_probe(struct i2c_client *client)
if (err)
return err;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
- data, &lm75_chip_info,
- NULL);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, name, data,
+ &lm75_chip_info, NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- if (client->irq) {
+ if (irq) {
if (data->params->alarm) {
err = devm_request_threaded_irq(dev,
- client->irq,
+ irq,
NULL,
&lm75_alarm_handler,
IRQF_ONESHOT,
- client->name,
+ name,
hwmon_dev);
if (err)
return err;
@@ -713,12 +767,29 @@ static int lm75_probe(struct i2c_client *client)
}
}
- dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
+ dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), name);
return 0;
}
-static const struct i2c_device_id lm75_ids[] = {
+static int lm75_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ regmap = devm_regmap_init(dev, &lm75_i2c_regmap_bus, client, &lm75_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return lm75_generic_probe(dev, client->name, (uintptr_t)i2c_get_match_data(client),
+ client->irq, regmap);
+}
+
+static const struct i2c_device_id lm75_i2c_ids[] = {
{ "adt75", adt75, },
{ "as6200", as6200, },
{ "at30ts74", at30ts74, },
@@ -734,6 +805,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "max31725", max31725, },
{ "max31726", max31725, },
{ "mcp980x", mcp980x, },
+ { "p3t1755", p3t1755, },
{ "pct2075", pct2075, },
{ "stds75", stds75, },
{ "stlm75", stlm75, },
@@ -750,7 +822,38 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp1075", tmp1075, },
{ /* LIST END */ }
};
-MODULE_DEVICE_TABLE(i2c, lm75_ids);
+MODULE_DEVICE_TABLE(i2c, lm75_i2c_ids);
+
+struct lm75_i3c_device {
+ enum lm75_type type;
+ const char *name;
+};
+
+static const struct lm75_i3c_device lm75_i3c_p3t1755 = {
+ .name = "p3t1755",
+ .type = p3t1755,
+};
+
+static const struct i3c_device_id lm75_i3c_ids[] = {
+ I3C_DEVICE(0x011b, 0x152a, &lm75_i3c_p3t1755),
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i3c, lm75_i3c_ids);
+
+static int lm75_i3c_probe(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+ const struct lm75_i3c_device *id_data;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init(dev, &lm75_i3c_regmap_bus, i3cdev, &lm75_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ id_data = i3c_device_match_id(i3cdev, lm75_i3c_ids)->data;
+
+ return lm75_generic_probe(dev, id_data->name, id_data->type, 0, regmap);
+}
static const struct of_device_id __maybe_unused lm75_of_match[] = {
{
@@ -814,6 +917,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.data = (void *)mcp980x
},
{
+ .compatible = "nxp,p3t1755",
+ .data = (void *)p3t1755
+ },
+ {
.compatible = "nxp,pct2075",
.data = (void *)pct2075
},
@@ -972,32 +1079,16 @@ static int lm75_detect(struct i2c_client *new_client,
#ifdef CONFIG_PM
static int lm75_suspend(struct device *dev)
{
- int status;
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
- status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
- if (status < 0) {
- dev_dbg(&client->dev, "Can't read config? %d\n", status);
- return status;
- }
- status = status | LM75_SHUTDOWN;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
- return 0;
+ return regmap_update_bits(data->regmap, LM75_REG_CONF, LM75_SHUTDOWN, LM75_SHUTDOWN);
}
static int lm75_resume(struct device *dev)
{
- int status;
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
- status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
- if (status < 0) {
- dev_dbg(&client->dev, "Can't read config? %d\n", status);
- return status;
- }
- status = status & ~LM75_SHUTDOWN;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
- return 0;
+ return regmap_update_bits(data->regmap, LM75_REG_CONF, LM75_SHUTDOWN, 0);
}
static const struct dev_pm_ops lm75_dev_pm_ops = {
@@ -1009,20 +1100,28 @@ static const struct dev_pm_ops lm75_dev_pm_ops = {
#define LM75_DEV_PM_OPS NULL
#endif /* CONFIG_PM */
-static struct i2c_driver lm75_driver = {
+static struct i2c_driver lm75_i2c_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm75",
.of_match_table = of_match_ptr(lm75_of_match),
.pm = LM75_DEV_PM_OPS,
},
- .probe = lm75_probe,
- .id_table = lm75_ids,
+ .probe = lm75_i2c_probe,
+ .id_table = lm75_i2c_ids,
.detect = lm75_detect,
.address_list = normal_i2c,
};
-module_i2c_driver(lm75_driver);
+static struct i3c_driver lm75_i3c_driver = {
+ .driver = {
+ .name = "lm75_i3c",
+ },
+ .probe = lm75_i3c_probe,
+ .id_table = lm75_i3c_ids,
+};
+
+module_i3c_i2c_driver(lm75_i3c_driver, &lm75_i2c_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
MODULE_DESCRIPTION("LM75 driver");
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 7ca139e4b6af..6d5d4cb846da 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -125,7 +125,7 @@ static int ltc2991_get_curr(struct ltc2991_state *st, u32 reg, int channel,
/* Vx-Vy, 19.075uV/LSB */
*val = DIV_ROUND_CLOSEST(sign_extend32(reg_val, 14) * 19075,
- st->r_sense_uohm[channel]);
+ (s32)st->r_sense_uohm[channel]);
return 0;
}
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index f71615e06a8f..416ac02e9f74 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -175,9 +175,11 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MSI 0x201
#define NCT6683_CUSTOMER_ID_MSI2 0x200
#define NCT6683_CUSTOMER_ID_MSI3 0x207
+#define NCT6683_CUSTOMER_ID_MSI4 0x20d
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_CUSTOMER_ID_ASROCK3 0x1631
+#define NCT6683_CUSTOMER_ID_ASROCK4 0x163e
#define NCT6683_REG_BUILD_YEAR 0x604
#define NCT6683_REG_BUILD_MONTH 0x605
@@ -1227,12 +1229,16 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI3:
break;
+ case NCT6683_CUSTOMER_ID_MSI4:
+ break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
case NCT6683_CUSTOMER_ID_ASROCK2:
break;
case NCT6683_CUSTOMER_ID_ASROCK3:
break;
+ case NCT6683_CUSTOMER_ID_ASROCK4:
+ break;
default:
if (!force)
return -ENODEV;
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index c243b51837d2..fa3351351825 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -42,6 +42,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#undef DEFAULT_SYMBOL_NAMESPACE
+#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -56,9 +59,6 @@
#include "lm75.h"
#include "nct6775.h"
-#undef DEFAULT_SYMBOL_NAMESPACE
-#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
-
#define USE_ALTERNATE
/* used to set data->name = nct6775_device_names[data->sio_kind] */
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index 89761a9c8892..1e3749dfa598 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -30,7 +30,7 @@ struct p9_sbe_occ {
#define to_p9_sbe_occ(x) container_of((x), struct p9_sbe_occ, occ)
static ssize_t ffdc_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *battr, char *buf, loff_t pos,
+ const struct bin_attribute *battr, char *buf, loff_t pos,
size_t count)
{
ssize_t rc = 0;
@@ -48,7 +48,7 @@ static ssize_t ffdc_read(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4);
+static const BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4);
static bool p9_sbe_occ_save_ffdc(struct p9_sbe_occ *ctx, const void *resp,
size_t resp_len)
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index f6d352841953..419469f40ba0 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -51,7 +51,7 @@ config SENSORS_ADM1275
tristate "Analog Devices ADM1275 and compatibles"
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1281,
+ Devices ADM1075, ADM1272, ADM1273, ADM1275, ADM1276, ADM1278, ADM1281,
ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
This driver can also be built as a module. If so, the module will
@@ -85,6 +85,15 @@ config SENSORS_BPA_RS600
This driver can also be built as a module. If so, the module will
be called bpa-rs600.
+config SENSORS_CRPS
+ tristate "Intel Common Redundant Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for the Intel
+ Common Redundant Power Supply.
+
+ This driver can also be built as a module. If so, the module will
+ be called crps.
+
config SENSORS_DELTA_AHE50DC_FAN
tristate "Delta AHE-50DC fan control module"
help
@@ -251,7 +260,7 @@ config SENSORS_MAX15301
tristate "Maxim MAX15301"
help
If you say yes here you get hardware monitoring support for Maxim
- MAX15301, as well as for Flex BMR461.
+ MAX15301, MAX15303, as well as for Flex BMR461.
This driver can also be built as a module. If so, the module will
be called max15301.
@@ -510,6 +519,23 @@ config SENSORS_TDA38640_REGULATOR
If you say yes here you get regulator support for Infineon
TDA38640 as regulator.
+config SENSORS_TPS25990
+ tristate "TI TPS25990"
+ help
+ If you say yes here you get hardware monitoring support for TI
+ TPS25990.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps25990.
+
+config SENSORS_TPS25990_REGULATOR
+ bool "Regulator support for TPS25990 and compatibles"
+ depends on SENSORS_TPS25990 && REGULATOR
+ default SENSORS_TPS25990
+ help
+ If you say yes here you get regulator support for Texas Instruments
+ TPS25990.
+
config SENSORS_TPS40422
tristate "TI TPS40422"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index d00bcc758b97..c7eb7739b7f8 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
obj-$(CONFIG_SENSORS_STPDDC60) += stpddc60.o
obj-$(CONFIG_SENSORS_TDA38640) += tda38640.o
+obj-$(CONFIG_SENSORS_TPS25990) += tps25990.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_TPS546D24) += tps546d24.o
@@ -61,3 +62,4 @@ obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_XDPE152) += xdpe152c4.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
obj-$(CONFIG_SENSORS_PIM4328) += pim4328.o
+obj-$(CONFIG_SENSORS_CRPS) += crps.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 127593e10a03..7d175baa5de2 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -18,7 +18,7 @@
#include <linux/log2.h>
#include "pmbus.h"
-enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
+enum chips { adm1075, adm1272, adm1273, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -479,6 +479,7 @@ static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
static const struct i2c_device_id adm1275_id[] = {
{ "adm1075", adm1075 },
{ "adm1272", adm1272 },
+ { "adm1273", adm1273 },
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
{ "adm1278", adm1278 },
@@ -555,9 +556,9 @@ static int adm1275_probe(struct i2c_client *client)
"Device mismatch: Configured %s, detected %s\n",
client->name, mid->name);
- if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
- mid->driver_data == adm1281 || mid->driver_data == adm1293 ||
- mid->driver_data == adm1294)
+ if (mid->driver_data == adm1272 || mid->driver_data == adm1273 ||
+ mid->driver_data == adm1278 || mid->driver_data == adm1281 ||
+ mid->driver_data == adm1293 || mid->driver_data == adm1294)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
@@ -630,6 +631,7 @@ static int adm1275_probe(struct i2c_client *client)
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1272:
+ case adm1273:
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
diff --git a/drivers/hwmon/pmbus/crps.c b/drivers/hwmon/pmbus/crps.c
new file mode 100644
index 000000000000..164b33fed312
--- /dev/null
+++ b/drivers/hwmon/pmbus/crps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2024 IBM Corp.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+static const struct i2c_device_id crps_id[] = {
+ { "intel_crps185" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, crps_id);
+
+static struct pmbus_driver_info crps_info = {
+ .pages = 1,
+ /* PSU uses default linear data format. */
+ .func[0] = PMBUS_HAVE_PIN | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+};
+
+static int crps_probe(struct i2c_client *client)
+{
+ int rc;
+ struct device *dev = &client->dev;
+ char buf[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
+
+ rc = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (rc < 0)
+ return dev_err_probe(dev, rc, "Failed to read PMBUS_MFR_MODEL\n");
+
+ if (rc != 7 || strncmp(buf, "03NK260", 7)) {
+ buf[rc] = '\0';
+ return dev_err_probe(dev, -ENODEV, "Model '%s' not supported\n", buf);
+ }
+
+ rc = pmbus_do_probe(client, &crps_info);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to probe\n");
+
+ return 0;
+}
+
+static const struct of_device_id crps_of_match[] = {
+ {
+ .compatible = "intel,crps185",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, crps_of_match);
+
+static struct i2c_driver crps_driver = {
+ .driver = {
+ .name = "crps",
+ .of_match_table = crps_of_match,
+ },
+ .probe = crps_probe,
+ .id_table = crps_id,
+};
+
+module_i2c_driver(crps_driver);
+
+MODULE_AUTHOR("Ninad Palsule");
+MODULE_DESCRIPTION("PMBus driver for Intel Common Redundant power supplies");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/dps920ab.c b/drivers/hwmon/pmbus/dps920ab.c
index cc5aac9dfdb3..325111a955e6 100644
--- a/drivers/hwmon/pmbus/dps920ab.c
+++ b/drivers/hwmon/pmbus/dps920ab.c
@@ -190,12 +190,19 @@ static const struct of_device_id __maybe_unused dps920ab_of_match[] = {
MODULE_DEVICE_TABLE(of, dps920ab_of_match);
+static const struct i2c_device_id dps920ab_device_id[] = {
+ { "dps920ab" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dps920ab_device_id);
+
static struct i2c_driver dps920ab_driver = {
.driver = {
.name = "dps920ab",
.of_match_table = of_match_ptr(dps920ab_of_match),
},
.probe = dps920ab_probe,
+ .id_table = dps920ab_device_id,
};
module_i2c_driver(dps920ab_driver);
diff --git a/drivers/hwmon/pmbus/max15301.c b/drivers/hwmon/pmbus/max15301.c
index 50dfd477772f..d5810b88ea8d 100644
--- a/drivers/hwmon/pmbus/max15301.c
+++ b/drivers/hwmon/pmbus/max15301.c
@@ -25,6 +25,7 @@
static const struct i2c_device_id max15301_id[] = {
{ "bmr461" },
{ "max15301" },
+ { "max15303" },
{}
};
MODULE_DEVICE_TABLE(i2c, max15301_id);
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index d605412a3173..ddb19c9726d6 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -487,6 +487,8 @@ struct pmbus_driver_info {
/* Regulator ops */
extern const struct regulator_ops pmbus_regulator_ops;
+int pmbus_regulator_init_cb(struct regulator_dev *rdev,
+ struct regulator_config *config);
/* Macros for filling in array of struct regulator_desc */
#define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step, _min_uV) \
@@ -501,6 +503,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
.n_voltages = _voltages, \
.uV_step = _step, \
.min_uV = _min_uV, \
+ .init_cb = pmbus_regulator_init_cb, \
}
#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
@@ -516,6 +519,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
.n_voltages = _voltages, \
.uV_step = _step, \
.min_uV = _min_uV, \
+ .init_cb = pmbus_regulator_init_cb, \
}
#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a1375cb6b648..787683e83db6 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -31,6 +31,9 @@
#define PMBUS_ATTR_ALLOC_SIZE 32
#define PMBUS_NAME_SIZE 24
+static int wp = -1;
+module_param(wp, int, 0444);
+
struct pmbus_sensor {
struct pmbus_sensor *next;
char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
@@ -2665,6 +2668,56 @@ static void pmbus_remove_pec(void *dev)
device_remove_file(dev, &dev_attr_pec);
}
+static void pmbus_init_wp(struct i2c_client *client, struct pmbus_data *data)
+{
+ int ret;
+
+ switch (wp) {
+ case 0:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, 0);
+ break;
+
+ case 1:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_VOUT);
+ break;
+
+ case 2:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_OP);
+ break;
+
+ case 3:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_ALL);
+ break;
+
+ default:
+ /* Ignore the other values */
+ break;
+ }
+
+ ret = _pmbus_read_byte_data(client, -1, PMBUS_WRITE_PROTECT);
+ if (ret < 0)
+ return;
+
+ switch (ret & PB_WP_ANY) {
+ case PB_WP_ALL:
+ data->flags |= PMBUS_OP_PROTECTED;
+ fallthrough;
+ case PB_WP_OP:
+ data->flags |= PMBUS_VOUT_PROTECTED;
+ fallthrough;
+ case PB_WP_VOUT:
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ break;
+
+ default:
+ break;
+ }
+}
+
static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
@@ -2718,12 +2771,8 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
* faults, and we should not try it. Also, in that case, writes into
* limit registers need to be disabled.
*/
- if (!(data->flags & PMBUS_NO_WRITE_PROTECT)) {
- ret = _pmbus_read_byte_data(client, -1, PMBUS_WRITE_PROTECT);
-
- if (ret > 0 && (ret & PB_WP_ANY))
- data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
- }
+ if (!(data->flags & PMBUS_NO_WRITE_PROTECT))
+ pmbus_init_wp(client, data);
ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
if (ret >= 0)
@@ -3183,8 +3232,12 @@ static int pmbus_regulator_list_voltage(struct regulator_dev *rdev,
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
int val, low, high;
+ if (data->flags & PMBUS_VOUT_PROTECTED)
+ return 0;
+
if (selector >= rdev->desc->n_voltages ||
selector < rdev->desc->linear_min_sel)
return -EINVAL;
@@ -3219,6 +3272,22 @@ const struct regulator_ops pmbus_regulator_ops = {
};
EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, "PMBUS");
+int pmbus_regulator_init_cb(struct regulator_dev *rdev,
+ struct regulator_config *config)
+{
+ struct pmbus_data *data = config->driver_data;
+ struct regulation_constraints *constraints = rdev->constraints;
+
+ if (data->flags & PMBUS_OP_PROTECTED)
+ constraints->valid_ops_mask &= ~REGULATOR_CHANGE_STATUS;
+
+ if (data->flags & PMBUS_VOUT_PROTECTED)
+ constraints->valid_ops_mask &= ~REGULATOR_CHANGE_VOLTAGE;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(pmbus_regulator_init_cb, "PMBUS");
+
static int pmbus_regulator_register(struct pmbus_data *data)
{
struct device *dev = data->dev;
@@ -3465,11 +3534,11 @@ static int pmbus_init_debugfs(struct i2c_client *client,
/*
* Allocate the max possible entries we need.
- * 6 entries device-specific
+ * 7 entries device-specific
* 10 entries page-specific
*/
entries = devm_kcalloc(data->dev,
- 6 + data->info->pages * 10, sizeof(*entries),
+ 7 + data->info->pages * 10, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -3482,6 +3551,15 @@ static int pmbus_init_debugfs(struct i2c_client *client,
* assume that values of the following registers are the same for all
* pages and report values only for page 0.
*/
+ if (pmbus_check_byte_register(client, 0, PMBUS_REVISION)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_REVISION;
+ debugfs_create_file("revision", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
if (pmbus_check_block_register(client, 0, PMBUS_MFR_ID)) {
entries[idx].client = client;
entries[idx].page = 0;
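In pmbus_init_wp() above, the readback switch relies on fallthrough so that stricter PMBUS_WRITE_PROTECT levels imply all weaker flags. Assuming the device accepted the requested level, the cascade resolves to:

	PB_WP_VOUT -> PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK
	PB_WP_OP   -> PMBUS_VOUT_PROTECTED  | (all of the above)
	PB_WP_ALL  -> PMBUS_OP_PROTECTED    | (all of the above)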
diff --git a/drivers/hwmon/pmbus/tps25990.c b/drivers/hwmon/pmbus/tps25990.c
new file mode 100644
index 000000000000..0d2655e69549
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps25990.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2024 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define TPS25990_READ_VAUX 0xd0
+#define TPS25990_READ_VIN_MIN 0xd1
+#define TPS25990_READ_VIN_PEAK 0xd2
+#define TPS25990_READ_IIN_PEAK 0xd4
+#define TPS25990_READ_PIN_PEAK 0xd5
+#define TPS25990_READ_TEMP_AVG 0xd6
+#define TPS25990_READ_TEMP_PEAK 0xd7
+#define TPS25990_READ_VOUT_MIN 0xda
+#define TPS25990_READ_VIN_AVG 0xdc
+#define TPS25990_READ_VOUT_AVG 0xdd
+#define TPS25990_READ_IIN_AVG 0xde
+#define TPS25990_READ_PIN_AVG 0xdf
+#define TPS25990_VIREF 0xe0
+#define TPS25990_PK_MIN_AVG 0xea
+#define PK_MIN_AVG_RST_PEAK BIT(7)
+#define PK_MIN_AVG_RST_AVG BIT(6)
+#define PK_MIN_AVG_RST_MIN BIT(5)
+#define PK_MIN_AVG_AVG_CNT GENMASK(2, 0)
+#define TPS25990_MFR_WRITE_PROTECT 0xf8
+#define TPS25990_UNLOCKED BIT(7)
+
+#define TPS25990_8B_SHIFT 2
+#define TPS25990_VIN_OVF_NUM 525100
+#define TPS25990_VIN_OVF_DIV 10163
+#define TPS25990_VIN_OVF_OFF 155
+#define TPS25990_IIN_OCF_NUM 953800
+#define TPS25990_IIN_OCF_DIV 129278
+#define TPS25990_IIN_OCF_OFF 157
+
+#define PK_MIN_AVG_RST_MASK (PK_MIN_AVG_RST_PEAK | \
+ PK_MIN_AVG_RST_AVG | \
+ PK_MIN_AVG_RST_MIN)
+
+/*
+ * Arbitrary default Rimon value: 1kOhm
+ * This corresponds to an overcurrent limit of 55A, close to the specified
+ * limit of an un-stacked TPS25990, and makes further calculations easier to
+ * set up in sensor.conf, if necessary.
+ */
+#define TPS25990_DEFAULT_RIMON 1000000000
+
+static void tps25990_set_m(int *m, u32 rimon)
+{
+ u64 val = ((u64)*m) * rimon;
+
+ /* Make sure m fits the s32 type */
+ *m = DIV_ROUND_CLOSEST_ULL(val, 1000000);
+}
+
+static int tps25990_mfr_write_protect_set(struct i2c_client *client,
+ u8 protect)
+{
+ u8 val;
+
+ switch (protect) {
+ case 0:
+ val = 0xa2;
+ break;
+ case PB_WP_ALL:
+ val = 0x0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_write_byte_data(client, -1, TPS25990_MFR_WRITE_PROTECT,
+ val);
+}
+
+static int tps25990_mfr_write_protect_get(struct i2c_client *client)
+{
+ int ret = pmbus_read_byte_data(client, -1, TPS25990_MFR_WRITE_PROTECT);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret & TPS25990_UNLOCKED) ? 0 : PB_WP_ALL;
+}
+
+static int tps25990_read_word_data(struct i2c_client *client,
+ int page, int phase, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_VIN_MIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_MIN);
+ break;
+
+ case PMBUS_VIRT_READ_VIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VOUT_MIN);
+ break;
+
+ case PMBUS_VIRT_READ_VOUT_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VOUT_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_IIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_IIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_IIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_IIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_TEMP_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_TEMP_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_TEMP_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_PIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_PIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_PIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_PIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_VMON:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VAUX);
+ break;
+
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ /*
+ * These registers provide an 8-bit value instead of a
+ * 10-bit one. Shifting the register value left by two
+ * is enough to make the sensor type conversion work,
+ * even if the datasheet provides different m, b and R
+ * coefficients for them.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ break;
+ ret <<= TPS25990_8B_SHIFT;
+ break;
+
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ break;
+ ret = DIV_ROUND_CLOSEST(ret * TPS25990_VIN_OVF_NUM,
+ TPS25990_VIN_OVF_DIV);
+ ret += TPS25990_VIN_OVF_OFF;
+ break;
+
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ /*
+ * VIREF directly sets the over-current limit at which the eFuse
+ * will turn the FET off and trigger a fault. Expose it through
+ * this generic property instead of a manufacturer specific one.
+ */
+ ret = pmbus_read_byte_data(client, page, TPS25990_VIREF);
+ if (ret < 0)
+ break;
+ ret = DIV_ROUND_CLOSEST(ret * TPS25990_IIN_OCF_NUM,
+ TPS25990_IIN_OCF_DIV);
+ ret += TPS25990_IIN_OCF_OFF;
+ break;
+
+ case PMBUS_VIRT_SAMPLES:
+ ret = pmbus_read_byte_data(client, page, TPS25990_PK_MIN_AVG);
+ if (ret < 0)
+ break;
+ ret = 1 << FIELD_GET(PK_MIN_AVG_AVG_CNT, ret);
+ break;
+
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ case PMBUS_VIRT_RESET_IIN_HISTORY:
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = 0;
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_write_word_data(struct i2c_client *client,
+ int page, int reg, u16 value)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ value >>= TPS25990_8B_SHIFT;
+ value = clamp_val(value, 0, 0xff);
+ ret = pmbus_write_word_data(client, page, reg, value);
+ break;
+
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ value -= TPS25990_VIN_OVF_OFF;
+ value = DIV_ROUND_CLOSEST(((unsigned int)value) * TPS25990_VIN_OVF_DIV,
+ TPS25990_VIN_OVF_NUM);
+ value = clamp_val(value, 0, 0xf);
+ ret = pmbus_write_word_data(client, page, reg, value);
+ break;
+
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ value -= TPS25990_IIN_OCF_OFF;
+ value = DIV_ROUND_CLOSEST(((unsigned int)value) * TPS25990_IIN_OCF_DIV,
+ TPS25990_IIN_OCF_NUM);
+ value = clamp_val(value, 0, 0x3f);
+ ret = pmbus_write_byte_data(client, page, TPS25990_VIREF, value);
+ break;
+
+ case PMBUS_VIRT_SAMPLES:
+ value = clamp_val(value, 1, 1 << PK_MIN_AVG_AVG_CNT);
+ value = ilog2(value);
+ ret = pmbus_update_byte_data(client, page, TPS25990_PK_MIN_AVG,
+ PK_MIN_AVG_AVG_CNT,
+ FIELD_PREP(PK_MIN_AVG_AVG_CNT, value));
+ break;
+
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ case PMBUS_VIRT_RESET_IIN_HISTORY:
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ /*
+ * TPS25990 resets history per MIN/AVG/PEAK group instead of per
+ * sensor type. Exposing this quirk in hwmon is not desirable, so
+ * reset MIN, AVG and PEAK together. Even if there is effectively
+ * only one reset, which clears everything, expose all 5 entries so
+ * userspace is not required to map one sensor type to another to
+ * trigger a reset.
+ */
+ ret = pmbus_update_byte_data(client, 0, TPS25990_PK_MIN_AVG,
+ PK_MIN_AVG_RST_MASK,
+ PK_MIN_AVG_RST_MASK);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_read_byte_data(struct i2c_client *client,
+ int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_WRITE_PROTECT:
+ ret = tps25990_mfr_write_protect_get(client);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_write_byte_data(struct i2c_client *client,
+ int page, int reg, u8 byte)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_WRITE_PROTECT:
+ ret = tps25990_mfr_write_protect_set(client, byte);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
+static const struct regulator_desc tps25990_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("vout"),
+};
+#endif
+
+static const struct pmbus_driver_info tps25990_base_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 5251,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 5251,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -2,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 140,
+ .b[PSC_TEMPERATURE] = 32100,
+ .R[PSC_TEMPERATURE] = -2,
+ /*
+ * Current and Power measurement depends on the ohm value
+ * of Rimon. m is multiplied by 1000 below to have an integer
+ * and -3 is added to R to compensate.
+ */
+ .format[PSC_CURRENT_IN] = direct,
+ .m[PSC_CURRENT_IN] = 9538,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = -6,
+ .format[PSC_POWER] = direct,
+ .m[PSC_POWER] = 4901,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = -7,
+ .func[0] = (PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_VMON |
+ PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN |
+ PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_SAMPLES),
+ .read_word_data = tps25990_read_word_data,
+ .write_word_data = tps25990_write_word_data,
+ .read_byte_data = tps25990_read_byte_data,
+ .write_byte_data = tps25990_write_byte_data,
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
+ .reg_desc = tps25990_reg_desc,
+ .num_regulators = ARRAY_SIZE(tps25990_reg_desc),
+#endif
+};
+
+static const struct i2c_device_id tps25990_i2c_id[] = {
+ { "tps25990" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps25990_i2c_id);
+
+static const struct of_device_id tps25990_of_match[] = {
+ { .compatible = "ti,tps25990" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps25990_of_match);
+
+static int tps25990_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ u32 rimon = TPS25990_DEFAULT_RIMON;
+ int ret;
+
+ ret = device_property_read_u32(dev, "ti,rimon-micro-ohms", &rimon);
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "failed to get rimon\n");
+
+ info = devm_kmemdup(dev, &tps25990_base_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* Adapt the current and power scale for each instance */
+ tps25990_set_m(&info->m[PSC_CURRENT_IN], rimon);
+ tps25990_set_m(&info->m[PSC_POWER], rimon);
+
+ return pmbus_do_probe(client, info);
+}
+
+static struct i2c_driver tps25990_driver = {
+ .driver = {
+ .name = "tps25990",
+ .of_match_table = tps25990_of_match,
+ },
+ .probe = tps25990_probe,
+ .id_table = tps25990_i2c_id,
+};
+module_i2c_driver(tps25990_driver);
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("PMBUS driver for TPS25990 eFuse");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 53a1a968d00d..579d31bb9ac7 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -497,7 +497,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
struct device *hwmon;
int ret;
const struct hwmon_channel_info **channels;
- u32 pwm_min_from_stopped = 0;
+ u32 initial_pwm, pwm_min_from_stopped = 0;
u32 *fan_channel_config;
int channel_count = 1; /* We always have a PWM channel. */
int i;
@@ -545,11 +545,21 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->enable_mode = pwm_disable_reg_enable;
+ ret = pwm_fan_get_cooling_data(dev, ctx);
+ if (ret)
+ return ret;
+
+ /* use maximum cooling level if provided */
+ if (ctx->pwm_fan_cooling_levels)
+ initial_pwm = ctx->pwm_fan_cooling_levels[ctx->pwm_fan_max_state];
+ else
+ initial_pwm = MAX_PWM;
+
/*
* Set duty cycle to maximum allowed and enable PWM output as well as
* the regulator. In case of error nothing is changed
*/
- ret = set_pwm(ctx, MAX_PWM);
+ ret = set_pwm(ctx, initial_pwm);
if (ret) {
dev_err(dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -638,16 +648,16 @@ static int pwm_fan_probe(struct platform_device *pdev)
channels[1] = &ctx->fan_channel;
}
- ret = of_property_read_u32(dev->of_node, "fan-stop-to-start-percent",
- &pwm_min_from_stopped);
+ ret = device_property_read_u32(dev, "fan-stop-to-start-percent",
+ &pwm_min_from_stopped);
if (!ret && pwm_min_from_stopped) {
ctx->pwm_duty_cycle_from_stopped =
DIV_ROUND_UP_ULL(pwm_min_from_stopped *
(ctx->pwm_state.period - 1),
100);
}
- ret = of_property_read_u32(dev->of_node, "fan-stop-to-start-us",
- &ctx->pwm_usec_from_stopped);
+ ret = device_property_read_u32(dev, "fan-stop-to-start-us",
+ &ctx->pwm_usec_from_stopped);
if (ret)
ctx->pwm_usec_from_stopped = 250000;
@@ -661,10 +671,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
return PTR_ERR(hwmon);
}
- ret = pwm_fan_get_cooling_data(dev, ctx);
- if (ret)
- return ret;
-
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
cdev = devm_thermal_of_cooling_device_register(dev,
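
The of_property_read_u32() to device_property_read_u32() conversions above let the same lookups work on DT, ACPI and software-node platforms alike, and both call sites keep the read-with-default idiom. A tiny sketch of that idiom, with a hypothetical property name:

u32 start_delay_ms = 100;	/* documented default */

/* on any read failure the default above is simply kept */
if (device_property_read_u32(dev, "vendor,start-delay-ms", &start_delay_ms))
	dev_dbg(dev, "using default start delay\n");
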
diff --git a/drivers/hwmon/qnap-mcu-hwmon.c b/drivers/hwmon/qnap-mcu-hwmon.c
new file mode 100644
index 000000000000..29057514739c
--- /dev/null
+++ b/drivers/hwmon/qnap-mcu-hwmon.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Driver for hwmon elements found on QNAP-MCU devices
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/fwnode.h>
+#include <linux/hwmon.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/thermal.h>
+
+struct qnap_mcu_hwmon {
+ struct qnap_mcu *mcu;
+ struct device *dev;
+
+ unsigned int pwm_min;
+ unsigned int pwm_max;
+
+ struct fwnode_handle *fan_node;
+ unsigned int fan_state;
+ unsigned int fan_max_state;
+ unsigned int *fan_cooling_levels;
+
+ struct thermal_cooling_device *cdev;
+ struct hwmon_chip_info info;
+};
+
+static int qnap_mcu_hwmon_get_rpm(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'F', 'A' };
+ u8 reply[6];
+ int ret;
+
+ /* poll the fan rpm */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+ /* First 2 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, 2))
+ return -EIO;
+
+ return reply[4] * 30;
+}
+
+static int qnap_mcu_hwmon_get_pwm(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'F', 'Z', '0' }; /* 0 = fan-id? */
+ u8 reply[4];
+ int ret;
+
+ /* poll the fan pwm */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+ /* First 3 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, 3))
+ return -EIO;
+
+ return reply[3];
+}
+
+static int qnap_mcu_hwmon_set_pwm(struct qnap_mcu_hwmon *hwm, u8 pwm)
+{
+ const u8 cmd[] = { '@', 'F', 'W', '0', pwm }; /* 0 = fan-id?, pwm 0-255 */
+
+ /* set the fan pwm */
+ return qnap_mcu_exec_with_ack(hwm->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_hwmon_get_temp(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'T', '3' };
+ u8 reply[4];
+ int ret;
+
+	/* poll the temperature */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+	/* First 3 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, sizeof(cmd)))
+ return -EIO;
+
+ /*
+	 * Bit 7 is set for unknown reasons. Bits [6:0] report the actual
+	 * temperature as returned by the original QNAP firmware tools,
+	 * so just drop bit 7 for now.
+ */
+ return (reply[3] & 0x7f) * 1000;
+}
+
+static int qnap_mcu_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct qnap_mcu_hwmon *hwm = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ if (val != 0)
+ val = clamp_val(val, hwm->pwm_min, hwm->pwm_max);
+
+ return qnap_mcu_hwmon_set_pwm(hwm, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct qnap_mcu_hwmon *hwm = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = qnap_mcu_hwmon_get_pwm(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_fan:
+ ret = qnap_mcu_hwmon_get_rpm(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ case hwmon_temp:
+ ret = qnap_mcu_hwmon_get_temp(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t qnap_mcu_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ return 0444;
+
+ case hwmon_pwm:
+ return 0644;
+
+ case hwmon_fan:
+ return 0444;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops qnap_mcu_hwmon_hwmon_ops = {
+ .is_visible = qnap_mcu_hwmon_is_visible,
+ .read = qnap_mcu_hwmon_read,
+ .write = qnap_mcu_hwmon_write,
+};
+
+/* thermal cooling device callbacks */
+static int qnap_mcu_hwmon_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+
+ if (!hwm)
+ return -EINVAL;
+
+ *state = hwm->fan_max_state;
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+
+ if (!hwm)
+ return -EINVAL;
+
+ *state = hwm->fan_state;
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+ int ret;
+
+ if (!hwm || state > hwm->fan_max_state)
+ return -EINVAL;
+
+ if (state == hwm->fan_state)
+ return 0;
+
+ ret = qnap_mcu_hwmon_set_pwm(hwm, hwm->fan_cooling_levels[state]);
+ if (ret)
+ return ret;
+
+ hwm->fan_state = state;
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops qnap_mcu_hwmon_cooling_ops = {
+ .get_max_state = qnap_mcu_hwmon_get_max_state,
+ .get_cur_state = qnap_mcu_hwmon_get_cur_state,
+ .set_cur_state = qnap_mcu_hwmon_set_cur_state,
+};
+
+static void devm_fan_node_release(void *data)
+{
+ struct qnap_mcu_hwmon *hwm = data;
+
+ fwnode_handle_put(hwm->fan_node);
+}
+
+static int qnap_mcu_hwmon_get_cooling_data(struct device *dev, struct qnap_mcu_hwmon *hwm)
+{
+ struct fwnode_handle *fwnode;
+ int num, i, ret;
+
+ fwnode = device_get_named_child_node(dev->parent, "fan-0");
+ if (!fwnode)
+ return 0;
+
+	/* If the fan node was found, keep a reference until the device unbinds */
+ hwm->fan_node = fwnode;
+ ret = devm_add_action_or_reset(dev, devm_fan_node_release, hwm);
+ if (ret)
+ return ret;
+
+ num = fwnode_property_count_u32(fwnode, "cooling-levels");
+ if (num <= 0)
+ return dev_err_probe(dev, num ? : -EINVAL,
+ "Failed to count elements in 'cooling-levels'\n");
+
+ hwm->fan_cooling_levels = devm_kcalloc(dev, num, sizeof(u32),
+ GFP_KERNEL);
+ if (!hwm->fan_cooling_levels)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_u32_array(fwnode, "cooling-levels",
+ hwm->fan_cooling_levels, num);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read 'cooling-levels'\n");
+
+ for (i = 0; i < num; i++) {
+ if (hwm->fan_cooling_levels[i] > hwm->pwm_max)
+ return dev_err_probe(dev, -EINVAL, "fan state[%d]:%d > %d\n", i,
+ hwm->fan_cooling_levels[i], hwm->pwm_max);
+ }
+
+ hwm->fan_max_state = num - 1;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info * const qnap_mcu_hwmon_channels[] = {
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static int qnap_mcu_hwmon_probe(struct platform_device *pdev)
+{
+ struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ const struct qnap_mcu_variant *variant = pdev->dev.platform_data;
+ struct qnap_mcu_hwmon *hwm;
+ struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
+ struct device *hwmon;
+ int ret;
+
+ hwm = devm_kzalloc(dev, sizeof(*hwm), GFP_KERNEL);
+ if (!hwm)
+ return -ENOMEM;
+
+ hwm->mcu = mcu;
+ hwm->dev = &pdev->dev;
+ hwm->pwm_min = variant->fan_pwm_min;
+ hwm->pwm_max = variant->fan_pwm_max;
+
+ platform_set_drvdata(pdev, hwm);
+
+ /*
+ * Set duty cycle to maximum allowed.
+ */
+ ret = qnap_mcu_hwmon_set_pwm(hwm, hwm->pwm_max);
+ if (ret)
+ return ret;
+
+ hwm->info.ops = &qnap_mcu_hwmon_hwmon_ops;
+ hwm->info.info = qnap_mcu_hwmon_channels;
+
+ ret = qnap_mcu_hwmon_get_cooling_data(dev, hwm);
+ if (ret)
+ return ret;
+
+ hwm->fan_state = hwm->fan_max_state;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "qnapmcu",
+ hwm, &hwm->info, NULL);
+ if (IS_ERR(hwmon))
+ return dev_err_probe(dev, PTR_ERR(hwmon), "Failed to register hwmon device\n");
+
+ /*
+	 * Only register a cooling device when cooling-levels were found.
+	 * qnap_mcu_hwmon_get_cooling_data() fails on malformed levels and
+	 * succeeds only with either no cooling levels or valid ones.
+ */
+ if (IS_ENABLED(CONFIG_THERMAL) && hwm->fan_cooling_levels) {
+ cdev = devm_thermal_of_cooling_device_register(dev,
+ to_of_node(hwm->fan_node), "qnap-mcu-hwmon",
+ hwm, &qnap_mcu_hwmon_cooling_ops);
+ if (IS_ERR(cdev))
+ return dev_err_probe(dev, PTR_ERR(cdev),
+ "Failed to register qnap-mcu-hwmon as cooling device\n");
+ hwm->cdev = cdev;
+ }
+
+ return 0;
+}
+
+static struct platform_driver qnap_mcu_hwmon_driver = {
+ .probe = qnap_mcu_hwmon_probe,
+ .driver = {
+ .name = "qnap-mcu-hwmon",
+ },
+};
+module_platform_driver(qnap_mcu_hwmon_driver);
+
+MODULE_ALIAS("platform:qnap-mcu-hwmon");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU hwmon driver");
+MODULE_LICENSE("GPL");
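
The three query helpers above share one shape: send a command, read a reply, verify that the reply echoes the command prefix, then decode the payload. If more MCU commands were added, the echo check could be factored out; a hypothetical consolidation, assuming the qnap_mcu_exec() signature used above:

static int qnap_mcu_exec_checked(struct qnap_mcu *mcu,
				 const u8 *cmd, size_t cmd_len,
				 u8 *reply, size_t reply_len, size_t echo)
{
	int ret;

	ret = qnap_mcu_exec(mcu, cmd, cmd_len, reply, reply_len);
	if (ret)
		return ret;

	/* a reply that does not echo the command indicates a framing error */
	return memcmp(cmd, reply, echo) ? -EIO : 0;
}
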
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 10ef1e1f9458..a2938881ccd2 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -128,10 +128,32 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
return 0;
}
+static int rpi_hwmon_suspend(struct device *dev)
+{
+ struct rpi_hwmon_data *data = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&data->get_values_poll_work);
+
+ return 0;
+}
+
+static int rpi_hwmon_resume(struct device *dev)
+{
+ struct rpi_hwmon_data *data = dev_get_drvdata(dev);
+
+ get_values_poll(&data->get_values_poll_work.work);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(rpi_hwmon_pm_ops, rpi_hwmon_suspend,
+ rpi_hwmon_resume);
+
static struct platform_driver rpi_hwmon_driver = {
.probe = rpi_hwmon_probe,
.driver = {
.name = "raspberrypi-hwmon",
+ .pm = pm_ptr(&rpi_hwmon_pm_ops),
},
};
module_platform_driver(rpi_hwmon_driver);
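
The DEFINE_SIMPLE_DEV_PM_OPS()/pm_ptr() pair used above is the modern replacement for #ifdef CONFIG_PM_SLEEP guards: with CONFIG_PM disabled, pm_ptr() evaluates to NULL and the callbacks can be discarded by the compiler. A sketch of the bare pattern, with hypothetical names:

static int my_suspend(struct device *dev)
{
	/* quiesce the hardware, cancel deferred work, ... */
	return 0;
}

static int my_resume(struct device *dev)
{
	/* restart polling, restore state, ... */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-driver",
		.pm = pm_ptr(&my_pm_ops),	/* NULL when CONFIG_PM=n */
	},
};
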
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index 6cee48a3e5c3..358152868d96 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -291,12 +291,6 @@ static umode_t spd5118_is_visible(const void *_data, enum hwmon_sensor_types typ
}
}
-static inline bool spd5118_parity8(u8 w)
-{
- w ^= w >> 4;
- return (0x6996 >> (w & 0xf)) & 1;
-}
-
/*
* Bank and vendor id are 8-bit fields with seven data bits and odd parity.
* Vendor IDs 0 and 0x7f are invalid.
@@ -304,7 +298,7 @@ static inline bool spd5118_parity8(u8 w)
*/
static bool spd5118_vendor_valid(u8 bank, u8 id)
{
- if (!spd5118_parity8(bank) || !spd5118_parity8(id))
+ if (parity8(bank) == 0 || parity8(id) == 0)
return false;
id &= 0x7f;
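
Both the removed helper and the generic parity8() rest on the same trick: XOR-fold the byte onto its low nibble, then use the 16-bit constant 0x6996, whose bit n is the parity of n, as a lookup table. A standalone verification (plain userspace C; __builtin_parity is the GCC/Clang builtin used as reference):

#include <stdio.h>
#include <stdint.h>

/* bit n of 0x6996 (0110 1001 1001 0110b) is the parity of the nibble n */
static int parity8_demo(uint8_t w)
{
	w ^= w >> 4;	/* parity(byte) == parity(high nibble ^ low nibble) */
	return (0x6996 >> (w & 0xf)) & 1;
}

int main(void)
{
	for (unsigned int v = 0; v < 256; v++) {
		if (parity8_demo((uint8_t)v) != __builtin_parity(v)) {
			printf("mismatch at %u\n", v);
			return 1;
		}
	}
	printf("0x6996 nibble-parity trick holds for all byte values\n");
	return 0;
}
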
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index fbe673009126..a971ff628435 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -8,15 +8,15 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/i3c/device.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define DRIVER_NAME "tmp108"
@@ -331,6 +331,10 @@ static int tmp108_common_probe(struct device *dev, struct regmap *regmap, char *
u32 config;
int err;
+ err = devm_regulator_get_enable(dev, "vcc");
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable regulator\n");
+
tmp108 = devm_kzalloc(dev, sizeof(*tmp108), GFP_KERNEL);
if (!tmp108)
return -ENOMEM;
@@ -417,25 +421,24 @@ static int tmp108_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
static const struct i2c_device_id tmp108_i2c_ids[] = {
+ { "p3t1085" },
{ "tmp108" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
-#ifdef CONFIG_OF
static const struct of_device_id tmp108_of_ids[] = {
{ .compatible = "nxp,p3t1085", },
{ .compatible = "ti,tmp108", },
{}
};
MODULE_DEVICE_TABLE(of, tmp108_of_ids);
-#endif
static struct i2c_driver tmp108_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = pm_sleep_ptr(&tmp108_dev_pm_ops),
- .of_match_table = of_match_ptr(tmp108_of_ids),
+ .of_match_table = tmp108_of_ids,
},
.probe = tmp108_probe,
.id_table = tmp108_i2c_ids,
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 1c2cb12071b8..5acbfd7d088d 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -207,7 +207,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
*val = sign_extend32(regval,
reg == TMP51X_SHUNT_CURRENT_RESULT ?
16 - tmp51x_get_pga_shift(data) : 15);
- *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
+ *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms);
+
break;
case TMP51X_BUS_VOLTAGE_RESULT:
case TMP51X_BUS_VOLTAGE_H_LIMIT:
@@ -223,7 +224,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
- *val = DIV_ROUND_CLOSEST(*val, MILLI);
+ *val = DIV_ROUND_CLOSEST(*val, (long)MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
case TMP51X_REMOTE_TEMP_RESULT_1:
@@ -263,7 +264,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
* The user enter current value and we convert it to
* voltage. 1lsb = 10uV
*/
- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
+ val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI);
max_val = U16_MAX >> tmp51x_get_pga_shift(data);
regval = clamp_val(val, -max_val, max_val);
break;
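
The (long) casts added above matter because shunt_uohms is unsigned: dividing a negative signed value by an unsigned one makes C convert the dividend to unsigned first, destroying negative readings. A minimal standalone illustration of that conversion rule (not the driver code itself):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t shunt_uv = -4000;	/* a negative shunt-voltage reading */
	uint32_t shunt_uohms = 1000;	/* unsigned, as in the driver struct */

	/* usual arithmetic conversions make the dividend unsigned... */
	printf("unsigned divide: %u\n", shunt_uv / shunt_uohms);	/* 4294963 */
	/* ...while casting keeps the division signed */
	printf("signed divide: %d\n", shunt_uv / (int32_t)shunt_uohms);	/* -4 */
	return 0;
}
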
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ceb3ecdf884b..eec95c724b25 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -503,7 +503,7 @@ config I2C_BRCMSTB
tristate "BRCM Settop/DSL I2C controller"
depends on ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || \
BMIPS_GENERIC || COMPILE_TEST
- default y
+ default ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || BMIPS_GENERIC
help
If you say yes to this option, support will be included for the
I2C interface on the Broadcom Settop/DSL SoCs.
@@ -910,7 +910,7 @@ config I2C_MXS
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
help
If you say yes to this option, support will be included for the
I2C interface from ST-Ericsson's Nomadik and Ux500 architectures,
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index fa0d5a2c3732..3621c02f1cba 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -211,7 +211,7 @@ static s32 amd756_access(struct i2c_adapter * adap, u16 addr,
SMB_HOST_ADDRESS);
outb_p(command, SMB_HOST_COMMAND);
if (read_write == I2C_SMBUS_WRITE)
- outw_p(data->word, SMB_HOST_DATA); /* TODO: endian???? */
+ outw_p(data->word, SMB_HOST_DATA);
size = AMD756_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -256,7 +256,7 @@ static s32 amd756_access(struct i2c_adapter * adap, u16 addr,
data->byte = inw_p(SMB_HOST_DATA);
break;
case AMD756_WORD_DATA:
- data->word = inw_p(SMB_HOST_DATA); /* TODO: endian???? */
+ data->word = inw_p(SMB_HOST_DATA);
break;
case AMD756_BLOCK_DATA:
data->block[0] = inw_p(SMB_HOST_DATA) & 0x3f;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 71dc0a6688b7..6a909d339681 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -11,23 +11,23 @@
*
* ----------------------------------------------------------------------------
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
+
#include <linux/clk.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/platform_data/i2c-davinci.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
/* ----- global defines ----------------------------------------------- */
@@ -117,6 +117,8 @@
/* timeout for pm runtime autosuspend */
#define DAVINCI_I2C_PM_TIMEOUT 1000 /* ms */
+#define DAVINCI_I2C_DEFAULT_BUS_FREQ 100
+
struct davinci_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -132,13 +134,10 @@ struct davinci_i2c_dev {
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
- struct davinci_i2c_platform_data *pdata;
-};
-
-/* default platform data to use if not supplied in the platform_device */
-static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = {
- .bus_freq = 100,
- .bus_delay = 0,
+ /* standard bus frequency (kHz) */
+ unsigned int bus_freq;
+	/* Chip has an ICPFUNC register */
+ bool has_pfunc;
};
static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
@@ -168,14 +167,12 @@ static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev,
static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
{
- struct davinci_i2c_platform_data *pdata = dev->pdata;
u16 psc;
u32 clk;
u32 d;
u32 clkh;
u32 clkl;
u32 input_clock = clk_get_rate(dev->clk);
- struct device_node *of_node = dev->dev->of_node;
/* NOTE: I2C Clock divider programming info
* As per I2C specs the following formulas provide prescaler
@@ -209,19 +206,19 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
psc++; /* better to run under spec than over */
d = (psc >= 2) ? 5 : 7 - psc;
- if (of_node && of_device_is_compatible(of_node, "ti,keystone-i2c"))
+ if (device_is_compatible(dev->dev, "ti,keystone-i2c"))
d = 6;
- clk = ((input_clock / (psc + 1)) / (pdata->bus_freq * 1000));
+ clk = ((input_clock / (psc + 1)) / (dev->bus_freq * 1000));
/* Avoid driving the bus too fast because of rounding errors above */
- if (input_clock / (psc + 1) / clk > pdata->bus_freq * 1000)
+ if (input_clock / (psc + 1) / clk > dev->bus_freq * 1000)
clk++;
/*
* According to I2C-BUS Spec 2.1, in FAST-MODE LOW period should be at
* least 1.3uS, which is not the case with 50% duty cycle. Driving HIGH
* to LOW ratio as 1 to 2 is more safe.
*/
- if (pdata->bus_freq > 100)
+ if (dev->bus_freq > 100)
clkl = (clk << 1) / 3;
else
clkl = (clk >> 1);
@@ -255,8 +252,6 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
*/
static int i2c_davinci_init(struct davinci_i2c_dev *dev)
{
- struct davinci_i2c_platform_data *pdata = dev->pdata;
-
/* put I2C into reset */
davinci_i2c_reset_ctrl(dev, 0);
@@ -274,8 +269,7 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKL_REG));
dev_dbg(dev->dev, "CLKH = %d\n",
davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKH_REG));
- dev_dbg(dev->dev, "bus_freq = %dkHz, bus_delay = %d\n",
- pdata->bus_freq, pdata->bus_delay);
+ dev_dbg(dev->dev, "bus_freq = %dkHz\n", dev->bus_freq);
/* Take the I2C module out of reset: */
@@ -309,12 +303,6 @@ static void davinci_i2c_unprepare_recovery(struct i2c_adapter *adap)
i2c_davinci_init(dev);
}
-static struct i2c_bus_recovery_info davinci_i2c_gpio_recovery_info = {
- .recover_bus = i2c_generic_scl_recovery,
- .prepare_recovery = davinci_i2c_prepare_recovery,
- .unprepare_recovery = davinci_i2c_unprepare_recovery,
-};
-
static void davinci_i2c_set_scl(struct i2c_adapter *adap, int val)
{
struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
@@ -414,7 +402,6 @@ static int
i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
{
struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
- struct davinci_i2c_platform_data *pdata = dev->pdata;
u32 flag;
u16 w;
unsigned long time_left;
@@ -424,10 +411,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
return -EADDRNOTAVAIL;
}
- /* Introduce a delay, required for some boards (e.g Davinci EVM) */
- if (pdata->bus_delay)
- udelay(pdata->bus_delay);
-
/* set the target address */
davinci_i2c_write_reg(dev, DAVINCI_I2C_SAR_REG, msg->addr);
@@ -758,8 +741,8 @@ static int davinci_i2c_probe(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev;
struct i2c_adapter *adap;
- struct i2c_bus_recovery_info *rinfo;
int r, irq;
+ u32 prop;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -773,29 +756,15 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->irq = irq;
- dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
- if (!dev->pdata && pdev->dev.of_node) {
- u32 prop;
-
- dev->pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct davinci_i2c_platform_data), GFP_KERNEL);
- if (!dev->pdata)
- return -ENOMEM;
-
- memcpy(dev->pdata, &davinci_i2c_platform_data_default,
- sizeof(struct davinci_i2c_platform_data));
- if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &prop))
- dev->pdata->bus_freq = prop / 1000;
-
- dev->pdata->has_pfunc =
- of_property_read_bool(pdev->dev.of_node,
- "ti,has-pfunc");
- } else if (!dev->pdata) {
- dev->pdata = &davinci_i2c_platform_data_default;
- }
+ r = device_property_read_u32(&pdev->dev, "clock-frequency", &prop);
+ if (r)
+ prop = DAVINCI_I2C_DEFAULT_BUS_FREQ;
+
+ dev->bus_freq = prop / 1000;
+
+ dev->has_pfunc = device_property_present(&pdev->dev, "ti,has-pfunc");
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
@@ -841,25 +810,10 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
adap->timeout = DAVINCI_I2C_TIMEOUT;
- adap->dev.of_node = pdev->dev.of_node;
+ adap->dev.of_node = dev_of_node(&pdev->dev);
- if (dev->pdata->has_pfunc)
+ if (dev->has_pfunc)
adap->bus_recovery_info = &davinci_i2c_scl_recovery_info;
- else if (dev->pdata->gpio_recovery) {
- rinfo = &davinci_i2c_gpio_recovery_info;
- adap->bus_recovery_info = rinfo;
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
- if (IS_ERR(rinfo->scl_gpiod)) {
- r = PTR_ERR(rinfo->scl_gpiod);
- goto err_unuse_clocks;
- }
- rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
- if (IS_ERR(rinfo->sda_gpiod)) {
- r = PTR_ERR(rinfo->sda_gpiod);
- goto err_unuse_clocks;
- }
- }
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 183a35038eef..8eb7bd640f8d 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -8,6 +8,9 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW_COMMON"
+
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -29,8 +32,6 @@
#include <linux/types.h>
#include <linux/units.h>
-#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW_COMMON"
-
#include "i2c-designware-core.h"
static const char *const abort_sources[] = {
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index c8cbe5b1aeb1..2569bf1a72e0 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -8,6 +8,9 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
+
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -22,8 +25,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
-
#include "i2c-designware-core.h"
#define AMD_TIMEOUT_MIN_US 25
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index dc2b788eac5b..5cd4a5f7a472 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -6,6 +6,9 @@
*
* Copyright (C) 2016 Synopsys Inc.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
+
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -16,8 +19,6 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
-
#include "i2c-designware-core.h"
static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
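
These DEFAULT_SYMBOL_NAMESPACE moves in the three designware files all fix the same ordering issue: the macro only affects EXPORT_SYMBOL() expansions the preprocessor sees after the define, so it has to precede any header that may expand exports. The resulting pattern, sketched with hypothetical names:

/* must come before any include that can expand EXPORT_SYMBOL() */
#define DEFAULT_SYMBOL_NAMESPACE	"MY_NS"

#include <linux/export.h>
#include <linux/module.h>

int my_shared_helper(void)
{
	return 0;
}
EXPORT_SYMBOL(my_shared_helper);	/* lands in namespace "MY_NS" */

/* a consumer module must then declare: MODULE_IMPORT_NS("MY_NS"); */
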
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index e330015087ab..6cdd957ea7e4 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -168,6 +168,7 @@ enum i2c_type_exynos {
I2C_TYPE_EXYNOS5,
I2C_TYPE_EXYNOS7,
I2C_TYPE_EXYNOSAUTOV9,
+ I2C_TYPE_EXYNOS8895,
};
struct exynos5_i2c {
@@ -240,6 +241,11 @@ static const struct exynos_hsi2c_variant exynosautov9_hsi2c_data = {
.hw = I2C_TYPE_EXYNOSAUTOV9,
};
+static const struct exynos_hsi2c_variant exynos8895_hsi2c_data = {
+ .fifo_depth = 64,
+ .hw = I2C_TYPE_EXYNOS8895,
+};
+
static const struct of_device_id exynos5_i2c_match[] = {
{
.compatible = "samsung,exynos5-hsi2c",
@@ -256,6 +262,9 @@ static const struct of_device_id exynos5_i2c_match[] = {
}, {
.compatible = "samsung,exynosautov9-hsi2c",
.data = &exynosautov9_hsi2c_data
+ }, {
+ .compatible = "samsung,exynos8895-hsi2c",
+ .data = &exynos8895_hsi2c_data
}, {},
};
MODULE_DEVICE_TABLE(of, exynos5_i2c_match);
@@ -331,6 +340,14 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
* clk_cycle := TSCLK_L + TSCLK_H
* temp := (CLK_DIV + 1) * (clk_cycle + 2)
*
+ * In case of HSI2C controllers in Exynos8895
+ * FPCLK / FI2C =
+ * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) +
+ * 2 * ((FLT_CYCLE + 3) - (FLT_CYCLE + 3) % (CLK_DIV + 1))
+ *
+ * clk_cycle := TSCLK_L + TSCLK_H
+ * temp := (FPCLK / FI2C) - (FLT_CYCLE + 3) * 2
+ *
* Constraints: 4 <= temp, 0 <= CLK_DIV < 256, 2 <= clk_cycle <= 510
*
* To split SCL clock into low, high periods appropriately, one
@@ -352,11 +369,19 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
*
*/
t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
- temp = clkin / op_clk - 8 - t_ftl_cycle;
- if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
- temp -= t_ftl_cycle;
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS8895)
+ temp = clkin / op_clk - (t_ftl_cycle + 3) * 2;
+ else if (i2c->variant->hw == I2C_TYPE_EXYNOS7)
+ temp = clkin / op_clk - 8 - t_ftl_cycle;
+ else
+ temp = clkin / op_clk - 8 - (t_ftl_cycle * 2);
div = temp / 512;
- clk_cycle = temp / (div + 1) - 2;
+
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS8895)
+ clk_cycle = (temp + ((t_ftl_cycle + 3) % (div + 1)) * 2) /
+ (div + 1) - 2;
+ else
+ clk_cycle = temp / (div + 1) - 2;
if (temp < 4 || div >= 256 || clk_cycle < 2) {
dev_err(i2c->dev, "%s clock set-up failed\n",
hs_timings ? "HS" : "FS");
@@ -491,6 +516,8 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
switch (i2c->variant->hw) {
case I2C_TYPE_EXYNOSAUTOV9:
fallthrough;
+ case I2C_TYPE_EXYNOS8895:
+ fallthrough;
case I2C_TYPE_EXYNOS7:
if (int_status & HSI2C_INT_TRANS_DONE) {
i2c->trans_done = 1;
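
Plugging numbers into the new Exynos8895 branch makes the timing math concrete. With clkin = 133 MHz, op_clk = 400 kHz and FLT_CYCLE = 2 (values assumed purely for illustration), a standalone re-computation:

#include <stdio.h>

int main(void)
{
	unsigned int clkin = 133000000, op_clk = 400000, t_ftl_cycle = 2;
	unsigned int temp, div, clk_cycle;

	temp = clkin / op_clk - (t_ftl_cycle + 3) * 2;	/* 332 - 10 = 322 */
	div = temp / 512;				/* 0 */
	clk_cycle = (temp + ((t_ftl_cycle + 3) % (div + 1)) * 2)
		    / (div + 1) - 2;			/* 320 */

	/* all constraints hold: temp >= 4, div < 256, 2 <= clk_cycle <= 510 */
	printf("temp=%u div=%u clk_cycle=%u\n", temp, div, clk_cycle);
	return 0;
}
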
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 75dab01d43a7..171d29d2770e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1162,127 +1162,6 @@ static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
}
}
-/* NOTE: Keep this list in sync with drivers/platform/x86/dell-smo8800.c */
-static const char *const acpi_smo8800_ids[] = {
- "SMO8800",
- "SMO8801",
- "SMO8810",
- "SMO8811",
- "SMO8820",
- "SMO8821",
- "SMO8830",
- "SMO8831",
-};
-
-static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
- u32 nesting_level,
- void *context,
- void **return_value)
-{
- struct acpi_device_info *info;
- acpi_status status;
- char *hid;
- int i;
-
- status = acpi_get_object_info(obj_handle, &info);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- if (!(info->valid & ACPI_VALID_HID))
- goto smo88xx_not_found;
-
- hid = info->hardware_id.string;
- if (!hid)
- goto smo88xx_not_found;
-
- i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
- if (i < 0)
- goto smo88xx_not_found;
-
- kfree(info);
-
- *return_value = NULL;
- return AE_CTRL_TERMINATE;
-
-smo88xx_not_found:
- kfree(info);
- return AE_OK;
-}
-
-static bool is_dell_system_with_lis3lv02d(void)
-{
- void *err = ERR_PTR(-ENOENT);
-
- if (!dmi_match(DMI_SYS_VENDOR, "Dell Inc."))
- return false;
-
- /*
- * Check that ACPI device SMO88xx is present and is functioning.
- * Function acpi_get_devices() already filters all ACPI devices
- * which are not present or are not functioning.
- * ACPI device SMO88xx represents our ST microelectronics lis3lv02d
- * accelerometer but unfortunately ACPI does not provide any other
- * information (like I2C address).
- */
- acpi_get_devices(NULL, check_acpi_smo88xx_device, NULL, &err);
-
- return !IS_ERR(err);
-}
-
-/*
- * Accelerometer's I2C address is not specified in DMI nor ACPI,
- * so it is needed to define mapping table based on DMI product names.
- */
-static const struct {
- const char *dmi_product_name;
- unsigned short i2c_addr;
-} dell_lis3lv02d_devices[] = {
- /*
- * Dell platform team told us that these Latitude devices have
- * ST microelectronics accelerometer at I2C address 0x29.
- */
- { "Latitude E5250", 0x29 },
- { "Latitude E5450", 0x29 },
- { "Latitude E5550", 0x29 },
- { "Latitude E6440", 0x29 },
- { "Latitude E6440 ATG", 0x29 },
- { "Latitude E6540", 0x29 },
- /*
- * Additional individual entries were added after verification.
- */
- { "Latitude 5480", 0x29 },
- { "Precision 3540", 0x29 },
- { "Vostro V131", 0x1d },
- { "Vostro 5568", 0x29 },
- { "XPS 15 7590", 0x29 },
-};
-
-static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
-{
- struct i2c_board_info info;
- const char *dmi_product_name;
- int i;
-
- dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
- for (i = 0; i < ARRAY_SIZE(dell_lis3lv02d_devices); ++i) {
- if (strcmp(dmi_product_name,
- dell_lis3lv02d_devices[i].dmi_product_name) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(dell_lis3lv02d_devices)) {
- dev_warn(&priv->pci_dev->dev,
- "Accelerometer lis3lv02d is present on SMBus but its"
- " address is unknown, skipping registration\n");
- return;
- }
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
- i2c_new_client_device(&priv->adapter, &info);
-}
-
/* Register optional targets */
static void i801_probe_optional_targets(struct i801_priv *priv)
{
@@ -1302,9 +1181,6 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
if (dmi_name_in_vendors("FUJITSU"))
dmi_walk(dmi_check_onboard_devices, &priv->adapter);
- if (is_dell_system_with_lis3lv02d())
- register_dell_lis3lv02d_i2c_device(priv);
-
/* Instantiate SPD EEPROMs unless the SMBus is multiplexed */
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
@@ -1682,13 +1558,16 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!(priv->features & FEATURE_BLOCK_BUFFER))
priv->features &= ~FEATURE_BLOCK_PROC;
- err = pcim_enable_device(dev);
+ /*
+ * Do not call pcim_enable_device(), because the device has to remain
+ * enabled on driver detach. See i801_remove() for the reasoning.
+ */
+ err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
err);
return err;
}
- pcim_pin_device(dev);
/* Determine the address of the SMBus area */
priv->smba = pci_resource_start(dev, SMBBAR);
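
The switch from pcim_enable_device() to pci_enable_device() above is deliberate: the devres-managed variant disables the PCI device again on driver detach, while this SMBus controller must remain enabled (the removed pcim_pin_device() call existed only to cancel that auto-disable). A sketch of the unmanaged pattern, purely illustrative:

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	/* unmanaged enable: the device stays enabled after unbind */
	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* ... map BARs, register with subsystems ... */
	return 0;
}
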
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 8adf2963d764..0d4b3935e687 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -8,6 +8,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -29,6 +31,7 @@
#define LPI2C_MCR 0x10 /* i2c contrl register */
#define LPI2C_MSR 0x14 /* i2c status register */
#define LPI2C_MIER 0x18 /* i2c interrupt enable */
+#define LPI2C_MDER 0x1C /* i2c DMA enable */
#define LPI2C_MCFGR0 0x20 /* i2c master configuration */
#define LPI2C_MCFGR1 0x24 /* i2c master configuration */
#define LPI2C_MCFGR2 0x28 /* i2c master configuration */
@@ -40,6 +43,20 @@
#define LPI2C_MTDR 0x60 /* i2c master TX data register */
#define LPI2C_MRDR 0x70 /* i2c master RX data register */
+#define LPI2C_SCR 0x110 /* i2c target control register */
+#define LPI2C_SSR 0x114 /* i2c target status register */
+#define LPI2C_SIER 0x118 /* i2c target interrupt enable */
+#define LPI2C_SDER 0x11C /* i2c target DMA enable */
+#define LPI2C_SCFGR0 0x120 /* i2c target configuration */
+#define LPI2C_SCFGR1 0x124 /* i2c target configuration */
+#define LPI2C_SCFGR2 0x128 /* i2c target configuration */
+#define LPI2C_SAMR 0x140 /* i2c target address match */
+#define LPI2C_SASR 0x150 /* i2c target address status */
+#define LPI2C_STAR 0x154 /* i2c target transmit ACK */
+#define LPI2C_STDR 0x160 /* i2c target transmit data */
+#define LPI2C_SRDR 0x170 /* i2c target receive data */
+#define LPI2C_SRDROR 0x178 /* i2c target receive data read only */
+
/* i2c command */
#define TRAN_DATA 0X00
#define RECV_DATA 0X01
@@ -70,11 +87,50 @@
#define MCFGR1_AUTOSTOP BIT(8)
#define MCFGR1_IGNACK BIT(9)
#define MRDR_RXEMPTY BIT(14)
+#define MDER_TDDE BIT(0)
+#define MDER_RDDE BIT(1)
+
+#define SCR_SEN BIT(0)
+#define SCR_RST BIT(1)
+#define SCR_FILTEN BIT(4)
+#define SCR_RTF BIT(8)
+#define SCR_RRF BIT(9)
+#define SSR_TDF BIT(0)
+#define SSR_RDF BIT(1)
+#define SSR_AVF BIT(2)
+#define SSR_TAF BIT(3)
+#define SSR_RSF BIT(8)
+#define SSR_SDF BIT(9)
+#define SSR_BEF BIT(10)
+#define SSR_FEF BIT(11)
+#define SSR_SBF BIT(24)
+#define SSR_BBF BIT(25)
+#define SSR_CLEAR_BITS (SSR_RSF | SSR_SDF | SSR_BEF | SSR_FEF)
+#define SIER_TDIE BIT(0)
+#define SIER_RDIE BIT(1)
+#define SIER_AVIE BIT(2)
+#define SIER_TAIE BIT(3)
+#define SIER_RSIE BIT(8)
+#define SIER_SDIE BIT(9)
+#define SIER_BEIE BIT(10)
+#define SIER_FEIE BIT(11)
+#define SIER_AM0F BIT(12)
+#define SCFGR1_RXSTALL BIT(1)
+#define SCFGR1_TXDSTALL BIT(2)
+#define SCFGR2_FILTSDA_SHIFT 24
+#define SCFGR2_FILTSCL_SHIFT 16
+#define SCFGR2_CLKHOLD(x) (x)
+#define SCFGR2_FILTSDA(x) ((x) << SCFGR2_FILTSDA_SHIFT)
+#define SCFGR2_FILTSCL(x) ((x) << SCFGR2_FILTSCL_SHIFT)
+#define SASR_READ_REQ 0x1
+#define SLAVE_INT_FLAG (SIER_TDIE | SIER_RDIE | SIER_AVIE | \
+ SIER_SDIE | SIER_BEIE)
#define I2C_CLK_RATIO 2
#define CHUNK_DATA 256
#define I2C_PM_TIMEOUT 10 /* ms */
+#define I2C_DMA_THRESHOLD 8 /* bytes */
enum lpi2c_imx_mode {
STANDARD, /* 100+Kbps */
@@ -91,6 +147,24 @@ enum lpi2c_imx_pincfg {
FOUR_PIN_PP,
};
+struct lpi2c_imx_dma {
+ bool using_pio_mode;
+ u8 rx_cmd_buf_len;
+ u8 *dma_buf;
+ u16 *rx_cmd_buf;
+ unsigned int dma_len;
+ unsigned int tx_burst_num;
+ unsigned int rx_burst_num;
+ unsigned long dma_msg_flag;
+ resource_size_t phy_addr;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_data_dir;
+ enum dma_transfer_direction dma_transfer_dir;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+};
+
struct lpi2c_imx_struct {
struct i2c_adapter adapter;
int num_clks;
@@ -108,6 +182,9 @@ struct lpi2c_imx_struct {
unsigned int rxfifosize;
enum lpi2c_imx_mode mode;
struct i2c_bus_recovery_info rinfo;
+ bool can_use_dma;
+ struct lpi2c_imx_dma *dma;
+ struct i2c_client *target;
};
static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
@@ -305,7 +382,7 @@ static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
return 0;
}
-static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
{
unsigned long time_left;
@@ -451,6 +528,425 @@ static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
}
+static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
+{
+ if (!lpi2c_imx->can_use_dma)
+ return false;
+
+ /*
+	 * For transfers shorter than I2C_DMA_THRESHOLD, PIO is used
+	 * directly: the DMA setup overhead would outweigh any gain.
+ */
+ return !(msg->len < I2C_DMA_THRESHOLD);
+}
+
+static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ reinit_completion(&lpi2c_imx->complete);
+
+ if (msg->flags & I2C_M_RD)
+ lpi2c_imx_read(lpi2c_imx, msg);
+ else
+ lpi2c_imx_write(lpi2c_imx, msg);
+
+ return lpi2c_imx_pio_msg_complete(lpi2c_imx);
+}
+
+static int lpi2c_imx_dma_timeout_calculate(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long time = 0;
+
+ time = 8 * lpi2c_imx->dma->dma_len * 1000 / lpi2c_imx->bitrate;
+
+	/* Add a small margin for scheduler-related latency */
+ time += 1;
+
+	/* Scale up generously so a heavily loaded system cannot time out early */
+ return msecs_to_jiffies(time * MSEC_PER_SEC);
+}
+
+static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ u16 rx_remain = dma->dma_len;
+ int cmd_num;
+ u16 temp;
+
+ /*
+ * Calculate the number of rx command words via the DMA TX channel
+ * writing into command register based on the i2c msg len, and build
+ * the rx command words buffer.
+ */
+ cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
+ dma->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
+ dma->rx_cmd_buf_len = cmd_num * sizeof(u16);
+
+ if (!dma->rx_cmd_buf) {
+ dev_err(&lpi2c_imx->adapter.dev, "Alloc RX cmd buffer failed\n");
+ return -ENOMEM;
+ }
+
+	for (int i = 0; i < cmd_num; i++) {
+ temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
+ temp |= (RECV_DATA << 8);
+ rx_remain -= CHUNK_DATA;
+ dma->rx_cmd_buf[i] = temp;
+ }
+
+ return 0;
+}
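
Working through the loop above for an assumed 600-byte read: cmd_num = 3 and the command words come out as 0x01ff, 0x01ff and 0x0157, each asking the controller to receive (DATA[7:0] + 1) bytes. A standalone re-run of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

#define CHUNK_DATA	256
#define RECV_DATA	0x01

int main(void)
{
	unsigned int rx_remain = 600;	/* illustrative message length */
	unsigned int cmd_num = (rx_remain + CHUNK_DATA - 1) / CHUNK_DATA;

	for (unsigned int i = 0; i < cmd_num; i++) {
		uint16_t word = (rx_remain > CHUNK_DATA ? CHUNK_DATA - 1
							: rx_remain - 1)
				| (RECV_DATA << 8);
		/* wraps on the last pass, exactly like the u16 in the driver */
		rx_remain -= CHUNK_DATA;
		printf("cmd[%u] = 0x%04x\n", i, word);
	}
	return 0;	/* prints 0x01ff, 0x01ff, 0x0157 */
}
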
+
+static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long time_left, time;
+
+ time = lpi2c_imx_dma_timeout_calculate(lpi2c_imx);
+ time_left = wait_for_completion_timeout(&lpi2c_imx->complete, time);
+ if (time_left == 0) {
+ dev_err(&lpi2c_imx->adapter.dev, "I/O Error in DMA Data Transfer\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_unmap(struct lpi2c_imx_dma *dma)
+{
+ struct dma_chan *chan = dma->dma_data_dir == DMA_FROM_DEVICE
+ ? dma->chan_rx : dma->chan_tx;
+
+ dma_unmap_single(chan->device->dev, dma->dma_addr,
+ dma->dma_len, dma->dma_data_dir);
+
+ dma->dma_data_dir = DMA_NONE;
+}
+
+static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_dma *dma)
+{
+ dmaengine_terminate_sync(dma->chan_tx);
+ dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+}
+
+static void lpi2c_cleanup_dma(struct lpi2c_imx_dma *dma)
+{
+ if (dma->dma_data_dir == DMA_FROM_DEVICE)
+ dmaengine_terminate_sync(dma->chan_rx);
+ else if (dma->dma_data_dir == DMA_TO_DEVICE)
+ dmaengine_terminate_sync(dma->chan_tx);
+
+ lpi2c_dma_unmap(dma);
+}
+
+static void lpi2c_dma_callback(void *data)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = (struct lpi2c_imx_struct *)data;
+
+ complete(&lpi2c_imx->complete);
+}
+
+static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct dma_async_tx_descriptor *rx_cmd_desc;
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_chan *txchan = dma->chan_tx;
+ dma_cookie_t cookie;
+
+ dma->dma_tx_addr = dma_map_single(txchan->device->dev,
+ dma->rx_cmd_buf, dma->rx_cmd_buf_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(txchan->device->dev, dma->dma_tx_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ rx_cmd_desc = dmaengine_prep_slave_single(txchan, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rx_cmd_desc) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
+ goto desc_prepare_err_exit;
+ }
+
+ cookie = dmaengine_submit(rx_cmd_desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
+ goto submit_err_exit;
+ }
+
+ dma_async_issue_pending(txchan);
+
+ return 0;
+
+desc_prepare_err_exit:
+ dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ return -EINVAL;
+
+submit_err_exit:
+ dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ dmaengine_desc_free(rx_cmd_desc);
+ return -EINVAL;
+}
+
+static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ chan = dma->chan_rx;
+ dma->dma_data_dir = DMA_FROM_DEVICE;
+ dma->dma_transfer_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = dma->chan_tx;
+ dma->dma_data_dir = DMA_TO_DEVICE;
+ dma->dma_transfer_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma->dma_addr = dma_map_single(chan->device->dev,
+ dma->dma_buf, dma->dma_len, dma->dma_data_dir);
+ if (dma_mapping_error(chan->device->dev, dma->dma_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma->dma_addr,
+ dma->dma_len, dma->dma_transfer_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
+ goto desc_prepare_err_exit;
+ }
+
+ reinit_completion(&lpi2c_imx->complete);
+ desc->callback = lpi2c_dma_callback;
+ desc->callback_param = lpi2c_imx;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
+ goto submit_err_exit;
+ }
+
+ /* Can't switch to PIO mode when DMA have started transfer */
+ dma->using_pio_mode = false;
+
+ dma_async_issue_pending(chan);
+
+ return 0;
+
+desc_prepare_err_exit:
+ lpi2c_dma_unmap(dma);
+ return -EINVAL;
+
+submit_err_exit:
+ lpi2c_dma_unmap(dma);
+ dmaengine_desc_free(desc);
+ return -EINVAL;
+}
+
+static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = fifosize / 2; i > 0; i--)
+ if (!(len % i))
+ break;
+
+ return i;
+}
+
+/*
+ * For the highest DMA efficiency, the TX/RX burst lengths should be
+ * derived from the FIFO depth.
+ */
+static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ unsigned int cmd_num;
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ /*
+		 * One RX command word can trigger the DMA to receive at most
+		 * 256 bytes, so the number of RX command words has to be
+		 * derived from the message length.
+ */
+ cmd_num = DIV_ROUND_UP(dma->dma_len, CHUNK_DATA);
+ dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+ cmd_num);
+ dma->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
+ dma->dma_len);
+ } else {
+ dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+ dma->dma_len);
+ }
+}
+
+static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_slave_config rx = {}, tx = {};
+ int ret;
+
+ lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ tx.dst_maxburst = dma->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &tx);
+ if (ret < 0)
+ return ret;
+
+ rx.src_addr = dma->phy_addr + LPI2C_MRDR;
+ rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ rx.src_maxburst = dma->rx_burst_num;
+ rx.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &rx);
+ if (ret < 0)
+ return ret;
+ } else {
+ tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ tx.dst_maxburst = dma->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &tx);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ /*
+	 * The TX interrupt triggers when the number of words in the
+	 * transmit FIFO is equal to or less than the TX watermark; the
+	 * RX interrupt triggers when the number of words in the receive
+	 * FIFO is greater than the RX watermark.
+	 * To trigger the DMA requests, the TX watermark must therefore be
+	 * set equal to the DMA TX burst length, while the RX watermark
+	 * must be set below the DMA RX burst length.
+ */
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ /* Set I2C TX/RX watermark */
+ writel(dma->tx_burst_num | (dma->rx_burst_num - 1) << 16,
+ lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX/RX function */
+ writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
+ } else {
+ /* Set I2C TX watermark */
+ writel(dma->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX function */
+ writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
+ }
+
+ /* Enable NACK detected */
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
+}
+
+/*
+ * When the LPI2C is in TX DMA mode, one DMA TX channel is enough to
+ * write data words into the TXFIFO, but RX DMA mode is different.
+ *
+ * The LPI2C MTDR register is a command data and transmit data register.
+ * Bits 8-10 are the command data field and bits 0-7 are the transmit
+ * data field. When the LPI2C master needs to read data, RECV_DATA is
+ * placed in the command data field and the number of bytes to read,
+ * minus one, in the transmit data field, so that the controller
+ * receives (DATA[7:0] + 1) bytes. The receive command word is thus
+ * made of RECV_DATA in the command data field and the byte count in
+ * the transmit data field. When the length of data to be read exceeds
+ * 256 bytes, such receive command words need to be written to the
+ * TXFIFO multiple times.
+ *
+ * So in RX DMA mode the TX channel must also be configured, to send
+ * the RX command words, and these must be queued before transmitting.
+ */
+static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ int ret;
+
+	/* If DMA setup fails before the transfer starts, fall back to PIO. */
+ dma->using_pio_mode = true;
+
+ dma->dma_len = msg->len;
+ dma->dma_msg_flag = msg->flags;
+ dma->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
+ if (!dma->dma_buf)
+ return -ENOMEM;
+
+ ret = lpi2c_dma_config(lpi2c_imx);
+ if (ret) {
+ dev_err(&lpi2c_imx->adapter.dev, "Failed to configure DMA (%d)\n", ret);
+ goto disable_dma;
+ }
+
+ lpi2c_dma_enable(lpi2c_imx);
+
+ ret = lpi2c_dma_submit(lpi2c_imx);
+ if (ret) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA submission failed (%d)\n", ret);
+ goto disable_dma;
+ }
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ ret = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_data_dma;
+
+ ret = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_data_dma;
+ }
+
+ ret = lpi2c_imx_dma_msg_complete(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_all_dma;
+
+	/* On a NACK during the transfer, clean up all DMA transfers */
+ if ((readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) && !ret) {
+ ret = -EIO;
+ goto disable_cleanup_all_dma;
+ }
+
+ if (dma->dma_msg_flag & I2C_M_RD)
+ dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ lpi2c_dma_unmap(dma);
+
+ goto disable_dma;
+
+disable_cleanup_all_dma:
+ if (dma->dma_msg_flag & I2C_M_RD)
+ lpi2c_cleanup_rx_cmd_dma(dma);
+disable_cleanup_data_dma:
+ lpi2c_cleanup_dma(dma);
+disable_dma:
+ /* Disable I2C DMA function */
+ writel(0, lpi2c_imx->base + LPI2C_MDER);
+
+ if (dma->dma_msg_flag & I2C_M_RD)
+ kfree(dma->rx_cmd_buf);
+
+ if (ret)
+ i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, false);
+ else
+ i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, true);
+
+ return ret;
+}
+
static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
@@ -477,12 +973,14 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);
- if (msgs[i].flags & I2C_M_RD)
- lpi2c_imx_read(lpi2c_imx, &msgs[i]);
- else
- lpi2c_imx_write(lpi2c_imx, &msgs[i]);
+ if (is_use_dma(lpi2c_imx, &msgs[i])) {
+ result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
+ if (result && lpi2c_imx->dma->using_pio_mode)
+ result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+ } else {
+ result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+ }
- result = lpi2c_imx_msg_complete(lpi2c_imx);
if (result)
goto stop;
@@ -510,9 +1008,56 @@ disable:
return (result < 0) ? result : num;
}
-static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+static irqreturn_t lpi2c_imx_target_isr(struct lpi2c_imx_struct *lpi2c_imx,
+ u32 ssr, u32 sier_filter)
+{
+ u8 value;
+ u32 sasr;
+
+ /* Arbitration lost */
+ if (sier_filter & SSR_BEF) {
+ writel(0, lpi2c_imx->base + LPI2C_SIER);
+ return IRQ_HANDLED;
+ }
+
+ /* Address detected */
+ if (sier_filter & SSR_AVF) {
+ sasr = readl(lpi2c_imx->base + LPI2C_SASR);
+ if (SASR_READ_REQ & sasr) {
+ /* Read request */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_REQUESTED, &value);
+ writel(value, lpi2c_imx->base + LPI2C_STDR);
+ goto ret;
+ } else {
+ /* Write request */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_REQUESTED, &value);
+ }
+ }
+
+ if (sier_filter & SSR_SDF)
+ /* STOP */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_STOP, &value);
+
+ if (sier_filter & SSR_TDF) {
+ /* Target send data */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_PROCESSED, &value);
+ writel(value, lpi2c_imx->base + LPI2C_STDR);
+ }
+
+ if (sier_filter & SSR_RDF) {
+ /* Target receive data */
+ value = readl(lpi2c_imx->base + LPI2C_SRDR);
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_RECEIVED, &value);
+ }
+
+ret:
+ /* Clear SSR */
+ writel(ssr & SSR_CLEAR_BITS, lpi2c_imx->base + LPI2C_SSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lpi2c_imx_master_isr(struct lpi2c_imx_struct *lpi2c_imx)
{
- struct lpi2c_imx_struct *lpi2c_imx = dev_id;
unsigned int enabled;
unsigned int temp;
@@ -532,6 +1077,124 @@ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+
+ if (lpi2c_imx->target) {
+ u32 scr = readl(lpi2c_imx->base + LPI2C_SCR);
+ u32 ssr = readl(lpi2c_imx->base + LPI2C_SSR);
+ u32 sier_filter = ssr & readl(lpi2c_imx->base + LPI2C_SIER);
+
+ /*
+ * The target is enabled and an interrupt has been triggered.
+ * Enter the target's irq handler.
+ */
+ if ((scr & SCR_SEN) && sier_filter)
+ return lpi2c_imx_target_isr(lpi2c_imx, ssr, sier_filter);
+ }
+
+ /*
+ * Otherwise the interrupt has been triggered by the master.
+ * Enter the master's irq handler.
+ */
+ return lpi2c_imx_master_isr(lpi2c_imx);
+}
+
+static void lpi2c_imx_target_init(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u32 temp;
+
+ /* reset target module */
+ writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
+ writel(0, lpi2c_imx->base + LPI2C_SCR);
+
+ /* Set target address */
+ writel((lpi2c_imx->target->addr << 1), lpi2c_imx->base + LPI2C_SAMR);
+
+ writel(SCFGR1_RXSTALL | SCFGR1_TXDSTALL, lpi2c_imx->base + LPI2C_SCFGR1);
+
+ /*
+ * set SCFGR2: FILTSDA, FILTSCL and CLKHOLD
+ *
+ * FILTSCL/FILTSDA can eliminate signal skew. It should generally be
+ * set to the same value and should be set >= 50ns.
+ *
+ * CLKHOLD is only used when clock stretching is enabled, but it will
+ * extend the clock stretching to ensure there is an additional delay
+ * between the target driving SDA and the target releasing the SCL pin.
+ *
+	 * The CLKHOLD setting is crucial for the LPI2C target. When the
+	 * master reads data from the target and a delay (CPU idle, excessive
+	 * load, or similar) occurs between two bytes of one message, the
+	 * interval between the target driving SDA and releasing SCL becomes
+	 * too short. The LPI2C master then mistakes this for a stop
+	 * condition, which results in an arbitration failure. Setting
+	 * CLKHOLD avoids this issue.
+	 *
+	 * To ensure the LPI2C works reliably at speeds as low as 100 kHz,
+	 * CLKHOLD should be set to 3; this value is also compatible with
+	 * higher clock frequencies such as 400 kHz and 1 MHz.
+ */
+ temp = SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3);
+ writel(temp, lpi2c_imx->base + LPI2C_SCFGR2);
+
+ /*
+	 * Enable the module:
+	 * SCR_FILTEN enables the digital filter and the output delay counter
+	 * in LPI2C target mode, so it must be asserted when the SDA/SCL
+	 * filters and CLKHOLD are used.
+ */
+ writel(SCR_SEN | SCR_FILTEN, lpi2c_imx->base + LPI2C_SCR);
+
+ /* Enable interrupt from i2c module */
+ writel(SLAVE_INT_FLAG, lpi2c_imx->base + LPI2C_SIER);
+}
+
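
The target path above only moves bus events to the I2C core; the actual data handling lives in whichever backend registered the client via i2c_slave_register(). A minimal sketch of such a backend callback (a one-byte register file, purely illustrative), consuming the events that lpi2c_imx_target_isr() dispatches:

static int my_target_cb(struct i2c_client *client,
			enum i2c_slave_event event, u8 *val)
{
	static u8 reg;	/* single-byte "register file" for illustration */

	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		reg = *val;		/* remote master wrote a byte */
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = reg;		/* remote master reads it back */
		break;
	default:			/* WRITE_REQUESTED, STOP */
		break;
	}

	return 0;
}
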
+static int lpi2c_imx_register_target(struct i2c_client *client)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
+ int ret;
+
+ if (lpi2c_imx->target)
+ return -EBUSY;
+
+ lpi2c_imx->target = client;
+
+ ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
+ if (ret < 0) {
+		dev_err(&lpi2c_imx->adapter.dev, "failed to resume i2c controller\n");
+ return ret;
+ }
+
+ lpi2c_imx_target_init(lpi2c_imx);
+
+ return 0;
+}
+
+static int lpi2c_imx_unregister_target(struct i2c_client *client)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
+ int ret;
+
+ if (!lpi2c_imx->target)
+ return -EINVAL;
+
+ /* Reset target address. */
+ writel(0, lpi2c_imx->base + LPI2C_SAMR);
+
+ writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
+ writel(0, lpi2c_imx->base + LPI2C_SCR);
+
+ lpi2c_imx->target = NULL;
+
+ ret = pm_runtime_put_sync(lpi2c_imx->adapter.dev.parent);
+ if (ret < 0)
+		dev_err(&lpi2c_imx->adapter.dev, "failed to suspend i2c controller\n");
+
+ return ret;
+}
+
static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
struct platform_device *pdev)
{
@@ -546,6 +1209,58 @@ static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
return 0;
}
+static void dma_exit(struct device *dev, struct lpi2c_imx_dma *dma)
+{
+ if (dma->chan_rx)
+ dma_release_channel(dma->chan_rx);
+
+ if (dma->chan_tx)
+ dma_release_channel(dma->chan_tx);
+
+ devm_kfree(dev, dma);
+}
+
+static int lpi2c_dma_init(struct device *dev, dma_addr_t phy_addr)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
+ struct lpi2c_imx_dma *dma;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->phy_addr = phy_addr;
+
+ /* Prepare for TX DMA: */
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+ ret = PTR_ERR(dma->chan_tx);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
+ dma->chan_tx = NULL;
+ goto dma_exit;
+ }
+
+ /* Prepare for RX DMA: */
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
+ ret = PTR_ERR(dma->chan_rx);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
+ dma->chan_rx = NULL;
+ goto dma_exit;
+ }
+
+ lpi2c_imx->can_use_dma = true;
+ lpi2c_imx->dma = dma;
+ return 0;
+
+dma_exit:
+ dma_exit(dev, dma);
+ return ret;
+}
+
static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
@@ -555,6 +1270,8 @@ static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
static const struct i2c_algorithm lpi2c_imx_algo = {
.master_xfer = lpi2c_imx_xfer,
.functionality = lpi2c_imx_func,
+ .reg_target = lpi2c_imx_register_target,
+ .unreg_target = lpi2c_imx_unregister_target,
};
static const struct of_device_id lpi2c_imx_of_match[] = {
@@ -566,6 +1283,8 @@ MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
static int lpi2c_imx_probe(struct platform_device *pdev)
{
struct lpi2c_imx_struct *lpi2c_imx;
+ struct resource *res;
+ dma_addr_t phy_addr;
unsigned int temp;
int irq, ret;
@@ -573,7 +1292,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (!lpi2c_imx)
return -ENOMEM;
- lpi2c_imx->base = devm_platform_ioremap_resource(pdev, 0);
+ lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(lpi2c_imx->base))
return PTR_ERR(lpi2c_imx->base);
@@ -587,6 +1306,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
+ phy_addr = (dma_addr_t)res->start;
ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
if (ret < 0)
@@ -598,7 +1318,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret)
lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
- ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
pdev->name, lpi2c_imx);
if (ret)
return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);
@@ -640,6 +1360,14 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto rpm_disable;
+ /* Init DMA */
+ ret = lpi2c_dma_init(&pdev->dev, phy_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto rpm_disable;
+ dev_info(&pdev->dev, "use pio mode\n");
+ }
+
ret = i2c_add_adapter(&lpi2c_imx->adapter);
if (ret)
goto rpm_disable;
@@ -694,9 +1422,68 @@ static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
return 0;
}
+static int __maybe_unused lpi2c_suspend_noirq(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused lpi2c_resume_noirq(struct device *dev)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the I2C module powers down during system suspend,
+ * the register values will be lost. Therefore, reinitialize
+ * the target when the system resumes.
+ */
+ if (lpi2c_imx->target)
+ lpi2c_imx_target_init(lpi2c_imx);
+
+ return 0;
+}
+
+static int lpi2c_suspend(struct device *dev)
+{
+ /*
+ * Some I2C devices may need the I2C controller to remain active
+ * during resume_noirq() or suspend_noirq(). If the controller is
+ * autosuspended, there is no way to wake it up once runtime PM is
+ * disabled (in suspend_late()).
+ *
+ * During system resume, the I2C controller will be available only
+ * after runtime PM is re-enabled (in resume_early()). However, this
+ * may be too late for some devices.
+ *
+ * Wake up the controller in the suspend() callback while runtime PM
+ * is still enabled. The I2C controller will remain available until
+ * the suspend_noirq() callback (pm_runtime_force_suspend()) is
+ * called. During resume, the I2C controller can be restored by the
+ * resume_noirq() callback (pm_runtime_force_resume()).
+ *
+ * Finally, the resume() callback re-enables autosuspend, so the
+ * I2C controller remains available from resume_noirq() until the
+ * system next enters suspend_noirq().
+ */
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int lpi2c_resume(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops lpi2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpi2c_suspend_noirq,
+ lpi2c_resume_noirq)
+ SYSTEM_SLEEP_PM_OPS(lpi2c_suspend, lpi2c_resume)
SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
lpi2c_runtime_resume, NULL)
};
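Putting the two callback pairs together, the intended ordering around one system sleep cycle looks like this (a summary sketch, not code from the patch):

/*
 * suspend()        pm_runtime_resume_and_get()  controller on, ref held
 * suspend_noirq()  pm_runtime_force_suspend()   controller off
 *   ...system asleep...
 * resume_noirq()   pm_runtime_force_resume()    controller on, target reinit
 * resume()         pm_runtime_put_autosuspend() ref dropped, may autosuspend
 */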
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 5c9a8dfbc4a0..ee0d25b498cb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -397,17 +397,16 @@ static void i2c_imx_reset_regs(struct imx_i2c_struct *i2c_imx)
}
/* Functions for DMA support */
-static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
- dma_addr_t phy_addr)
+static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma_addr_t phy_addr)
{
struct imx_i2c_dma *dma;
struct dma_slave_config dma_sconfig;
- struct device *dev = &i2c_imx->adapter.dev;
+ struct device *dev = i2c_imx->adapter.dev.parent;
int ret;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return;
+ return -ENOMEM;
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
@@ -452,7 +451,7 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
- return;
+ return 0;
fail_rx:
dma_release_channel(dma->chan_rx);
@@ -460,6 +459,8 @@ fail_tx:
dma_release_channel(dma->chan_tx);
fail_al:
devm_kfree(dev, dma);
+
+ return ret;
}
static void i2c_imx_dma_callback(void *arg)
@@ -621,8 +622,8 @@ static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx)
return 0;
}
-static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
- unsigned int i2c_clk_rate)
+static int i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
+ unsigned int i2c_clk_rate)
{
struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
unsigned int div;
@@ -637,7 +638,11 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
/* Divider value calculation */
if (i2c_imx->cur_clk == i2c_clk_rate)
- return;
+ return 0;
+
+ /* Avoid a zero denominator in the divider calculation below. */
+ if (!(i2c_clk_rate / 2))
+ return -EINVAL;
i2c_imx->cur_clk = i2c_clk_rate;
@@ -668,6 +673,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
dev_dbg(&i2c_imx->adapter.dev, "IFDR[IC]=0x%x, REAL DIV=%d\n",
i2c_clk_div[i].val, i2c_clk_div[i].div);
#endif
+
+ return 0;
}
static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
@@ -677,11 +684,12 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
struct imx_i2c_struct *i2c_imx = container_of(nb,
struct imx_i2c_struct,
clk_change_nb);
+ int ret = 0;
if (action & POST_RATE_CHANGE)
- i2c_imx_set_clk(i2c_imx, ndata->new_rate);
+ ret = i2c_imx_set_clk(i2c_imx, ndata->new_rate);
- return NOTIFY_OK;
+ return notifier_from_errno(ret);
}
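notifier_from_errno() is what lets the clock framework see the failure: it folds a negative errno into a NOTIFY_STOP result that the caller can unpack again. A standalone sketch of the round trip (demo names assumed):

static int demo_nb_call(struct notifier_block *nb, unsigned long action,
			void *data)
{
	int err = -EINVAL;	/* pretend the new rate is unusable */

	return notifier_from_errno(err);	/* NOTIFY_STOP_MASK | encoded err */
}

static void demo_caller(int nb_ret)
{
	int err = notifier_to_errno(nb_ret);	/* recovers -EINVAL here */

	if (err)
		pr_warn("rate change rejected: %d\n", err);
}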
static int i2c_imx_start(struct imx_i2c_struct *i2c_imx, bool atomic)
@@ -1760,7 +1768,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto rpm_disable;
/* Request IRQ */
- ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED, pdev->name, i2c_imx);
+ ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED | IRQF_NO_SUSPEND,
+ pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
goto rpm_disable;
@@ -1780,7 +1789,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->bitrate = pdata->bitrate;
i2c_imx->clk_change_nb.notifier_call = i2c_imx_clk_notifier_call;
clk_notifier_register(i2c_imx->clk, &i2c_imx->clk_change_nb);
- i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk));
+ ret = i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get I2C clock\n");
+ goto clk_notifier_unregister;
+ }
i2c_imx_reset_regs(i2c_imx);
@@ -1790,6 +1803,22 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto clk_notifier_unregister;
+ /*
+ * DMA is optional for I2C, so a DMA setup error is not fatal to probe.
+ * Print a warning about the DMA error and fall back to PIO mode to keep
+ * the I2C bus available whenever possible.
+ */
+ ret = i2c_imx_dma_request(i2c_imx, phy_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto clk_notifier_unregister;
+ else if (ret == -ENODEV)
+ dev_dbg(&pdev->dev, "Only use PIO mode\n");
+ else
+ dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n",
+ ERR_PTR(ret));
+ }
+
/* Add I2C adapter */
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
if (ret < 0)
@@ -1804,9 +1833,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->adapter.name);
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
- /* Init DMA config if supported */
- i2c_imx_dma_request(i2c_imx, phy_addr);
-
return 0; /* Return OK */
clk_notifier_unregister:
@@ -1858,8 +1884,7 @@ static int i2c_imx_runtime_suspend(struct device *dev)
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
clk_disable(i2c_imx->clk);
-
- return 0;
+ return pinctrl_pm_select_sleep_state(dev);
}
static int i2c_imx_runtime_resume(struct device *dev)
@@ -1867,6 +1892,10 @@ static int i2c_imx_runtime_resume(struct device *dev)
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
int ret;
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
ret = clk_enable(i2c_imx->clk);
if (ret)
dev_err(dev, "can't enable I2C clock, ret=%d\n", ret);
@@ -1874,7 +1903,43 @@ static int i2c_imx_runtime_resume(struct device *dev)
return ret;
}
+static int i2c_imx_suspend(struct device *dev)
+{
+ /*
+ * Some I2C devices may need the I2C controller to remain active
+ * during resume_noirq() or suspend_noirq(). If the controller is
+ * autosuspended, there is no way to wake it up once runtime PM is
+ * disabled (in suspend_late()).
+ *
+ * During system resume, the I2C controller will be available only
+ * after runtime PM is re-enabled (in resume_early()). However, this
+ * may be too late for some devices.
+ *
+ * Wake up the controller in the suspend() callback while runtime PM
+ * is still enabled. The I2C controller will remain available until
+ * the suspend_noirq() callback (pm_runtime_force_suspend()) is
+ * called. During resume, the I2C controller can be restored by the
+ * resume_noirq() callback (pm_runtime_force_resume()).
+ *
+ * Finally, the resume() callback re-enables autosuspend, so the
+ * I2C controller remains available from resume_noirq() until the
+ * system next enters suspend_noirq().
+ */
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int i2c_imx_resume(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops i2c_imx_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SYSTEM_SLEEP_PM_OPS(i2c_imx_suspend, i2c_imx_resume)
RUNTIME_PM_OPS(i2c_imx_runtime_suspend, i2c_imx_runtime_resume, NULL)
};
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 2b3b65ef2900..a2ac992f9cb0 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -291,9 +291,9 @@ static int smbus_sch_probe(struct platform_device *pdev)
/* Set up the sysfs linkage to our parent device */
priv->adapter.dev.parent = dev;
- priv->adapter.owner = THIS_MODULE,
- priv->adapter.class = I2C_CLASS_HWMON,
- priv->adapter.algo = &smbus_algorithm,
+ priv->adapter.owner = THIS_MODULE;
+ priv->adapter.class = I2C_CLASS_HWMON;
+ priv->adapter.algo = &smbus_algorithm;
snprintf(priv->adapter.name, sizeof(priv->adapter.name),
"SMBus SCH adapter at %04x", (unsigned short)res->start);
diff --git a/drivers/i2c/busses/i2c-keba.c b/drivers/i2c/busses/i2c-keba.c
index 759732a07ef0..7b9ed2592f5b 100644
--- a/drivers/i2c/busses/i2c-keba.c
+++ b/drivers/i2c/busses/i2c-keba.c
@@ -464,12 +464,8 @@ static void ki2c_unregister_devices(struct ki2c *ki2c)
{
int i;
- for (i = 0; i < ki2c->client_size; i++) {
- struct i2c_client *client = ki2c->client[i];
-
- if (client)
- i2c_unregister_device(client);
- }
+ for (i = 0; i < ki2c->client_size; i++)
+ i2c_unregister_device(ki2c->client[i]);
}
static int ki2c_register_devices(struct ki2c *ki2c)
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 482a0074d448..3ca08b8ef8af 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -263,6 +263,265 @@ static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
#define I2C_FREQ_MIN_HZ 10000
#define I2C_FREQ_MAX_HZ I2C_MAX_FAST_MODE_PLUS_FREQ
+struct smb_timing_t {
+ u32 core_clk;
+ u8 hldt;
+ u8 dbcnt;
+ u16 sclfrq;
+ u8 scllt;
+ u8 sclht;
+ bool fast_mode;
+};
+
+static struct smb_timing_t smb_timing_100khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x2A, .dbcnt = 0x4,
+ .sclfrq = 0xFB, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x9D, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x7E, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x79, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x65, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 30000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x4C, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 29000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x49, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 26000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x42, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x3F, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x3D, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x33, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 16180000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x29, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 15000000, .hldt = 0x23, .dbcnt = 0x1,
+ .sclfrq = 0x26, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 13000000, .hldt = 0x1D, .dbcnt = 0x1,
+ .sclfrq = 0x21, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 12000000, .hldt = 0x1B, .dbcnt = 0x1,
+ .sclfrq = 0x1F, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 10000000, .hldt = 0x18, .dbcnt = 0x1,
+ .sclfrq = 0x1A, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 9000000, .hldt = 0x16, .dbcnt = 0x1,
+ .sclfrq = 0x17, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 8090000, .hldt = 0x14, .dbcnt = 0x1,
+ .sclfrq = 0x15, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 7500000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x13, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 6500000, .hldt = 0xE, .dbcnt = 0x1,
+ .sclfrq = 0x11, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 4000000, .hldt = 0x9, .dbcnt = 0x1,
+ .sclfrq = 0xB, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+};
+
+static struct smb_timing_t smb_timing_400khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x2A, .dbcnt = 0x3,
+ .sclfrq = 0x0, .scllt = 0x47, .sclht = 0x35,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0x2A, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0x2C, .sclht = 0x22,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0x21, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x24, .sclht = 0x1B,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x1E, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x24, .sclht = 0x19,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x1B, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x1E, .sclht = 0x14,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 33000000, .hldt = 0x15, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x19, .sclht = 0x11,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 30000000, .hldt = 0x15, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x19, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 29000000, .hldt = 0x11, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x15, .sclht = 0x10,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 26000000, .hldt = 0x10, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x13, .sclht = 0xE,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0xF, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x13, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0xD, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x12, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0xB, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xF, .sclht = 0xA,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 16180000, .hldt = 0xA, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xC, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 15000000, .hldt = 0x9, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xB, .sclht = 0x8,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 13000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x7,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 12000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x6,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 10000000, .hldt = 0x6, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x8, .sclht = 0x5,
+ .fast_mode = true,
+ },
+};
+
+static struct smb_timing_t smb_timing_1000khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x15, .dbcnt = 0x4,
+ .sclfrq = 0x0, .scllt = 0x1C, .sclht = 0x15,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0xF, .dbcnt = 0x3,
+ .sclfrq = 0x0, .scllt = 0x11, .sclht = 0xE,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0xA, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xE, .sclht = 0xB,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x9, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xD, .sclht = 0xB,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 41000000, .hldt = 0x9, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xC, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x8, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xB, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 33000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x7,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0x4, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x7, .sclht = 0x6,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x8, .sclht = 0x5,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0x4, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x6, .sclht = 0x4,
+ .fast_mode = true,
+ },
+};
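Each table is ordered by descending core_clk, so the selection loop further down picks the first row whose core_clk does not exceed the APB clock; for example, a 45 MHz APB clock at 400 kHz resolves to the 40 MHz row. The same selection as a standalone sketch:

static const struct smb_timing_t *demo_pick_row(const struct smb_timing_t *t,
						size_t n, u32 apb_clk)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (apb_clk >= t[i].core_clk)
			return &t[i];	/* first row at or below apb_clk */

	return NULL;	/* APB clock slower than every supported row */
}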
+
struct npcm_i2c_data {
u8 fifo_size;
u32 segctl_init_val;
@@ -1666,6 +1925,12 @@ static int npcm_i2c_int_master_handler(struct npcm_i2c *bus)
(FIELD_GET(NPCM_I2CCST3_EO_BUSY,
ioread8(bus->reg + NPCM_I2CCST3)))) {
npcm_i2c_irq_handle_eob(bus);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* reenable slave if it was enabled */
+ if (bus->slave)
+ iowrite8(bus->slave->addr | NPCM_I2CADDR_SAEN,
+ bus->reg + NPCM_I2CADDR1);
+#endif
return 0;
}
@@ -1805,102 +2070,45 @@ static void npcm_i2c_recovery_init(struct i2c_adapter *_adap)
*/
static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz)
{
- u32 k1 = 0;
- u32 k2 = 0;
- u8 dbnct = 0;
- u32 sclfrq = 0;
- u8 hldt = 7;
+ struct smb_timing_t *smb_timing;
+ u8 scl_table_cnt = 0, table_size = 0;
u8 fast_mode = 0;
- u32 src_clk_khz;
- u32 bus_freq_khz;
- src_clk_khz = bus->apb_clk / 1000;
- bus_freq_khz = bus_freq_hz / 1000;
bus->bus_freq = bus_freq_hz;
- /* 100KHz and below: */
- if (bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) {
- sclfrq = src_clk_khz / (bus_freq_khz * 4);
-
- if (sclfrq < SCLFRQ_MIN || sclfrq > SCLFRQ_MAX)
- return -EDOM;
-
- if (src_clk_khz >= 40000)
- hldt = 17;
- else if (src_clk_khz >= 12500)
- hldt = 15;
- else
- hldt = 7;
- }
-
- /* 400KHz: */
- else if (bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) {
- sclfrq = 0;
+ switch (bus_freq_hz) {
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ smb_timing = smb_timing_100khz;
+ table_size = ARRAY_SIZE(smb_timing_100khz);
+ break;
+ case I2C_MAX_FAST_MODE_FREQ:
+ smb_timing = smb_timing_400khz;
+ table_size = ARRAY_SIZE(smb_timing_400khz);
fast_mode = I2CCTL3_400K_MODE;
-
- if (src_clk_khz < 7500)
- /* 400KHZ cannot be supported for core clock < 7.5MHz */
- return -EDOM;
-
- else if (src_clk_khz >= 50000) {
- k1 = 80;
- k2 = 48;
- hldt = 12;
- dbnct = 7;
- }
-
- /* Master or Slave with frequency > 25MHz */
- else if (src_clk_khz > 25000) {
- hldt = clk_coef(src_clk_khz, 300) + 7;
- k1 = clk_coef(src_clk_khz, 1600);
- k2 = clk_coef(src_clk_khz, 900);
- }
- }
-
- /* 1MHz: */
- else if (bus_freq_hz <= I2C_MAX_FAST_MODE_PLUS_FREQ) {
- sclfrq = 0;
+ break;
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ smb_timing = smb_timing_1000khz;
+ table_size = ARRAY_SIZE(smb_timing_1000khz);
fast_mode = I2CCTL3_400K_MODE;
-
- /* 1MHZ cannot be supported for core clock < 24 MHz */
- if (src_clk_khz < 24000)
- return -EDOM;
-
- k1 = clk_coef(src_clk_khz, 620);
- k2 = clk_coef(src_clk_khz, 380);
-
- /* Core clk > 40 MHz */
- if (src_clk_khz > 40000) {
- /*
- * Set HLDT:
- * SDA hold time: (HLDT-7) * T(CLK) >= 120
- * HLDT = 120/T(CLK) + 7 = 120 * FREQ(CLK) + 7
- */
- hldt = clk_coef(src_clk_khz, 120) + 7;
- } else {
- hldt = 7;
- dbnct = 2;
- }
+ break;
+ default:
+ return -EINVAL;
}
- /* Frequency larger than 1 MHz is not supported */
- else
- return -EINVAL;
+ for (scl_table_cnt = 0; scl_table_cnt < table_size; scl_table_cnt++)
+ if (bus->apb_clk >= smb_timing[scl_table_cnt].core_clk)
+ break;
- if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) {
- k1 = round_up(k1, 2);
- k2 = round_up(k2 + 1, 2);
- if (k1 < SCLFRQ_MIN || k1 > SCLFRQ_MAX ||
- k2 < SCLFRQ_MIN || k2 > SCLFRQ_MAX)
- return -EDOM;
- }
+ if (scl_table_cnt == table_size)
+ return -EINVAL;
/* write sclfrq value. bits [6:0] are in I2CCTL2 reg */
- iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, sclfrq & 0x7F),
+ iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, smb_timing[scl_table_cnt].sclfrq & 0x7F),
bus->reg + NPCM_I2CCTL2);
/* bits [8:7] are in I2CCTL3 reg */
- iowrite8(fast_mode | FIELD_PREP(I2CCTL3_SCLFRQ8_7, (sclfrq >> 7) & 0x3),
+ iowrite8(FIELD_PREP(I2CCTL3_SCLFRQ8_7, (smb_timing[scl_table_cnt].sclfrq >> 7) & 0x3) |
+ fast_mode,
bus->reg + NPCM_I2CCTL3);
/* Select Bank 0 to access NPCM_I2CCTL4/NPCM_I2CCTL5 */
@@ -1912,13 +2120,13 @@ static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz)
* k1 = 2 * SCLLT7-0 -> Low Time = k1 / 2
* k2 = 2 * SCLLT7-0 -> High Time = k2 / 2
*/
- iowrite8(k1 / 2, bus->reg + NPCM_I2CSCLLT);
- iowrite8(k2 / 2, bus->reg + NPCM_I2CSCLHT);
+ iowrite8(smb_timing[scl_table_cnt].scllt, bus->reg + NPCM_I2CSCLLT);
+ iowrite8(smb_timing[scl_table_cnt].sclht, bus->reg + NPCM_I2CSCLHT);
- iowrite8(dbnct, bus->reg + NPCM_I2CCTL5);
+ iowrite8(smb_timing[scl_table_cnt].dbcnt, bus->reg + NPCM_I2CCTL5);
}
- iowrite8(hldt, bus->reg + NPCM_I2CCTL4);
+ iowrite8(smb_timing[scl_table_cnt].hldt, bus->reg + NPCM_I2CCTL4);
/* Return to Bank 1, and stay there by default: */
npcm_i2c_select_bank(bus, I2C_BANK_1);
@@ -2035,7 +2243,7 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
}
static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
- u8 slave_addr, u16 nwrite, u16 nread,
+ u16 nwrite, u16 nread,
u8 *write_data, u8 *read_data,
bool use_PEC, bool use_read_block)
{
@@ -2043,7 +2251,6 @@ static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
bus->cmd_err = -EBUSY;
return false;
}
- bus->dest_addr = slave_addr << 1;
bus->wr_buf = write_data;
bus->wr_size = nwrite;
bus->wr_ind = 0;
@@ -2086,7 +2293,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
unsigned long time_left, flags;
u16 nwrite, nread;
u8 *write_data, *read_data;
- u8 slave_addr;
unsigned long timeout;
bool read_block = false;
bool read_PEC = false;
@@ -2099,7 +2305,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
msg0 = &msgs[0];
- slave_addr = msg0->addr;
if (msg0->flags & I2C_M_RD) { /* read */
nwrite = 0;
write_data = NULL;
@@ -2132,19 +2337,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
- /*
- * Adaptive TimeOut: estimated time in usec + 100% margin:
- * 2: double the timeout for clock stretching case
- * 9: bits per transaction (including the ack/nack)
- */
- timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
- timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
return -EINVAL;
}
- time_left = jiffies + timeout + 1;
+ time_left = jiffies + bus->adap.timeout / bus->adap.retries + 1;
do {
/*
* we must clear slave address immediately when the bus is not
@@ -2163,6 +2361,21 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
} while (time_is_after_jiffies(time_left) && bus_busy);
/*
+ * Store the address early so it is accessible for a potential
+ * i2c_recover_bus() call, which addresses the device in the write
+ * direction to recover the bus after an error condition.
+ *
+ * Clear the I2C_M_RD flag from the address: recovery always writes,
+ * and npcm_i2c_master_start_xmit() handles the read/write direction
+ * internally anyway.
+ */
+ bus->dest_addr = i2c_8bit_addr_from_msg(msg0) & ~I2C_M_RD;
+
+ /*
* Check the BER (bus error) state, when ber_state is true, it means that the module
* detects the bus error which is caused by some factor like that the electricity
* noise occurs on the bus. Under this condition, the module is reset and the bus
@@ -2179,7 +2392,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
npcm_i2c_init_params(bus);
- bus->dest_addr = slave_addr;
bus->msgs = msgs;
bus->msgs_num = num;
bus->cmd_err = 0;
@@ -2189,9 +2401,17 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
npcm_i2c_int_enable(bus, true);
- if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+ if (npcm_i2c_master_start_xmit(bus, nwrite, nread,
write_data, read_data, read_PEC,
read_block)) {
+ /*
+ * Adaptive TimeOut: estimated time in usec + 100% margin:
+ * 2: double the timeout for clock stretching case
+ * 9: bits per transaction (including the ack/nack)
+ */
+ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
+ timeout = max_t(unsigned long, bus->adap.timeout / bus->adap.retries,
+ usecs_to_jiffies(timeout_usec));
time_left = wait_for_completion_timeout(&bus->cmd_complete,
timeout);
@@ -2317,7 +2537,12 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
adap = &bus->adap;
adap->owner = THIS_MODULE;
adap->retries = 3;
- adap->timeout = msecs_to_jiffies(35);
+ /*
+ * Users may connect many masters to the same bus, so this timeout
+ * bounds the time it takes to acquire bus ownership. Transactions
+ * can be very long, and waiting 35ms is not enough.
+ */
+ adap->timeout = 2 * HZ;
adap->algo = &npcm_i2c_algo;
adap->quirks = &npcm_i2c_quirks;
adap->algo_data = bus;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7a22e1f46e60..7bbd478171e0 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -823,11 +823,9 @@ static int geni_i2c_probe(struct platform_device *pdev)
return gi2c->irq;
ret = geni_i2c_clk_map_idx(gi2c);
- if (ret) {
- dev_err(dev, "Invalid clk frequency %d Hz: %d\n",
- gi2c->clk_freq_out, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid clk frequency %d Hz\n",
+ gi2c->clk_freq_out);
gi2c->adap.algo = &geni_i2c_algo;
init_completion(&gi2c->done);
@@ -837,11 +835,10 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* Keep interrupts disabled initially to allow for low-power modes */
ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN,
dev_name(dev), gi2c);
- if (ret) {
- dev_err(dev, "Request_irq failed:%d: err:%d\n",
- gi2c->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Request_irq failed: %d\n", gi2c->irq);
+
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
@@ -870,16 +867,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
- dev_err(dev, "Error turning on resources %d\n", ret);
- clk_disable_unprepare(gi2c->core_clk);
- return ret;
+ dev_err_probe(dev, ret, "Error turning on resources\n");
+ goto err_clk;
}
proto = geni_se_read_proto(&gi2c->se);
if (proto != GENI_SE_I2C) {
- dev_err(dev, "Invalid proto %d\n", proto);
- geni_se_resources_off(&gi2c->se);
- clk_disable_unprepare(gi2c->core_clk);
- return -ENXIO;
+ ret = dev_err_probe(dev, -ENXIO, "Invalid proto %d\n", proto);
+ goto err_resources;
}
if (desc && desc->no_dma_support)
@@ -891,11 +885,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
- if (ret) {
- geni_se_resources_off(&gi2c->se);
- clk_disable_unprepare(gi2c->core_clk);
- return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
- }
+ if (ret)
+ goto err_resources;
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
@@ -907,10 +898,9 @@ static int geni_i2c_probe(struct platform_device *pdev)
tx_depth = desc->tx_fifo_depth;
if (!tx_depth) {
- dev_err(dev, "Invalid TX FIFO depth\n");
- geni_se_resources_off(&gi2c->se);
- clk_disable_unprepare(gi2c->core_clk);
- return -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL,
+ "Invalid TX FIFO depth\n");
+ goto err_resources;
}
gi2c->tx_wm = tx_depth - 1;
@@ -924,7 +914,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
clk_disable_unprepare(gi2c->core_clk);
ret = geni_se_resources_off(&gi2c->se);
if (ret) {
- dev_err(dev, "Error turning off resources %d\n", ret);
+ dev_err_probe(dev, ret, "Error turning off resources\n");
goto err_dma;
}
@@ -940,17 +930,25 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&gi2c->adap);
if (ret) {
- dev_err(dev, "Error adding i2c adapter %d\n", ret);
+ dev_err_probe(dev, ret, "Error adding i2c adapter\n");
pm_runtime_disable(gi2c->se.dev);
goto err_dma;
}
dev_dbg(dev, "Geni-I2C adaptor successfully added\n");
- return 0;
+ return ret;
+
+err_resources:
+ geni_se_resources_off(&gi2c->se);
+err_clk:
+ clk_disable_unprepare(gi2c->core_clk);
+
+ return ret;
err_dma:
release_gpi_dma(gi2c);
+
return ret;
}
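dev_err_probe(), used throughout this cleanup, logs real errors at error level, logs -EPROBE_DEFER at debug level while recording the deferral reason, and passes the error code through, so a failing resource acquisition collapses to a single statement. A minimal sketch (demo names assumed):

static int demo_get_resources(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get clock\n");

	return 0;
}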
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a7b77d14ee86..5693a38da7b5 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -130,6 +130,8 @@
#define ID_P_PM_BLOCKED BIT(31)
#define ID_P_MASK GENMASK(31, 27)
+#define ID_SLAVE_NACK BIT(0)
+
enum rcar_i2c_type {
I2C_RCAR_GEN1,
I2C_RCAR_GEN2,
@@ -166,6 +168,7 @@ struct rcar_i2c_priv {
int irq;
struct i2c_client *host_notify_client;
+ u8 slave_flags;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -655,6 +658,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
{
u32 ssr_raw, ssr_filtered;
u8 value;
+ int ret;
ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff;
ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER);
@@ -670,7 +674,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICRXTX, value);
rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
} else {
- i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
rcar_i2c_read(priv, ICRXTX); /* dummy read */
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
@@ -683,18 +690,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ priv->slave_flags &= ~ID_SLAVE_NACK;
rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
/* master wants to write to us */
if (ssr_filtered & SDR) {
- int ret;
-
value = rcar_i2c_read(priv, ICRXTX);
ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
- /* Send NACK in case of error */
- rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
+ /* Send NACK in case of error, but it will come 1 byte late :( */
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS |
+ (priv->slave_flags & ID_SLAVE_NACK ? FNA : 0));
rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
}
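A backend NACKs by returning an error from its event callback; the new slave_flags bit keeps that NACK sticky for the rest of the message, even though the hardware can only signal it one byte late. A hypothetical backend that would exercise this path by refusing writes once its buffer is full:

static int demo_nack_cb(struct i2c_client *client,
			enum i2c_slave_event event, u8 *val)
{
	static u8 buf[16];
	static unsigned int fill;

	switch (event) {
	case I2C_SLAVE_WRITE_REQUESTED:
		fill = 0;
		return 0;
	case I2C_SLAVE_WRITE_RECEIVED:
		if (fill == sizeof(buf))
			return -ENOSPC;	/* controller replies with NACK */
		buf[fill++] = *val;
		return 0;
	default:
		return 0;
	}
}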
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 9264adc97ca9..d7dddd6c296a 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -34,46 +34,51 @@
* Also check the comments in the interrupt routines for some gory details.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/time.h>
-#define ICCR1_ICE 0x80
-#define ICCR1_IICRST 0x40
-#define ICCR1_SOWP 0x10
+#define ICCR1_ICE BIT(7)
+#define ICCR1_IICRST BIT(6)
+#define ICCR1_SOWP BIT(4)
+#define ICCR1_SCLI BIT(1)
+#define ICCR1_SDAI BIT(0)
-#define ICCR2_BBSY 0x80
-#define ICCR2_SP 0x08
-#define ICCR2_RS 0x04
-#define ICCR2_ST 0x02
+#define ICCR2_BBSY BIT(7)
+#define ICCR2_SP BIT(3)
+#define ICCR2_RS BIT(2)
+#define ICCR2_ST BIT(1)
-#define ICMR1_CKS_MASK 0x70
-#define ICMR1_BCWP 0x08
+#define ICMR1_CKS_MASK GENMASK(6, 4)
+#define ICMR1_BCWP BIT(3)
#define ICMR1_CKS(_x) ((((_x) << 4) & ICMR1_CKS_MASK) | ICMR1_BCWP)
-#define ICMR3_RDRFS 0x20
-#define ICMR3_ACKWP 0x10
-#define ICMR3_ACKBT 0x08
+#define ICMR3_RDRFS BIT(5)
+#define ICMR3_ACKWP BIT(4)
+#define ICMR3_ACKBT BIT(3)
-#define ICFER_FMPE 0x80
+#define ICFER_FMPE BIT(7)
-#define ICIER_TIE 0x80
-#define ICIER_TEIE 0x40
-#define ICIER_RIE 0x20
-#define ICIER_NAKIE 0x10
-#define ICIER_SPIE 0x08
+#define ICIER_TIE BIT(7)
+#define ICIER_TEIE BIT(6)
+#define ICIER_RIE BIT(5)
+#define ICIER_NAKIE BIT(4)
+#define ICIER_SPIE BIT(3)
-#define ICSR2_NACKF 0x10
+#define ICSR2_NACKF BIT(4)
-#define ICBR_RESERVED 0xe0 /* Should be 1 on writes */
+#define ICBR_RESERVED GENMASK(7, 5) /* Should be 1 on writes */
#define RIIC_INIT_MSG -1
@@ -134,6 +139,27 @@ static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u
riic_writeb(riic, (riic_readb(riic, reg) & ~clear) | set, reg);
}
+static int riic_bus_barrier(struct riic_dev *riic)
+{
+ int ret;
+ u8 val;
+
+ /*
+ * The SDA line can still be low even when BBSY = 0. Therefore, after checking
+ * the BBSY flag, also verify that the SDA and SCL lines are not being held low.
+ */
+ ret = readb_poll_timeout(riic->base + riic->info->regs[RIIC_ICCR2], val,
+ !(val & ICCR2_BBSY), 10, riic->adapter.timeout);
+ if (ret)
+ return ret;
+
+ if ((riic_readb(riic, RIIC_ICCR1) & (ICCR1_SDAI | ICCR1_SCLI)) !=
+ (ICCR1_SDAI | ICCR1_SCLI))
+ return -EBUSY;
+
+ return 0;
+}
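readb_poll_timeout() (from <linux/iopoll.h>) re-reads the register into val, sleeping between reads, until the condition holds or the timeout in microseconds expires, returning 0 or -ETIMEDOUT. A minimal standalone shape of the same idiom (register and busy bit are assumed):

static int demo_wait_idle(void __iomem *status_reg)
{
	u8 val;

	/* Poll every 10us, give up after 10ms. */
	return readb_poll_timeout(status_reg, val, !(val & BIT(7)),
				  10, 10 * USEC_PER_MSEC);
}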
+
static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
@@ -146,13 +172,11 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (ret)
return ret;
- if (riic_readb(riic, RIIC_ICCR2) & ICCR2_BBSY) {
- riic->err = -EBUSY;
+ riic->err = riic_bus_barrier(riic);
+ if (riic->err)
goto out;
- }
reinit_completion(&riic->msg_done);
- riic->err = 0;
riic_writeb(riic, 0, RIIC_ICSR2);
@@ -312,6 +336,7 @@ static int riic_init_hw(struct riic_dev *riic)
{
int ret;
unsigned long rate;
+ unsigned long ns_per_tick;
int total_ticks, cks, brl, brh;
struct i2c_timings *t = &riic->i2c_t;
struct device *dev = riic->adapter.dev.parent;
@@ -320,7 +345,7 @@ static int riic_init_hw(struct riic_dev *riic)
: I2C_MAX_FAST_MODE_FREQ;
if (t->bus_freq_hz > max_freq)
- return dev_err_probe(&riic->adapter.dev, -EINVAL,
+ return dev_err_probe(dev, -EINVAL,
"unsupported bus speed %uHz (%u max)\n",
t->bus_freq_hz, max_freq);
@@ -356,11 +381,9 @@ static int riic_init_hw(struct riic_dev *riic)
rate /= 2;
}
- if (brl > (0x1F + 3)) {
- dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
- (unsigned long)t->bus_freq_hz);
- return -EINVAL;
- }
+ if (brl > (0x1F + 3))
+ return dev_err_probe(dev, -EINVAL, "invalid speed (%uHz). Too slow.\n",
+ t->bus_freq_hz);
brh = total_ticks - brl;
@@ -377,8 +400,9 @@ static int riic_init_hw(struct riic_dev *riic)
* Remove clock ticks for rise and fall times. Convert ns to clock
* ticks.
*/
- brl -= t->scl_fall_ns / (1000000000 / rate);
- brh -= t->scl_rise_ns / (1000000000 / rate);
+ ns_per_tick = NSEC_PER_SEC / rate;
+ brl -= t->scl_fall_ns / ns_per_tick;
+ brh -= t->scl_rise_ns / ns_per_tick;
/* Adjust for min register values for when SCLE=1 and NFE=1 */
if (brl < 1)
@@ -388,8 +412,7 @@ static int riic_init_hw(struct riic_dev *riic)
pr_debug("i2c-riic: freq=%lu, duty=%d, fall=%lu, rise=%lu, cks=%d, brl=%d, brh=%d\n",
rate / total_ticks, ((brl + 3) * 100) / (brl + brh + 6),
- t->scl_fall_ns / (1000000000 / rate),
- t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
+ t->scl_fall_ns / ns_per_tick, t->scl_rise_ns / ns_per_tick, cks, brl, brh);
ret = pm_runtime_resume_and_get(dev);
if (ret)
@@ -416,7 +439,7 @@ static int riic_init_hw(struct riic_dev *riic)
return 0;
}
-static struct riic_irq_desc riic_irqs[] = {
+static const struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
@@ -424,11 +447,6 @@ static struct riic_irq_desc riic_irqs[] = {
{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
};
-static void riic_reset_control_assert(void *data)
-{
- reset_control_assert(data);
-}
-
static int riic_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -445,35 +463,27 @@ static int riic_i2c_probe(struct platform_device *pdev)
return PTR_ERR(riic->base);
riic->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(riic->clk)) {
- dev_err(dev, "missing controller clock");
- return PTR_ERR(riic->clk);
- }
+ if (IS_ERR(riic->clk))
+ return dev_err_probe(dev, PTR_ERR(riic->clk),
+ "missing controller clock");
- riic->rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ riic->rstc = devm_reset_control_get_optional_exclusive_deasserted(dev, NULL);
if (IS_ERR(riic->rstc))
return dev_err_probe(dev, PTR_ERR(riic->rstc),
- "Error: missing reset ctrl\n");
-
- ret = reset_control_deassert(riic->rstc);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, riic_reset_control_assert, riic->rstc);
- if (ret)
- return ret;
+ "failed to acquire deasserted reset\n");
for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
- ret = platform_get_irq(pdev, riic_irqs[i].res_num);
- if (ret < 0)
- return ret;
+ int irq;
+
+ irq = platform_get_irq(pdev, riic_irqs[i].res_num);
+ if (irq < 0)
+ return irq;
- ret = devm_request_irq(dev, ret, riic_irqs[i].isr,
+ ret = devm_request_irq(dev, irq, riic_irqs[i].isr,
0, riic_irqs[i].name, riic);
- if (ret) {
- dev_err(dev, "failed to request irq %s\n", riic_irqs[i].name);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %s\n",
+ riic_irqs[i].name);
}
riic->info = of_device_get_match_data(dev);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index c4d3eb02da09..dc1e46d834dc 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <linux/spinlock.h>
#define DRIVER_NAME "xiic-i2c"
#define DYNAMIC_MODE_READ_BROKEN_BIT BIT(0)
@@ -74,6 +76,9 @@ enum i2c_scl_freq {
* @smbus_block_read: Flag to handle block read
* @input_clk: Input clock to I2C controller
* @i2c_clk: I2C SCL frequency
+ * @atomic: True when the transfer runs in atomic (polling) mode
+ * @atomic_lock: Lock serializing atomic transfer mode
+ * @atomic_xfer_state: Transfer state, one of the STATE_* values
*/
struct xiic_i2c {
struct device *dev;
@@ -96,6 +101,9 @@ struct xiic_i2c {
bool smbus_block_read;
unsigned long input_clk;
unsigned int i2c_clk;
+ bool atomic;
+ spinlock_t atomic_lock; /* Lock for atomic transfer mode */
+ enum xilinx_i2c_state atomic_xfer_state;
};
struct xiic_version_data {
@@ -224,6 +232,8 @@ static const struct timing_regs timing_reg_values[] = {
#define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000))
/* timeout waiting for the controller finish transfers */
#define XIIC_XFER_TIMEOUT (msecs_to_jiffies(10000))
+/* timeout waiting for the controller to finish transfers, in microseconds */
+#define XIIC_XFER_TIMEOUT_US 10000000
/*
* The following constant is used for the device global interrupt enable
@@ -238,6 +248,29 @@ static const struct timing_regs timing_reg_values[] = {
static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num);
static void __xiic_start_xfer(struct xiic_i2c *i2c);
+static int xiic_i2c_runtime_suspend(struct device *dev)
+{
+ struct xiic_i2c *i2c = dev_get_drvdata(dev);
+
+ clk_disable(i2c->clk);
+
+ return 0;
+}
+
+static int xiic_i2c_runtime_resume(struct device *dev)
+{
+ struct xiic_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* For the register read and write functions, a little-endian and big-endian
* version are necessary. Endianness is detected during the probe function.
@@ -374,9 +407,10 @@ static int xiic_setclk(struct xiic_i2c *i2c)
unsigned int index = 0;
u32 reg_val;
- dev_dbg(i2c->adap.dev.parent,
- "%s entry, i2c->input_clk: %ld, i2c->i2c_clk: %d\n",
- __func__, i2c->input_clk, i2c->i2c_clk);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent,
+ "%s entry, i2c->input_clk: %ld, i2c->i2c_clk: %d\n",
+ __func__, i2c->input_clk, i2c->i2c_clk);
/* If not specified in DT, do not configure in SW. Rely only on Vivado design */
if (!i2c->i2c_clk || !i2c->input_clk)
@@ -467,7 +501,8 @@ static int xiic_reinit(struct xiic_i2c *i2c)
return ret;
/* Enable interrupts */
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+ if (!i2c->atomic)
+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
@@ -549,11 +584,12 @@ static void xiic_read_rx(struct xiic_i2c *i2c)
bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
- dev_dbg(i2c->adap.dev.parent,
- "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n",
- __func__, bytes_in_fifo, xiic_rx_space(i2c),
- xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent,
+ "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n",
+ __func__, bytes_in_fifo, xiic_rx_space(i2c),
+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
if (bytes_in_fifo > xiic_rx_space(i2c))
bytes_in_fifo = xiic_rx_space(i2c);
@@ -612,6 +648,26 @@ static void xiic_read_rx(struct xiic_i2c *i2c)
}
}
+static bool xiic_error_check(struct xiic_i2c *i2c)
+{
+ bool status = false;
+ u32 pend, isr, ier;
+
+ isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+ ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+ pend = isr & ier;
+
+ if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
+ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
+ !(pend & XIIC_INTR_RX_FULL_MASK))) {
+ xiic_reinit(i2c);
+ status = true;
+ if (i2c->tx_msg || i2c->rx_msg)
+ i2c->atomic_xfer_state = STATE_ERROR;
+ }
+ return status;
+}
+
static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
{
/* return the actual space left in the FIFO */
@@ -625,8 +681,9 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
len = (len > fifo_space) ? fifo_space : len;
- dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
- __func__, len, fifo_space);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
+ __func__, len, fifo_space);
while (len--) {
u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
@@ -649,9 +706,13 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
~XIIC_CR_MSMS_MASK);
}
- dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
}
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+
+ if (i2c->atomic && xiic_error_check(i2c))
+ return;
}
}
@@ -854,22 +915,51 @@ static int xiic_wait_not_busy(struct xiic_i2c *i2c)
*/
err = xiic_bus_busy(i2c);
while (err && tries--) {
- msleep(1);
+ if (i2c->atomic)
+ udelay(1000);
+ else
+ usleep_range(1000, 1100);
err = xiic_bus_busy(i2c);
}
return err;
}
+static void xiic_recv_atomic(struct xiic_i2c *i2c)
+{
+ while (xiic_rx_space(i2c)) {
+ if (xiic_getreg32(i2c, XIIC_IISR_OFFSET) & XIIC_INTR_RX_FULL_MASK) {
+ xiic_read_rx(i2c);
+
+ /* Clear Rx full and Tx error interrupts. */
+ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK |
+ XIIC_INTR_TX_ERROR_MASK);
+ }
+ if (xiic_error_check(i2c))
+ return;
+ }
+
+ i2c->rx_msg = NULL;
+ xiic_irq_clr_en(i2c, XIIC_INTR_TX_ERROR_MASK);
+
+ /* send next message if this wasn't the last. */
+ if (i2c->nmsgs > 1) {
+ i2c->nmsgs--;
+ i2c->tx_msg++;
+ __xiic_start_xfer(i2c);
+ }
+}
+
static void xiic_start_recv(struct xiic_i2c *i2c)
{
u16 rx_watermark;
u8 cr = 0, rfd_set = 0;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
- dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
- __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
/* Disable Tx interrupts */
xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK | XIIC_INTR_TX_EMPTY_MASK);
@@ -967,9 +1057,10 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
XIIC_CR_MSMS_MASK)
& ~(XIIC_CR_DIR_IS_TX_MASK));
}
- dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n",
- __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
}
if (i2c->nmsgs == 1)
@@ -979,10 +1070,55 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
/* the message is tx:ed */
i2c->tx_pos = msg->len;
+ i2c->prev_msg_tx = false;
+
/* Enable interrupts */
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+ if (!i2c->atomic)
+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+ else
+ xiic_recv_atomic(i2c);
+}
- i2c->prev_msg_tx = false;
+static void xiic_send_rem_atomic(struct xiic_i2c *i2c)
+{
+ while (xiic_tx_space(i2c)) {
+ if (xiic_tx_fifo_space(i2c)) {
+ u16 data;
+
+ data = i2c->tx_msg->buf[i2c->tx_pos];
+ i2c->tx_pos++;
+ if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) {
+ /* last message in transfer -> STOP */
+ if (i2c->dynamic) {
+ data |= XIIC_TX_DYN_STOP_MASK;
+ } else {
+ u8 cr;
+ int status;
+
+ /* Wait till FIFO is empty so STOP is sent last */
+ status = xiic_wait_tx_empty(i2c);
+ if (status)
+ return;
+
+ /* Write to CR to stop */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_MSMS_MASK);
+ }
+ }
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+ if (xiic_error_check(i2c))
+ return;
+ }
+
+ if (i2c->nmsgs > 1) {
+ i2c->nmsgs--;
+ i2c->tx_msg++;
+ __xiic_start_xfer(i2c);
+ } else {
+ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
+ }
}
static void xiic_start_send(struct xiic_i2c *i2c)
@@ -991,11 +1127,13 @@ static void xiic_start_send(struct xiic_i2c *i2c)
u16 data;
struct i2c_msg *msg = i2c->tx_msg;
- dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
- __func__, msg, msg->len);
- dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
- __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic) {
+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
+ __func__, msg, msg->len);
+ dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ }
if (i2c->dynamic) {
/* write the address */
@@ -1060,19 +1198,27 @@ static void xiic_start_send(struct xiic_i2c *i2c)
XIIC_INTR_TX_ERROR_MASK |
XIIC_INTR_BNB_MASK);
}
+
i2c->prev_msg_tx = true;
+
+ if (i2c->atomic && !i2c->atomic_xfer_state)
+ xiic_send_rem_atomic(i2c);
}
static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
int fifo_space = xiic_tx_fifo_space(i2c);
- dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
- __func__, i2c->tx_msg, fifo_space);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
+ __func__, i2c->tx_msg, fifo_space);
if (!i2c->tx_msg)
return;
+ if (i2c->atomic && xiic_error_check(i2c))
+ return;
+
i2c->rx_pos = 0;
i2c->tx_pos = 0;
i2c->state = STATE_START;
@@ -1089,7 +1235,10 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
bool broken_read, max_read_len, smbus_blk_read;
int ret, count;
- mutex_lock(&i2c->lock);
+ if (i2c->atomic)
+ spin_lock(&i2c->atomic_lock);
+ else
+ mutex_lock(&i2c->lock);
if (i2c->tx_msg || i2c->rx_msg) {
dev_err(i2c->adap.dev.parent,
@@ -1098,6 +1247,8 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
goto out;
}
+ i2c->atomic_xfer_state = STATE_DONE;
+
/* In single master mode bus can only be busy, when in use by this
* driver. If the register indicates bus being busy for some reason we
* should ignore it, since bus will never be released and i2c will be
@@ -1124,7 +1275,9 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
i2c->tx_msg = msgs;
i2c->rx_msg = NULL;
i2c->nmsgs = num;
- init_completion(&i2c->completion);
+
+ if (!i2c->atomic)
+ init_completion(&i2c->completion);
/* Decide standard mode or Dynamic mode */
i2c->dynamic = true;
@@ -1159,7 +1312,10 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
__xiic_start_xfer(i2c);
out:
- mutex_unlock(&i2c->lock);
+ if (i2c->atomic)
+ spin_unlock(&i2c->atomic_lock);
+ else
+ mutex_unlock(&i2c->lock);
return ret;
}
@@ -1198,6 +1354,44 @@ out:
return err;
}
+static int xiic_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct xiic_i2c *i2c = i2c_get_adapdata(adap);
+ u32 status_reg;
+ int err;
+
+ err = xiic_i2c_runtime_resume(i2c->dev);
+ if (err)
+ return err;
+
+ i2c->atomic = true;
+ err = xiic_start_xfer(i2c, msgs, num);
+ if (err < 0)
+ goto out;
+
+ err = readl_poll_timeout_atomic(i2c->base + XIIC_SR_REG_OFFSET,
+ status_reg, !(status_reg & XIIC_SR_BUS_BUSY_MASK),
+ 1, XIIC_XFER_TIMEOUT_US);
+
+ if (err) /* Timeout */
+ err = -ETIMEDOUT;
+
+ spin_lock(&i2c->atomic_lock);
+ if (err || i2c->state) {
+ i2c->tx_msg = NULL;
+ i2c->rx_msg = NULL;
+ i2c->nmsgs = 0;
+ }
+
+ err = (i2c->atomic_xfer_state == STATE_DONE) ? num : -EIO;
+ spin_unlock(&i2c->atomic_lock);
+
+out:
+ i2c->atomic = false;
+ xiic_i2c_runtime_suspend(i2c->dev);
+
+ return err;
+}
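The core only takes this path late in shutdown or reboot, when the system is no longer fully running and interrupts may be disabled. A hypothetical client whose shutdown hook would end up in master_xfer_atomic() (register and value are made up):

static void demo_client_shutdown(struct i2c_client *client)
{
	u8 off_cmd[2] = { 0x01, 0x00 };	/* made-up register/value pair */
	struct i2c_msg msg = {
		.addr = client->addr,
		.len = sizeof(off_cmd),
		.buf = off_cmd,
	};

	/* Late in shutdown the core may route this via master_xfer_atomic(). */
	i2c_transfer(client->adapter, &msg, 1);
}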
+
static u32 xiic_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
@@ -1205,6 +1399,7 @@ static u32 xiic_func(struct i2c_adapter *adap)
static const struct i2c_algorithm xiic_algorithm = {
.master_xfer = xiic_xfer,
+ .master_xfer_atomic = xiic_xfer_atomic,
.functionality = xiic_func,
};
@@ -1268,6 +1463,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
DRIVER_NAME " %s", pdev->name);
mutex_init(&i2c->lock);
+ spin_lock_init(&i2c->atomic_lock);
i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c->clk))
@@ -1365,29 +1561,6 @@ static void xiic_i2c_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
}
-static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
-{
- struct xiic_i2c *i2c = dev_get_drvdata(dev);
-
- clk_disable(i2c->clk);
-
- return 0;
-}
-
-static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev)
-{
- struct xiic_i2c *i2c = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(i2c->clk);
- if (ret) {
- dev_err(dev, "Cannot enable clock.\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dev_pm_ops xiic_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
xiic_i2c_runtime_resume, NULL)
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index b7c10ced5a43..8fe9ddff8e96 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
dev_name(dev), ret);
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
i2c_atr_detach_client(client->adapter, client);
break;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 14ae0cfc325e..d2499f302b50 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -355,6 +355,25 @@ static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
{}
};
+static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = {
+ /*
+ * When a 400KHz freq is used on this model of ELAN touchpad in Linux,
+ * excessive smoothing (similar to when the touchpad's firmware detects
+ * a noisy signal) is sometimes applied. As some devices' (e.g., Lenovo
+ * V15 G4) ACPI tables specify a 400KHz frequency for this device and
+ * some I2C busses (e.g., Designware I2C) default to a 400KHz freq,
+ * force the speed to 100KHz as a workaround.
+ *
+ * For future investigation: This problem may be related to the default
+ * HCNT/LCNT values given by some busses' drivers, because they are not
+ * specified in the aforementioned devices' ACPI tables, and because
+ * the device works without issues on Windows at what is expected to be
+ * a 400KHz frequency. The root cause of the issue is not known.
+ */
+ { "ELAN06FA", 0 },
+ {}
+};
+
static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -373,6 +392,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
lookup->force_speed = I2C_MAX_FAST_MODE_FREQ;
+ if (acpi_match_device_ids(adev, i2c_acpi_force_100khz_device_ids) == 0)
+ lookup->force_speed = I2C_MAX_STANDARD_MODE_FREQ;
+
return AE_OK;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7c810893bfa3..c24ccefb015e 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1015,6 +1015,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
if (status)
goto out_remove_swnode;
+ client->debugfs = debugfs_create_dir(dev_name(&client->dev), adap->debugfs);
+
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
@@ -1058,6 +1060,8 @@ void i2c_unregister_device(struct i2c_client *client)
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+
+ debugfs_remove_recursive(client->debugfs);
device_remove_software_node(&client->dev);
device_unregister(&client->dev);
}
@@ -1293,14 +1297,12 @@ new_device_store(struct device *dev, struct device_attribute *attr,
info.flags |= I2C_CLIENT_SLAVE;
}
+ info.flags |= I2C_CLIENT_USER;
+
client = i2c_new_client_device(adap, &info);
if (IS_ERR(client))
return PTR_ERR(client);
- /* Keep track of the added device */
- mutex_lock(&adap->userspace_clients_lock);
- list_add_tail(&client->detected, &adap->userspace_clients);
- mutex_unlock(&adap->userspace_clients_lock);
dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
info.type, info.addr);
@@ -1308,6 +1310,15 @@ new_device_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_WO(new_device);
+static int __i2c_find_user_addr(struct device *dev, void *addrp)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ unsigned short addr = *(unsigned short *)addrp;
+
+ return client && client->flags & I2C_CLIENT_USER &&
+ i2c_encode_flags_to_addr(client) == addr;
+}
+
/*
* And of course let the users delete the devices they instantiated, if
* they got it wrong. This interface can only be used to delete devices
@@ -1322,7 +1333,7 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_adapter *adap = to_i2c_adapter(dev);
- struct i2c_client *client, *next;
+ struct device *child_dev;
unsigned short addr;
char end;
int res;
@@ -1338,28 +1349,19 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
+ mutex_lock(&core_lock);
/* Make sure the device was added through sysfs */
- res = -ENOENT;
- mutex_lock_nested(&adap->userspace_clients_lock,
- i2c_adapter_depth(adap));
- list_for_each_entry_safe(client, next, &adap->userspace_clients,
- detected) {
- if (i2c_encode_flags_to_addr(client) == addr) {
- dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
- "delete_device", client->name, client->addr);
-
- list_del(&client->detected);
- i2c_unregister_device(client);
- res = count;
- break;
- }
+ child_dev = device_find_child(&adap->dev, &addr, __i2c_find_user_addr);
+ if (child_dev) {
+ i2c_unregister_device(i2c_verify_client(child_dev));
+ put_device(child_dev);
+ } else {
+ dev_err(dev, "Can't find userspace-created device at %#x\n", addr);
+ count = -ENOENT;
}
- mutex_unlock(&adap->userspace_clients_lock);
+ mutex_unlock(&core_lock);
- if (res < 0)
- dev_err(dev, "%s: Can't find device in list\n",
- "delete_device");
- return res;
+ return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
delete_device_store);
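
Editor's note: the rewritten delete path leans on one subtlety worth spelling out: device_find_child() returns the matched child with a reference held, which the caller must drop. A minimal sketch of the pattern, with a hypothetical match callback:

	/*
	 * Sketch only. device_find_child() walks the parent's children,
	 * calls the match function on each, and returns the first match
	 * with its refcount raised; put_device() balances that reference.
	 */
	static int match_by_addr(struct device *dev, void *addrp)
	{
		struct i2c_client *client = i2c_verify_client(dev);

		return client && client->addr == *(unsigned short *)addrp;
	}

	static void remove_client_at(struct i2c_adapter *adap, unsigned short addr)
	{
		struct device *child;

		child = device_find_child(&adap->dev, &addr, match_by_addr);
		if (!child)
			return;

		i2c_unregister_device(i2c_verify_client(child));
		put_device(child);	/* drop the find_child reference */
	}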
@@ -1530,8 +1532,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->locked_flags = 0;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
- mutex_init(&adap->userspace_clients_lock);
- INIT_LIST_HEAD(&adap->userspace_clients);
/* Set default timeout to 1 second if not already set */
if (adap->timeout == 0)
@@ -1562,6 +1562,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
+ put_device(&adap->dev);
goto out_list;
}
@@ -1696,23 +1697,6 @@ int i2c_add_numbered_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
-static void i2c_do_del_adapter(struct i2c_driver *driver,
- struct i2c_adapter *adapter)
-{
- struct i2c_client *client, *_n;
-
- /* Remove the devices we created ourselves as the result of hardware
- * probing (using a driver's detect method) */
- list_for_each_entry_safe(client, _n, &driver->clients, detected) {
- if (client->adapter == adapter) {
- dev_dbg(&adapter->dev, "Removing %s at 0x%x\n",
- client->name, client->addr);
- list_del(&client->detected);
- i2c_unregister_device(client);
- }
- }
-}
-
static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -1728,12 +1712,6 @@ static int __unregister_dummy(struct device *dev, void *dummy)
return 0;
}
-static int __process_removed_adapter(struct device_driver *d, void *data)
-{
- i2c_do_del_adapter(to_i2c_driver(d), data);
- return 0;
-}
-
/**
* i2c_del_adapter - unregister I2C adapter
* @adap: the adapter being unregistered
@@ -1745,7 +1723,6 @@ static int __process_removed_adapter(struct device_driver *d, void *data)
void i2c_del_adapter(struct i2c_adapter *adap)
{
struct i2c_adapter *found;
- struct i2c_client *client, *next;
/* First make sure that this adapter was ever added */
mutex_lock(&core_lock);
@@ -1757,31 +1734,16 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}
i2c_acpi_remove_space_handler(adap);
- /* Tell drivers about this removal */
- mutex_lock(&core_lock);
- bus_for_each_drv(&i2c_bus_type, NULL, adap,
- __process_removed_adapter);
- mutex_unlock(&core_lock);
-
- /* Remove devices instantiated from sysfs */
- mutex_lock_nested(&adap->userspace_clients_lock,
- i2c_adapter_depth(adap));
- list_for_each_entry_safe(client, next, &adap->userspace_clients,
- detected) {
- dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
- client->addr);
- list_del(&client->detected);
- i2c_unregister_device(client);
- }
- mutex_unlock(&adap->userspace_clients_lock);
/* Detach any active clients. This can't fail, thus we do not
* check the returned value. This is a two-pass process, because
* we can't remove the dummy devices during the first pass: they
* could have been instantiated by real devices wishing to clean
* them up properly, so we give them a chance to do that first. */
+ mutex_lock(&core_lock);
device_for_each_child(&adap->dev, NULL, __unregister_client);
device_for_each_child(&adap->dev, NULL, __unregister_dummy);
+ mutex_unlock(&core_lock);
/* device name is gone after device_unregister */
dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
@@ -2001,7 +1963,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
- INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -2019,10 +1980,13 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
}
EXPORT_SYMBOL(i2c_register_driver);
-static int __process_removed_driver(struct device *dev, void *data)
+static int __i2c_unregister_detected_client(struct device *dev, void *argp)
{
- if (dev->type == &i2c_adapter_type)
- i2c_do_del_adapter(data, to_i2c_adapter(dev));
+ struct i2c_client *client = i2c_verify_client(dev);
+
+ if (client && client->flags & I2C_CLIENT_AUTO)
+ i2c_unregister_device(client);
+
return 0;
}
@@ -2033,7 +1997,12 @@ static int __process_removed_driver(struct device *dev, void *data)
*/
void i2c_del_driver(struct i2c_driver *driver)
{
- i2c_for_each_dev(driver, __process_removed_driver);
+ mutex_lock(&core_lock);
+ /* Satisfy __must_check, function can't fail */
+ if (driver_for_each_device(&driver->driver, NULL, NULL,
+ __i2c_unregister_detected_client)) {
+ }
+ mutex_unlock(&core_lock);
driver_unregister(&driver->driver);
pr_debug("driver [%s] unregistered\n", driver->driver.name);
@@ -2460,6 +2429,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
+ info.flags = I2C_CLIENT_AUTO;
err = driver->detect(temp_client, &info);
if (err) {
/* -ENODEV is returned if the detection fails. We catch it
@@ -2486,9 +2456,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
info.type, info.addr);
client = i2c_new_client_device(adapter, &info);
- if (!IS_ERR(client))
- list_add_tail(&client->detected, &driver->clients);
- else
+ if (IS_ERR(client))
dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n",
info.type, info.addr);
}
@@ -2498,7 +2466,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
{
const unsigned short *address_list;
- struct i2c_client *temp_client;
+ struct i2c_client temp_client;
int i, err = 0;
address_list = driver->address_list;
@@ -2519,22 +2487,19 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
return 0;
/* Set up a temporary client to help detect callback */
- temp_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
- if (!temp_client)
- return -ENOMEM;
- temp_client->adapter = adapter;
+ memset(&temp_client, 0, sizeof(temp_client));
+ temp_client.adapter = adapter;
for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
dev_dbg(&adapter->dev,
"found normal entry for adapter %d, addr 0x%02x\n",
i2c_adapter_id(adapter), address_list[i]);
- temp_client->addr = address_list[i];
- err = i2c_detect_address(temp_client, driver);
+ temp_client.addr = address_list[i];
+ err = i2c_detect_address(&temp_client, driver);
if (unlikely(err))
break;
}
- kfree(temp_client);
return err;
}
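
Editor's note: the i2c_detect() change above trades a kzalloc()/kfree() pair for a zero-initialized stack object, which is safe here because the temporary client never escapes the function. A minimal sketch of the idiom under that assumption:

	/* Before (heap): lifetime managed manually, allocation can fail */
	struct i2c_client *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	/* ... use tmp ... */
	kfree(tmp);

	/* After (stack): no failure path, reclaimed automatically on
	 * return. Only valid while the object is modest in size and
	 * never outlives the caller. */
	struct i2c_client tmp2 = { .adapter = adapter };
	/* ... use &tmp2 ... */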
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 5946c0d0aef9..275d1d0e910f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -91,7 +91,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
}
static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
struct eeprom_data *eeprom;
unsigned long flags;
@@ -106,7 +106,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
}
static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
struct eeprom_data *eeprom;
unsigned long flags;
@@ -165,8 +165,8 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client)
sysfs_bin_attr_init(&eeprom->bin);
eeprom->bin.attr.name = "slave-eeprom";
eeprom->bin.attr.mode = S_IRUSR | S_IWUSR;
- eeprom->bin.read = i2c_slave_eeprom_bin_read;
- eeprom->bin.write = i2c_slave_eeprom_bin_write;
+ eeprom->bin.read_new = i2c_slave_eeprom_bin_read;
+ eeprom->bin.write_new = i2c_slave_eeprom_bin_write;
eeprom->bin.size = size;
ret = sysfs_create_bin_file(&client->dev.kobj, &eeprom->bin);
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 0d6fbaa48248..6de4307050dd 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -38,6 +38,7 @@ enum testunit_regs {
enum testunit_flags {
TU_FLAG_IN_PROCESS,
+ TU_FLAG_NACK,
};
struct testunit_data {
@@ -90,8 +91,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
switch (event) {
case I2C_SLAVE_WRITE_REQUESTED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (test_bit(TU_FLAG_IN_PROCESS | TU_FLAG_NACK, &tu->flags)) {
+ ret = -EBUSY;
+ break;
+ }
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
@@ -99,8 +102,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (test_bit(TU_FLAG_IN_PROCESS | TU_FLAG_NACK, &tu->flags)) {
+ ret = -EBUSY;
+ break;
+ }
if (tu->reg_idx < TU_NUM_REGS)
tu->regs[tu->reg_idx] = *val;
@@ -129,6 +134,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
* here because we still need them in the workqueue!
*/
tu->reg_idx = 0;
+
+ clear_bit(TU_FLAG_NACK, &tu->flags);
break;
case I2C_SLAVE_READ_PROCESSED:
@@ -151,6 +158,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
}
+	/* If an error occurred at some point, we NACK everything until the next STOP */
+ if (ret)
+ set_bit(TU_FLAG_NACK, &tu->flags);
+
return ret;
}
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index dce18f763a09..77a740561fd7 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
}
/*
- * Check if there are pinctrl states at all. Note: we cant' use
+ * Check if there are pinctrl states at all. Note: we can't use
* devm_pinctrl_get_select() because we need to distinguish between
* the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
*/
@@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
pm_runtime_no_callbacks(&pdev->dev);
/* switch to first parent as active master */
- i2c_demux_activate_master(priv, 0);
+ err = i2c_demux_activate_master(priv, 0);
+ if (err)
+ goto err_rollback;
err = device_create_file(&pdev->dev, &dev_attr_available_masters);
if (err)
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 42310c9a00c2..d5dc4180afbc 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1919,7 +1919,7 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_bus_cleanup;
if (master->ops->set_speed) {
- master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
if (ret)
goto err_bus_cleanup;
}
@@ -2486,7 +2486,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
struct i2c_dev_desc *i2cdev;
struct i2c_dev_boardinfo *i2cboardinfo;
- int ret;
+ int ret, id = -ENODEV;
adap->dev.parent = master->dev.parent;
adap->owner = master->dev.parent->driver->owner;
@@ -2497,7 +2497,15 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->timeout = 1000;
adap->retries = 3;
- ret = i2c_add_adapter(adap);
+ if (master->dev.of_node)
+ id = of_alias_get_id(master->dev.of_node, "i2c");
+
+ if (id >= 0) {
+ adap->nr = id;
+ ret = i2c_add_numbered_adapter(adap);
+ } else {
+ ret = i2c_add_adapter(adap);
+ }
if (ret)
return ret;
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 90dee3ec5520..77da199c7413 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -57,3 +57,14 @@ config MIPI_I3C_HCI
This driver can also be built as a module. If so, the module will be
called mipi-i3c-hci.
+
+config MIPI_I3C_HCI_PCI
+ tristate "MIPI I3C Host Controller Interface PCI support"
+ depends on MIPI_I3C_HCI
+ depends on PCI
+ help
+ Support for MIPI I3C Host Controller Interface compatible hardware
+ on the PCI bus.
+
+ This driver can also be built as a module. If so, the module will be
+ called mipi-i3c-hci-pci.
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index d4b80eb8cecd..2fbf8b2addd0 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -251,14 +251,6 @@ struct dw_i3c_i2c_dev_data {
struct i3c_generic_ibi_pool *ibi_pool;
};
-static u8 even_parity(u8 p)
-{
- p ^= p >> 4;
- p &= 0xf;
-
- return (0x9669 >> p) & 1;
-}
-
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
const struct i3c_ccc_cmd *cmd)
{
@@ -848,7 +840,7 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
struct dw_i3c_xfer *xfer;
struct dw_i3c_cmd *cmd;
u32 olddevs, newdevs;
- u8 p, last_addr = 0;
+ u8 last_addr = 0;
int ret, pos;
ret = pm_runtime_resume_and_get(master->dev);
@@ -873,9 +865,9 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
}
master->devs[pos].addr = ret;
- p = even_parity(ret);
last_addr = ret;
- ret |= (p << 7);
+
+ ret |= parity8(ret) ? 0 : BIT(7);
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
master->regs +
@@ -1647,6 +1639,7 @@ EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
void dw_i3c_common_remove(struct dw_i3c_master *master)
{
+ cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
pm_runtime_disable(master->dev);
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 06c0592487d3..fedbe6624a1c 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -889,8 +889,7 @@ static u32 prepare_rr0_dev_address(u32 addr)
ret |= (addr & GENMASK(9, 7)) << 6;
/* RR0[0] = ~XOR(addr[6:0]) */
- if (!(hweight8(addr & 0x7f) & 1))
- ret |= 1;
+ ret |= parity8(addr & 0x7f) ? 0 : BIT(0);
return ret;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
index 1f8cd5c48fde..e3d3ef757035 100644
--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -5,3 +5,4 @@ mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
cmd_v1.o cmd_v2.o \
dat_v1.o dct_v1.o \
hci_quirks.o
+obj-$(CONFIG_MIPI_I3C_HCI_PCI) += mipi-i3c-hci-pci.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
index 47b9b4d4ed3f..85c4916972e4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -40,15 +40,6 @@
#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
-static inline bool dynaddr_parity(unsigned int addr)
-{
- addr |= 1 << 7;
- addr += addr >> 4;
- addr += addr >> 2;
- addr += addr >> 1;
- return (addr & 1);
-}
-
static int hci_dat_v1_init(struct i3c_hci *hci)
{
unsigned int dat_idx;
@@ -123,7 +114,7 @@ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
dat_w0 = dat_w0_read(dat_idx);
dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
- (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ (parity8(address) ? 0 : DAT_0_DYNADDR_PARITY);
dat_w0_write(dat_idx, dat_w0);
}
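
Editor's note: all three open-coded parity helpers removed in this series (even_parity(), the hweight8() test, dynaddr_parity()) reduce to the same identity: parity8() returns 1 when its argument has an odd number of set bits, so setting bit 7 exactly when the low bits have even parity always yields odd overall parity, as I3C dynamic addressing requires. A small standalone check of that claim, assuming this parity8() semantic:

	#include <stdint.h>
	#include <stdio.h>

	/* Same semantics as the kernel's parity8(): 1 iff an odd number of bits set */
	static int parity8(uint8_t v)
	{
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;
		return v & 1;
	}

	int main(void)
	{
		for (unsigned int addr = 0; addr < 0x80; addr++) {
			uint8_t encoded = addr | (parity8(addr) ? 0 : 0x80);

			if (!parity8(encoded)) {	/* must never trigger */
				printf("0x%02x encodes to even parity\n", addr);
				return 1;
			}
		}
		printf("all 128 dynamic addresses encode to odd parity\n");
		return 0;
	}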
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index e8e56a8d2057..491dfe70b660 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -758,9 +758,26 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
complete(&rh->op_done);
if (status & INTR_TRANSFER_ABORT) {
+ u32 ring_status;
+
dev_notice_ratelimited(&hci->master.dev,
"ring %d: Transfer Aborted\n", i);
mipi_i3c_hci_resume(hci);
+ ring_status = rh_reg_read(RING_STATUS);
+ if (!(ring_status & RING_STATUS_RUNNING) &&
+ status & INTR_TRANSFER_COMPLETION &&
+ status & INTR_TRANSFER_ERR) {
+			/*
+			 * Stopping then restarting the ring is an
+			 * Intel-specific quirk required after resuming
+			 * the halted controller. Do it only when the
+			 * ring is not running after a transfer error.
+			 */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
+ RING_CTRL_RUN_STOP);
+ }
}
if (status & INTR_WARN_INS_STOP_MODE)
dev_warn_ratelimited(&hci->master.dev,
diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
new file mode 100644
index 000000000000..c6c3a3ec11ea
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI glue code for MIPI I3C HCI driver
+ *
+ * Copyright (C) 2024 Intel Corporation
+ *
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ */
+#include <linux/acpi.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+struct mipi_i3c_hci_pci_info {
+ int (*init)(struct pci_dev *pci);
+};
+
+#define INTEL_PRIV_OFFSET 0x2b0
+#define INTEL_PRIV_SIZE 0x28
+#define INTEL_PRIV_RESETS 0x04
+#define INTEL_PRIV_RESETS_RESET BIT(0)
+#define INTEL_PRIV_RESETS_RESET_DONE BIT(1)
+
+static DEFINE_IDA(mipi_i3c_hci_pci_ida);
+
+static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci)
+{
+ unsigned long timeout;
+ void __iomem *priv;
+
+ priv = devm_ioremap(&pci->dev,
+ pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET,
+ INTEL_PRIV_SIZE);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Assert reset, wait for completion and release reset */
+ writel(0, priv + INTEL_PRIV_RESETS);
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (!(readl(priv + INTEL_PRIV_RESETS) &
+ INTEL_PRIV_RESETS_RESET_DONE)) {
+ if (time_after(jiffies, timeout))
+ break;
+ cpu_relax();
+ }
+ writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS);
+
+ return 0;
+}
+
+static struct mipi_i3c_hci_pci_info intel_info = {
+ .init = mipi_i3c_hci_pci_intel_init,
+};
+
+static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct mipi_i3c_hci_pci_info *info;
+ struct platform_device *pdev;
+ struct resource res[2];
+ int dev_id, ret;
+
+ ret = pcim_enable_device(pci);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci);
+
+ memset(&res, 0, sizeof(res));
+
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = pci->irq;
+ res[1].end = pci->irq;
+
+ dev_id = ida_alloc(&mipi_i3c_hci_pci_ida, GFP_KERNEL);
+ if (dev_id < 0)
+ return dev_id;
+
+ pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->dev.parent = &pci->dev;
+ device_set_node(&pdev->dev, dev_fwnode(&pci->dev));
+
+ ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ if (ret)
+ goto err;
+
+ info = (struct mipi_i3c_hci_pci_info *)id->driver_data;
+ if (info && info->init) {
+ ret = info->init(pci);
+ if (ret)
+ goto err;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+
+ pci_set_drvdata(pci, pdev);
+
+ return 0;
+
+err:
+ platform_device_put(pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+ return ret;
+}
+
+static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
+{
+ struct platform_device *pdev = pci_get_drvdata(pci);
+ int dev_id = pdev->id;
+
+ platform_device_unregister(pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+}
+
+static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
+ /* Panther Lake-H */
+ { PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
+ /* Panther Lake-P */
+ { PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info},
+ { },
+};
+MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
+
+static struct pci_driver mipi_i3c_hci_pci_driver = {
+ .name = "mipi_i3c_hci_pci",
+ .id_table = mipi_i3c_hci_pci_devices,
+ .probe = mipi_i3c_hci_pci_probe,
+ .remove = mipi_i3c_hci_pci_remove,
+};
+
+module_pci_driver(mipi_i3c_hci_pci_driver);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MIPI I3C HCI driver on PCI bus");
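
Editor's note: the reset handshake in mipi_i3c_hci_pci_intel_init() polls INTEL_PRIV_RESETS by hand with jiffies and cpu_relax(). An equivalent, more compact formulation, shown only as a sketch assuming <linux/iopoll.h>, would be:

	/* Poll until RESET_DONE, sleeping ~10us between reads, giving up
	 * after 10ms. On timeout ret is -ETIMEDOUT; the hand-rolled loop
	 * above instead falls through silently and asserts the reset anyway.
	 */
	u32 resets;
	int ret = readl_poll_timeout(priv + INTEL_PRIV_RESETS, resets,
				     resets & INTEL_PRIV_RESETS_RESET_DONE,
				     10, 10 * USEC_PER_MSEC);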
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ac4d8faa3886..118fe1d37c22 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -51,6 +51,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
+#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
@@ -1651,6 +1652,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &idle_cpu_srf),
{}
};
@@ -2316,10 +2318,7 @@ static int __init intel_idle_init(void)
return -ENODEV;
}
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return -ENODEV;
-
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index b7c078b7f7cf..f8413f8a9f26 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1127,41 +1127,6 @@ err:
}
EXPORT_SYMBOL(ib_find_cached_pkey);
-int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
- u16 pkey, u16 *index)
-{
- struct ib_pkey_cache *cache;
- unsigned long flags;
- int i;
- int ret = -ENOENT;
-
- if (!rdma_is_port_valid(device, port_num))
- return -EINVAL;
-
- read_lock_irqsave(&device->cache_lock, flags);
-
- cache = device->port_data[port_num].cache.pkey;
- if (!cache) {
- ret = -EINVAL;
- goto err;
- }
-
- *index = -1;
-
- for (i = 0; i < cache->table_len; ++i)
- if (cache->table[i] == pkey) {
- *index = i;
- ret = 0;
- break;
- }
-
-err:
- read_unlock_irqrestore(&device->cache_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_find_exact_cached_pkey);
-
int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
unsigned long flags;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ca9b956c034d..0ded91f056f3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -209,23 +209,6 @@ static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
printk("%s(NULL ib_device): %pV", level, vaf);
}
-void ibdev_printk(const char *level, const struct ib_device *ibdev,
- const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- __ibdev_printk(level, ibdev, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ibdev_printk);
-
#define define_ibdev_printk_level(func, level) \
void func(const struct ib_device *ibdev, const char *fmt, ...) \
{ \
@@ -2296,6 +2279,33 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
EXPORT_SYMBOL(ib_device_get_netdev);
/**
+ * ib_query_netdev_port - Query the port number of a net_device
+ * associated with an ibdev
+ * @ibdev: IB device
+ * @ndev: Network device
+ * @port: IB port the net_device is connected to
+ */
+int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
+ u32 *port)
+{
+ struct net_device *ib_ndev;
+ u32 port_num;
+
+ rdma_for_each_port(ibdev, port_num) {
+ ib_ndev = ib_device_get_netdev(ibdev, port_num);
+ if (ndev == ib_ndev) {
+ *port = port_num;
+ dev_put(ib_ndev);
+ return 0;
+ }
+ dev_put(ib_ndev);
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(ib_query_netdev_port);
+
+/**
* ib_device_get_by_netdev - Find an IB device associated with a netdev
* @ndev: netdev to locate
* @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
@@ -2761,6 +2771,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
+ SET_DEVICE_OP(dev_ops, report_port_event);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
@@ -2854,11 +2865,62 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
},
};
+void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
+{
+ enum ib_port_state curr_state;
+ struct ib_event ibevent = {};
+ u32 port;
+
+ if (ib_query_netdev_port(ibdev, ndev, &port))
+ return;
+
+ curr_state = ib_get_curr_port_state(ndev);
+
+ write_lock_irq(&ibdev->cache_lock);
+ if (ibdev->port_data[port].cache.last_port_state == curr_state) {
+ write_unlock_irq(&ibdev->cache_lock);
+ return;
+ }
+ ibdev->port_data[port].cache.last_port_state = curr_state;
+ write_unlock_irq(&ibdev->cache_lock);
+
+ ibevent.event = (curr_state == IB_PORT_DOWN) ?
+ IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
+ ibevent.device = ibdev;
+ ibevent.element.port_num = port;
+ ib_dispatch_event(&ibevent);
+}
+EXPORT_SYMBOL(ib_dispatch_port_state_event);
+
+static void handle_port_event(struct net_device *ndev, unsigned long event)
+{
+ struct ib_device *ibdev;
+
+ /* Currently, link events in bonding scenarios are still
+ * reported by drivers that support bonding.
+ */
+ if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
+ return;
+
+ ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
+ if (!ibdev)
+ return;
+
+ if (ibdev->ops.report_port_event) {
+ ibdev->ops.report_port_event(ibdev, ndev, event);
+ goto put_ibdev;
+ }
+
+ ib_dispatch_port_state_event(ibdev, ndev);
+
+put_ibdev:
+ ib_device_put(ibdev);
+}
+
static int ib_netdevice_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct net_device *ib_ndev;
struct ib_device *ibdev;
u32 port;
@@ -2868,15 +2930,21 @@ static int ib_netdevice_event(struct notifier_block *this,
if (!ibdev)
return NOTIFY_DONE;
- rdma_for_each_port(ibdev, port) {
- ib_ndev = ib_device_get_netdev(ibdev, port);
- if (ndev == ib_ndev)
- rdma_nl_notify_event(ibdev, port,
- RDMA_NETDEV_RENAME_EVENT);
- dev_put(ib_ndev);
+ if (ib_query_netdev_port(ibdev, ndev, &port)) {
+ ib_device_put(ibdev);
+ break;
}
+
+ rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
ib_device_put(ibdev);
break;
+
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ case NETDEV_DOWN:
+ handle_port_event(ndev, event);
+ break;
+
default:
break;
}
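
Editor's note: a short usage sketch for the new ib_query_netdev_port() helper (illustrative; error handling reduced to the lookup itself):

	/* Resolve the IB port behind a netdev, then act on it. This is
	 * the same shape the rename-event path above now takes. */
	u32 port;

	if (!ib_query_netdev_port(ibdev, ndev, &port))
		rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);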
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 64d9c492de64..8d3dfef9ebaa 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -462,86 +462,3 @@ int ib_ud_header_pack(struct ib_ud_header *header,
return len;
}
EXPORT_SYMBOL(ib_ud_header_pack);
-
-/**
- * ib_ud_header_unpack - Unpack UD header struct from wire format
- * @header:UD header struct
- * @buf:Buffer to pack into
- *
- * ib_ud_header_pack() unpacks the UD header structure @header from wire
- * format in the buffer @buf.
- */
-int ib_ud_header_unpack(void *buf,
- struct ib_ud_header *header)
-{
- ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
- buf, &header->lrh);
- buf += IB_LRH_BYTES;
-
- if (header->lrh.link_version != 0) {
- pr_warn("Invalid LRH.link_version %u\n",
- header->lrh.link_version);
- return -EINVAL;
- }
-
- switch (header->lrh.link_next_header) {
- case IB_LNH_IBA_LOCAL:
- header->grh_present = 0;
- break;
-
- case IB_LNH_IBA_GLOBAL:
- header->grh_present = 1;
- ib_unpack(grh_table, ARRAY_SIZE(grh_table),
- buf, &header->grh);
- buf += IB_GRH_BYTES;
-
- if (header->grh.ip_version != 6) {
- pr_warn("Invalid GRH.ip_version %u\n",
- header->grh.ip_version);
- return -EINVAL;
- }
- if (header->grh.next_header != 0x1b) {
- pr_warn("Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header);
- return -EINVAL;
- }
- break;
-
- default:
- pr_warn("Invalid LRH.link_next_header %u\n",
- header->lrh.link_next_header);
- return -EINVAL;
- }
-
- ib_unpack(bth_table, ARRAY_SIZE(bth_table),
- buf, &header->bth);
- buf += IB_BTH_BYTES;
-
- switch (header->bth.opcode) {
- case IB_OPCODE_UD_SEND_ONLY:
- header->immediate_present = 0;
- break;
- case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
- header->immediate_present = 1;
- break;
- default:
- pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
- return -EINVAL;
- }
-
- if (header->bth.transport_header_version != 0) {
- pr_warn("Invalid BTH.transport_header_version %u\n",
- header->bth.transport_header_version);
- return -EINVAL;
- }
-
- ib_unpack(deth_table, ARRAY_SIZE(deth_table),
- buf, &header->deth);
- buf += IB_DETH_BYTES;
-
- if (header->immediate_present)
- memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_ud_header_unpack);
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 11a080646916..e803f609ec87 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -171,45 +171,3 @@ void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
__ib_copy_path_rec_to_user(dst, src);
}
EXPORT_SYMBOL(ib_copy_path_rec_to_user);
-
-void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
- struct ib_user_path_rec *src)
-{
- u32 slid, dlid;
-
- memset(dst, 0, sizeof(*dst));
- if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
- (ib_is_opa_gid((union ib_gid *)src->dgid))) {
- dst->rec_type = SA_PATH_REC_TYPE_OPA;
- slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
- dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
- } else {
- dst->rec_type = SA_PATH_REC_TYPE_IB;
- slid = ntohs(src->slid);
- dlid = ntohs(src->dlid);
- }
- memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
- memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
-
- sa_path_set_dlid(dst, dlid);
- sa_path_set_slid(dst, slid);
- sa_path_set_raw_traffic(dst, src->raw_traffic);
- dst->flow_label = src->flow_label;
- dst->hop_limit = src->hop_limit;
- dst->traffic_class = src->traffic_class;
- dst->reversible = src->reversible;
- dst->numb_path = src->numb_path;
- dst->pkey = src->pkey;
- dst->sl = src->sl;
- dst->mtu_selector = src->mtu_selector;
- dst->mtu = src->mtu;
- dst->rate_selector = src->rate_selector;
- dst->rate = src->rate;
- dst->packet_life_time = src->packet_life_time;
- dst->preference = src->preference;
- dst->packet_life_time_selector = src->packet_life_time_selector;
-
- /* TODO: No need to set this */
- sa_path_set_dmac_zero(dst);
-}
-EXPORT_SYMBOL(ib_copy_path_rec_from_user);
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 1211f4317a9f..aba96ca9bce5 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
-obj-$(CONFIG_INFINIBAND_HNS) += hns/
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 2975b11b79bf..b91a85a491d0 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -204,7 +204,7 @@ struct bnxt_re_dev {
struct bnxt_re_nq_record *nqr;
/* Device Resources */
- struct bnxt_qplib_dev_attr dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_ctx qplib_ctx;
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
@@ -229,6 +229,9 @@ struct bnxt_re_dev {
DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
struct dentry *dbg_root;
struct dentry *qp_debugfs;
+ unsigned long event_bitmap;
+ struct bnxt_qplib_cc_param cc_param;
+ struct workqueue_struct *dcb_wq;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 1e63f8091748..3ac47f4e6122 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -37,18 +37,9 @@
*
*/
-#include <linux/interrupt.h>
#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/prefetch.h>
-#include <linux/delay.h>
-#include <rdma/ib_addr.h>
-
-#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -357,7 +348,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
- if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) &&
!rdev->is_virtfn) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index e3d26bd6de05..2de101d6e825 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -52,8 +52,6 @@
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>
-#include "bnxt_ulp.h"
-
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -161,7 +159,7 @@ static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
struct bnxt_qplib_mrw *qplib_mr)
{
- if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
+ if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
}
@@ -186,7 +184,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
@@ -275,7 +273,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
int rc;
memset(port_attr, 0, sizeof(*port_attr));
@@ -333,8 +331,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
- rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
- rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+ rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
+ rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
}
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
@@ -585,7 +583,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
- if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
@@ -1057,7 +1055,7 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
@@ -1277,7 +1275,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
rq = &qplqp->rq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (init_attr->srq) {
struct bnxt_re_srq *srq;
@@ -1314,7 +1312,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1340,7 +1338,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
sq->max_sge = init_attr->cap.max_send_sge;
entries = init_attr->cap.max_send_wr;
@@ -1393,7 +1391,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
@@ -1442,7 +1440,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
@@ -1612,7 +1610,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
ib_pd = ib_qp->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
@@ -1840,7 +1838,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
ib_pd = ib_srq->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
@@ -2044,7 +2042,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
unsigned int flags;
@@ -3091,7 +3089,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata = &attrs->driver_udata;
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_qplib_chip_ctx *cctx;
int cqe = attr->cqe;
int rc, entries;
@@ -3226,7 +3224,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!ibcq->uobject) {
ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
return -EOPNOTSUPP;
@@ -4199,7 +4197,7 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
- if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
@@ -4291,7 +4289,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
struct bnxt_re_ucontext *uctx =
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
struct bnxt_re_uctx_req ureq = {};
@@ -4467,9 +4465,10 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
case BNXT_RE_MMAP_TOGGLE_PAGE:
/* Driver doesn't expect write access for user space */
if (vma->vm_flags & VM_WRITE)
- return -EFAULT;
- ret = vm_insert_page(vma, vma->vm_start,
- virt_to_page((void *)bnxt_entry->mem_offset));
+ ret = -EFAULT;
+ else
+ ret = vm_insert_page(vma, vma->vm_start,
+ virt_to_page((void *)bnxt_entry->mem_offset));
break;
default:
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index c143f273b759..e9e4da4dd576 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -79,17 +79,12 @@ MODULE_LICENSE("Dual BSD/GPL");
/* globals */
static DEFINE_MUTEX(bnxt_re_mutex);
-static void bnxt_re_stop_irq(void *handle);
-static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr);
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
-static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable);
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+ u8 port_num, enum ib_event_type event);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
@@ -153,6 +148,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
if (!rdev->chip_ctx)
return;
+
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
chip_ctx = rdev->chip_ctx;
rdev->chip_ctx = NULL;
rdev->rcfw.res = NULL;
@@ -166,7 +165,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- int rc;
+ int rc = -ENOMEM;
en_dev = rdev->en_dev;
@@ -182,7 +181,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
- rdev->qplib_res.dattr = &rdev->dev_attr;
+ rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
+ if (!rdev->dev_attr)
+ goto free_chip_ctx;
+ rdev->qplib_res.dattr = rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
rdev->qplib_res.en_dev = en_dev;
@@ -190,16 +192,20 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
- if (rc) {
- kfree(rdev->chip_ctx);
- rdev->chip_ctx = NULL;
- return rc;
- }
+ if (rc)
+ goto free_dev_attr;
if (bnxt_qplib_determine_atomics(en_dev->pdev))
ibdev_info(&rdev->ibdev,
"platform doesn't support global atomics.");
return 0;
+free_dev_attr:
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+free_chip_ctx:
+ kfree(rdev->chip_ctx);
+ rdev->chip_ctx = NULL;
+ return rc;
}
/* SR-IOV helper functions */
@@ -221,7 +227,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
struct bnxt_qplib_ctx *ctx;
int i;
- attr = &rdev->dev_attr;
+ attr = rdev->dev_attr;
ctx = &rdev->qplib_ctx;
ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
@@ -235,7 +241,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
rdev->qplib_ctx.tqm_ctx.qcount[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
+ rdev->dev_attr->tqm_alloc_reqs[i];
}
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
@@ -302,17 +308,123 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
&rdev->qplib_ctx);
}
-static void bnxt_re_shutdown(struct auxiliary_device *adev)
-{
- struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+struct bnxt_re_dcb_work {
+ struct work_struct work;
struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+};
- rdev = en_info->rdev;
- ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
+{
+ return qp->ib_qp.qp_type == IB_QPT_GSI;
+}
+
+static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ if (bnxt_re_is_qp1_qp(qp)) {
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+ return NULL;
+}
+
+static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ qp = bnxt_re_get_qp1_qp(rdev);
+ if (!qp)
+ return 0;
+
+ qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
+ qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
+
+ return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+}
+
+static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
}
-static void bnxt_re_stop_irq(void *handle)
+static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->dcb_wq)
+ return;
+ destroy_workqueue(rdev->dcb_wq);
+}
+
+static void bnxt_re_dcb_wq_task(struct work_struct *work)
+{
+ struct bnxt_re_dcb_work *dcb_work =
+ container_of(work, struct bnxt_re_dcb_work, work);
+ struct bnxt_re_dev *rdev = dcb_work->rdev;
+ struct bnxt_qplib_cc_param *cc_param;
+ int rc;
+
+ if (!rdev)
+ goto free_dcb;
+
+ cc_param = &rdev->cc_param;
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "Failed to query ccparam rc:%d", rc);
+ goto free_dcb;
+ }
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d",
+ __func__, rc);
+ goto free_dcb;
+ }
+ }
+
+free_dcb:
+ kfree(dcb_work);
+}
+
+static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_re_dcb_work *dcb_work;
+ u32 data1, data2;
+ u16 event_id;
+
+ event_id = le16_to_cpu(cmpl->event_id);
+ data1 = le32_to_cpu(cmpl->event_data1);
+ data2 = le32_to_cpu(cmpl->event_data2);
+
+ ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
+ event_id, data1, data2);
+
+ switch (event_id) {
+ case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ break;
+
+ dcb_work->rdev = rdev;
+ memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
+ INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
+ queue_work(rdev->dcb_wq, &dcb_work->work);
+ break;
+ default:
+ break;
+ }
+}
+
+static void bnxt_re_stop_irq(void *handle, bool reset)
{
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_qplib_rcfw *rcfw;
@@ -323,6 +435,14 @@ static void bnxt_re_stop_irq(void *handle)
rdev = en_info->rdev;
rcfw = &rdev->rcfw;
+ if (reset) {
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_DEVICE_FATAL);
+ }
+
for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
nq = &rdev->nqr->nq[indx - 1];
bnxt_qplib_nq_stop_irq(nq, false);
@@ -378,6 +498,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = bnxt_re_async_notifier,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
@@ -839,17 +960,6 @@ static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
}
/* Device */
-
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
-{
- struct ib_device *ibdev =
- ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
- if (!ibdev)
- return NULL;
-
- return container_of(ibdev, struct bnxt_re_dev, ibdev);
-}
-
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1627,12 +1737,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto fail;
- rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
- rdev->netdev, &rdev->dev_attr);
+ rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
if (rc)
goto fail;
@@ -1807,6 +1916,26 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
+static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
+static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -1886,6 +2015,9 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_rem_pdev(rdev);
+ bnxt_re_net_unregister_async_event(rdev);
+ bnxt_re_uninit_dcb_wq(rdev);
+
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -2032,7 +2164,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
rdev->pacing.dbr_pacing = false;
}
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto disable_rcfw;
@@ -2081,6 +2213,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
if (!rdev->is_virtfn) {
+ /* Query f/w defaults of CC params */
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
+ if (rc)
+ ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
+
rc = bnxt_re_setup_qos(rdev);
if (rc)
ibdev_info(&rdev->ibdev,
@@ -2099,6 +2236,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_add_pdev(rdev);
+ bnxt_re_init_dcb_wq(rdev);
+ bnxt_re_net_register_async_event(rdev);
+
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
@@ -2117,6 +2257,30 @@ fail:
return rc;
}
+static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
+{
+ struct bnxt_qplib_cc_param cc_param = {};
+
+ /* Do not enable congestion control on VFs */
+ if (rdev->is_virtfn)
+ return;
+
+ /* Currently enabling only for GenP5 adapters */
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ if (enable) {
+ cc_param.enable = 1;
+ cc_param.tos_ecn = 1;
+ }
+
+ cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
+
+ if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
+ ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
+}
+
static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
struct bnxt_re_en_dev_info *en_info,
struct auxiliary_device *adev)
@@ -2163,20 +2327,10 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
goto re_dev_uninit;
}
- rdev->nb.notifier_call = bnxt_re_netdev_event;
- rc = register_netdevice_notifier(&rdev->nb);
- if (rc) {
- rdev->nb.notifier_call = NULL;
- pr_err("%s: Cannot register to netdevice_notifier",
- ROCE_DRV_MODULE_NAME);
- goto re_dev_unreg;
- }
bnxt_re_setup_cc(rdev, true);
return 0;
-re_dev_unreg:
- ib_unregister_device(&rdev->ibdev);
re_dev_uninit:
bnxt_re_update_en_info_rdev(NULL, en_info, adev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
@@ -2186,79 +2340,6 @@ exit:
return rc;
}
-static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
-{
- struct bnxt_qplib_cc_param cc_param = {};
-
- /* Do not enable congestion control on VFs */
- if (rdev->is_virtfn)
- return;
-
- /* Currently enabling only for GenP5 adapters */
- if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
- return;
-
- if (enable) {
- cc_param.enable = 1;
- cc_param.tos_ecn = 1;
- }
-
- cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
- CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
-
- if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
- ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
-}
-
-/*
- * "Notifier chain callback can be invoked for the same chain from
- * different CPUs at the same time".
- *
- * For cases when the netdev is already present, our call to the
- * register_netdevice_notifier() will actually get the rtnl_lock()
- * before sending NETDEV_REGISTER and (if up) NETDEV_UP
- * events.
- *
- * But for cases when the netdev is not already present, the notifier
- * chain is subjected to be invoked from different CPUs simultaneously.
- *
- * This is protected by the netdev_mutex.
- */
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
- struct bnxt_re_dev *rdev;
-
- real_dev = rdma_vlan_dev_real_dev(netdev);
- if (!real_dev)
- real_dev = netdev;
-
- if (real_dev != netdev)
- goto exit;
-
- rdev = bnxt_re_from_netdev(real_dev);
- if (!rdev)
- return NOTIFY_DONE;
-
-
- switch (event) {
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- netif_carrier_ok(real_dev) ?
- IB_EVENT_PORT_ACTIVE :
- IB_EVENT_PORT_ERR);
- break;
- default:
- break;
- }
- ib_device_put(&rdev->ibdev);
-exit:
- return NOTIFY_DONE;
-}
-
#define BNXT_ADEV_NAME "bnxt_en"
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
@@ -2316,13 +2397,9 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
if (rc)
- goto err;
- mutex_unlock(&bnxt_re_mutex);
- return 0;
+ kfree(en_info);
-err:
mutex_unlock(&bnxt_re_mutex);
- kfree(en_info);
return rc;
}
@@ -2375,6 +2452,16 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
return 0;
}
+static void bnxt_re_shutdown(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ rdev = en_info->rdev;
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+}
+
static const struct auxiliary_device_id bnxt_re_id_table[] = {
{ .name = BNXT_ADEV_NAME ".rdma", },
{},
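
Editor's note: the async-notifier plumbing added above follows a common shape: the ULP callback may run in atomic context, so it only captures the completion with a GFP_ATOMIC allocation and defers the sleepable HWRM work to a dedicated single-threaded workqueue. A generic sketch of that pattern, with hypothetical names throughout:

	struct evt_work {
		struct work_struct work;
		struct my_dev *mdev;		/* hypothetical driver context */
	};

	static void evt_work_fn(struct work_struct *work)
	{
		struct evt_work *ew = container_of(work, struct evt_work, work);

		/* Sleepable context: firmware queries, QP modifies, etc. */
		do_slow_requery(ew->mdev);	/* stand-in for the real handler */
		kfree(ew);
	}

	static void evt_notifier(struct my_dev *mdev)
	{
		struct evt_work *ew = kzalloc(sizeof(*ew), GFP_ATOMIC);

		if (!ew)
			return;			/* drop the event, never block here */

		ew->mdev = mdev;
		INIT_WORK(&ew->work, evt_work_fn);
		queue_work(mdev->wq, &ew->work);
	}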
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 0660101b5310..0d9487c889ff 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -343,6 +343,7 @@ struct bnxt_qplib_qp {
u32 msn;
u32 msn_tbl_sz;
bool is_host_msn_tbl;
+ u8 tos_dscp;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 96ceec1e8199..02922a0987ad 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -876,14 +876,13 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr)
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
+ struct bnxt_qplib_dev_attr *dev_attr;
int rc;
- res->pdev = pdev;
res->netdev = netdev;
+ dev_attr = res->dattr;
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index cbfc49a1a56d..be5d907a036b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -424,9 +424,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr);
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9df3e3271577..4ccd4405355a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -88,9 +88,9 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
fw_ver[3] = resp.fw_rsvd;
}
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
+ struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
@@ -1022,3 +1022,116 @@ free_mem:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}
+
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+ cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+ cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+ cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+ cc_ext->tr_update_mode = sb->tr_update_mode;
+ cc_ext->tr_update_cyls = sb->tr_update_cycles;
+ cc_ext->fr_rtt = sb->fr_num_rtts;
+ cc_ext->ai_rate_incr = sb->ai_rate_increase;
+ cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+ cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+ cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+ cc_ext->bw_avg_weight = sb->bw_avg_weight;
+ cc_ext->cr_factor = sb->actual_cr_factor;
+ cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+ cc_ext->cp_bias_en = sb->cp_bias_en;
+ cc_ext->cp_bias = sb->cp_bias;
+ cc_ext->cnp_ecn = sb->cnp_ecn;
+ cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+ cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+ cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+ cc_ext->cr_width = sb->cr_width;
+ cc_ext->min_quota = sb->quota_period_min;
+ cc_ext->max_quota = sb->quota_period_max;
+ cc_ext->abs_max_quota = sb->quota_period_abs_max;
+ cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+ cc_ext->cr_prob_fac = sb->cr_prob_factor;
+ cc_ext->tr_prob_fac = sb->tr_prob_factor;
+ cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+ cc_ext->red_div = sb->red_div;
+ cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+ cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+ cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+ cc_ext->low_rate_en = sb->use_rate_table;
+ cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+ cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+ cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+ cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+ cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+ cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+ cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+ cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+ cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_roce_cc_resp resp = {};
+ struct creq_query_roce_cc_resp_sb *sb;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_roce_cc req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ size_t resp_size;
+ int rc;
+
+ /* Query the parameters from chip */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+ sizeof(req));
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
+ resp_size = sizeof(*ext_sb);
+ else
+ resp_size = sizeof(*sb);
+
+ sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ if (rc)
+ goto out;
+
+ ext_sb = sbuf.sb;
+ sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+ (struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+ cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+ cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+ cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+ cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+ cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+ cc_param->g = sb->g;
+ cc_param->nph_per_state = sb->num_phases_per_state;
+ cc_param->init_cr = le16_to_cpu(sb->init_cr);
+ cc_param->init_tr = le16_to_cpu(sb->init_tr);
+ cc_param->cc_mode = sb->cc_mode;
+ cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+ cc_param->rtt = le16_to_cpu(sb->rtt);
+ cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+ cc_param->time_pph = sb->time_per_phase;
+ cc_param->pkts_pph = sb->pkts_per_phase;
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
+ cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
+ }
+out:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
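
On gen P5/P7 chips the inactivity threshold is split across the base block (low 16 bits, inactivity_th) and the extended TLV (six more bits, inact_th_hi), and the driver stitches them back together above. A freestanding sketch of that 22-bit reassembly, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t inact_th = 0xABCD;   /* low 16 bits from the base block */
            uint16_t inact_th_hi = 0x2A;  /* extended TLV; only bits 5:0 are used */

            /* widen to a 22-bit value, as done above */
            inact_th |= (uint32_t)(inact_th_hi & 0x3F) << 16;
            printf("inact_th = 0x%06X\n", (unsigned int)inact_th); /* 0x2AABCD */
            return 0;
    }
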
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index e6beeb514b7d..e626b05038a1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -296,6 +296,7 @@ struct bnxt_qplib_cc_param_ext {
struct bnxt_qplib_cc_param {
u8 alt_vlan_pcp;
+ u8 qp1_tos_dscp;
u16 alt_tos_dscp;
u8 cc_mode;
u8 enable;
@@ -325,8 +326,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx);
@@ -355,6 +355,8 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
u32 resp_size, void *resp_va);
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80970a1738f8..034b85c42255 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1114,8 +1114,10 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* The math here assumes sizeof cpl_pass_accept_req >= sizeof
* cpl_rx_pkt.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
- sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header)) - pktshift,
+ GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
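
size_add() from <linux/overflow.h> saturates at SIZE_MAX instead of wrapping, so an absurd gl->tot_len makes alloc_skb() fail outright rather than succeed with an undersized buffer. A plain-C sketch of the saturating behavior (not the kernel implementation itself):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* saturating add, mirroring what the kernel helper guarantees */
    static size_t size_add_sat(size_t a, size_t b)
    {
            size_t sum = a + b;

            return sum < a ? SIZE_MAX : sum; /* wrapped around? clamp */
    }

    int main(void)
    {
            printf("%zu\n", size_add_sat(100, 28));           /* 128 */
            printf("%zx\n", size_add_sat(SIZE_MAX - 4, 64));  /* SIZE_MAX */
            return 0;
    }
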
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7b5c4522b426..955f061a55e9 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1599,6 +1599,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
int count;
int rq_flushed = 0, sq_flushed;
unsigned long flag;
+ struct ib_event ev;
pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
@@ -1607,6 +1608,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
if (schp != rchp)
spin_lock(&schp->lock);
spin_lock(&qhp->lock);
+ if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
+ qhp->ibqp.event_handler) {
+ ev.device = qhp->ibqp.device;
+ ev.element.qp = &qhp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
+ }
if (qhp->wq.flushed) {
spin_unlock(&qhp->lock);
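
The new block fires IB_EVENT_QP_LAST_WQE_REACHED for SRQ-attached QPs entering the error state, which is the ULP's cue that no further receive completions will arrive and per-QP SRQ state can be reclaimed. A hedged consumer-side sketch; struct my_conn and my_conn_drain_done() are hypothetical, only the event plumbing matches the verbs API:

    #include <rdma/ib_verbs.h>

    struct my_conn { int draining; }; /* hypothetical ULP context */

    static void my_conn_drain_done(struct my_conn *conn)
    {
            conn->draining = 0; /* safe to release SRQ references now */
    }

    /* matches the ibqp->event_handler signature invoked above */
    static void my_qp_event_handler(struct ib_event *ev, void *qp_context)
    {
            struct my_conn *conn = qp_context;

            switch (ev->event) {
            case IB_EVENT_QP_LAST_WQE_REACHED:
                    my_conn_drain_done(conn);
                    break;
            default:
                    break;
            }
    }
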
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index d7fc9d5eeefd..838182d0409c 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -57,15 +57,15 @@ struct efa_dev {
u64 db_bar_addr;
u64 db_bar_len;
- unsigned int num_irq_vectors;
- int admin_msix_vector_idx;
+ u32 num_irq_vectors;
+ u32 admin_msix_vector_idx;
struct efa_irq admin_irq;
struct efa_stats stats;
/* Array of completion EQs */
struct efa_eq *eqs;
- unsigned int neqs;
+ u32 neqs;
/* Only stores CQs with interrupts enabled */
struct xarray cqs_xa;
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index 77282234ce68..4d9ca97e4296 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -65,7 +65,7 @@ struct efa_com_admin_queue {
u16 depth;
struct efa_com_admin_cq cq;
struct efa_com_admin_sq sq;
- u16 msix_vector_idx;
+ u32 msix_vector_idx;
unsigned long state;
@@ -89,7 +89,7 @@ struct efa_com_aenq {
struct efa_aenq_handlers *aenq_handlers;
dma_addr_t dma_addr;
u32 cc; /* consumer counter */
- u16 msix_vector_idx;
+ u32 msix_vector_idx;
u16 depth;
u8 phase;
};
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index ad225823e6f2..4f03c0ec819f 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
@@ -141,8 +141,7 @@ static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
return 0;
}
-static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
- int vector)
+static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq, u32 vector)
{
u32 cpu;
@@ -305,7 +304,7 @@ static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
efa_free_irq(dev, &eq->irq);
}
-static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
+static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u32 msix_vec)
{
int err;
@@ -328,21 +327,17 @@ err_free_comp_irq:
static int efa_create_eqs(struct efa_dev *dev)
{
- unsigned int neqs = dev->dev_attr.max_eq;
- int err;
- int i;
-
- neqs = min_t(unsigned int, neqs,
- dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
+ u32 neqs = dev->dev_attr.max_eq;
+ int err, i;
+ neqs = min_t(u32, neqs, dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
return -ENOMEM;
for (i = 0; i < neqs; i++) {
- err = efa_create_eq(dev, &dev->eqs[i],
- i + EFA_COMP_EQS_VEC_BASE);
+ err = efa_create_eq(dev, &dev->eqs[i], i + EFA_COMP_EQS_VEC_BASE);
if (err)
goto err_destroy_eqs;
}
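
The EQ count is clamped twice: by the device capability (max_eq) and by the MSI-X vectors actually granted, minus those reserved below EFA_COMP_EQS_VEC_BASE. A freestanding sketch of the clamp; the base value of 1 (vector 0 for the admin queue) is an assumption here:

    #include <stdint.h>
    #include <stdio.h>

    #define COMP_EQS_VEC_BASE 1U /* assumed: vector 0 reserved for admin */

    static uint32_t clamp_neqs(uint32_t max_eq, uint32_t num_irq_vectors)
    {
            uint32_t avail = num_irq_vectors - COMP_EQS_VEC_BASE;

            return max_eq < avail ? max_eq : avail; /* min_t(u32, ...) */
    }

    int main(void)
    {
            printf("%u\n", clamp_neqs(32, 9)); /* 8 completion EQs */
            return 0;
    }
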
@@ -470,7 +465,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
efa_destroy_eqs(dev);
- efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
@@ -643,12 +637,14 @@ err_disable_device:
return ERR_PTR(err);
}
-static void efa_remove_device(struct pci_dev *pdev)
+static void efa_remove_device(struct pci_dev *pdev,
+ enum efa_regs_reset_reason_types reset_reason)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
struct efa_com_dev *edev;
edev = &dev->edev;
+ efa_com_dev_reset(edev, reset_reason);
efa_com_admin_destroy(edev);
efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
@@ -676,7 +672,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_remove_device:
- efa_remove_device(pdev);
+ efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
return err;
}
@@ -685,7 +681,7 @@ static void efa_remove(struct pci_dev *pdev)
struct efa_dev *dev = pci_get_drvdata(pdev);
efa_ib_device_remove(dev);
- efa_remove_device(pdev);
+ efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
}
static void efa_shutdown(struct pci_dev *pdev)
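
With the reset folded into efa_remove_device(), the probe-error path can report EFA_REGS_RESET_INIT_ERR while orderly removal reports EFA_REGS_RESET_NORMAL, instead of the old unconditional reset in efa_ib_device_remove(). A minimal sketch of the pattern, with illustrative names:

    #include <stdio.h>

    enum reset_reason { RESET_NORMAL, RESET_INIT_ERR };

    /* one shared teardown path, parameterized by why it runs */
    static void remove_device(enum reset_reason why)
    {
            printf("device reset: %s\n",
                   why == RESET_NORMAL ? "normal" : "init error");
            /* ... admin queue, IRQ, MSI-X, BAR teardown ... */
    }

    int main(void)
    {
            remove_device(RESET_INIT_ERR); /* probe failure */
            remove_device(RESET_NORMAL);   /* regular remove */
            return 0;
    }
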
diff --git a/drivers/infiniband/hw/erdma/Kconfig b/drivers/infiniband/hw/erdma/Kconfig
index 169038e3ceb1..267fc1f3c42a 100644
--- a/drivers/infiniband/hw/erdma/Kconfig
+++ b/drivers/infiniband/hw/erdma/Kconfig
@@ -5,7 +5,7 @@ config INFINIBAND_ERDMA
depends on INFINIBAND_ADDR_TRANS
depends on INFINIBAND_USER_ACCESS
help
- This is a RDMA/iWarp driver for Alibaba Elastic RDMA Adapter(ERDMA),
+ This is an RDMA driver for the Alibaba Elastic RDMA Adapter (ERDMA),
which supports RDMA features in Alibaba cloud environment.
To compile this driver as module, choose M here. The module will be
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 3c166359448d..2a023b99f992 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -16,7 +16,7 @@
#include "erdma_hw.h"
#define DRV_MODULE_NAME "erdma"
-#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
+#define ERDMA_NODE_DESC "Elastic RDMA Adapter stack"
struct erdma_eq {
void *qbuf;
@@ -101,8 +101,6 @@ struct erdma_cmdq {
struct erdma_comp_wait *wait_pool;
spinlock_t lock;
- bool use_event;
-
struct erdma_cmdq_sq sq;
struct erdma_cmdq_cq cq;
struct erdma_eq eq;
@@ -148,6 +146,8 @@ struct erdma_devattr {
u32 max_mr;
u32 max_pd;
u32 max_mw;
+ u32 max_gid;
+ u32 max_ah;
u32 local_dma_key;
};
@@ -177,7 +177,8 @@ struct erdma_resource_cb {
enum {
ERDMA_RES_TYPE_PD = 0,
ERDMA_RES_TYPE_STAG_IDX = 1,
- ERDMA_RES_CNT = 2,
+ ERDMA_RES_TYPE_AH = 2,
+ ERDMA_RES_CNT = 3,
};
struct erdma_dev {
@@ -192,8 +193,6 @@ struct erdma_dev {
u8 __iomem *func_bar;
struct erdma_devattr attrs;
- /* physical port state (only one port per device) */
- enum ib_port_state state;
u32 mtu;
/* cmdq and aeq use the same msix vector */
@@ -215,6 +214,7 @@ struct erdma_dev {
struct dma_pool *db_pool;
struct dma_pool *resp_pool;
+ enum erdma_proto_type proto;
};
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
@@ -265,7 +265,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
- u64 *resp0, u64 *resp1);
+ u64 *resp0, u64 *resp1, bool sleepable);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
int erdma_ceqs_init(struct erdma_dev *dev);
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index 771059a8eb7d..1b23c698ec25 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -567,7 +567,8 @@ reject_conn:
static int erdma_proc_mpareply(struct erdma_cep *cep)
{
- struct erdma_qp_attrs qp_attrs;
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_mod_qp_params_iwarp params;
struct erdma_qp *qp = cep->qp;
struct mpa_rr *rep;
int ret;
@@ -597,26 +598,29 @@ static int erdma_proc_mpareply(struct erdma_cep *cep)
return -EINVAL;
}
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.irq_size = cep->ird;
- qp_attrs.orq_size = cep->ord;
- qp_attrs.state = ERDMA_QP_STATE_RTS;
+ memset(&params, 0, sizeof(params));
+ params.state = ERDMA_QPS_IWARP_RTS;
+ params.irq_size = cep->ird;
+ params.orq_size = cep->ord;
down_write(&qp->state_lock);
- if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto out_err;
}
- qp->attrs.qp_type = ERDMA_QP_ACTIVE;
- if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc)
- qp->attrs.cc = COMPROMISE_CC;
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_LLP_HANDLE |
+ ERDMA_QPA_IWARP_MPA | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_ORD;
- ret = erdma_modify_qp_internal(qp, &qp_attrs,
- ERDMA_QP_ATTR_STATE |
- ERDMA_QP_ATTR_LLP_HANDLE |
- ERDMA_QP_ATTR_MPA);
+ params.qp_type = ERDMA_QP_ACTIVE;
+ if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ params.cc = COMPROMISE_CC;
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params, to_modify_attrs);
up_write(&qp->state_lock);
@@ -722,7 +726,7 @@ static int erdma_newconn_connected(struct erdma_cep *cep)
__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
- cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
@@ -1126,10 +1130,11 @@ error_put_qp:
int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
- struct erdma_dev *dev = to_edev(id->device);
struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+ struct erdma_mod_qp_params_iwarp mod_qp_params;
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_dev *dev = to_edev(id->device);
struct erdma_qp *qp;
- struct erdma_qp_attrs qp_attrs;
int ret;
erdma_cep_set_inuse(cep);
@@ -1156,7 +1161,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
erdma_qp_get(qp);
down_write(&qp->state_lock);
- if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto error;
@@ -1181,11 +1186,11 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->cm_id = id;
id->add_ref(id);
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.orq_size = params->ord;
- qp_attrs.irq_size = params->ird;
+ memset(&mod_qp_params, 0, sizeof(mod_qp_params));
- qp_attrs.state = ERDMA_QP_STATE_RTS;
+ mod_qp_params.irq_size = params->ird;
+ mod_qp_params.orq_size = params->ord;
+ mod_qp_params.state = ERDMA_QPS_IWARP_RTS;
/* Associate QP with CEP */
erdma_cep_get(cep);
@@ -1194,19 +1199,21 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->state = ERDMA_EPSTATE_RDMA_MODE;
- qp->attrs.qp_type = ERDMA_QP_PASSIVE;
- qp->attrs.pd_len = params->private_data_len;
+ mod_qp_params.qp_type = ERDMA_QP_PASSIVE;
+ mod_qp_params.pd_len = params->private_data_len;
- if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits))
- qp->attrs.cc = COMPROMISE_CC;
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_ORD |
+ ERDMA_QPA_IWARP_LLP_HANDLE | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_MPA;
+
+ if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits)) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ mod_qp_params.cc = COMPROMISE_CC;
+ }
/* move to rts */
- ret = erdma_modify_qp_internal(qp, &qp_attrs,
- ERDMA_QP_ATTR_STATE |
- ERDMA_QP_ATTR_ORD |
- ERDMA_QP_ATTR_LLP_HANDLE |
- ERDMA_QP_ATTR_IRD |
- ERDMA_QP_ATTR_MPA);
+ ret = erdma_modify_qp_state_iwarp(qp, &mod_qp_params, to_modify_attrs);
+
up_write(&qp->state_lock);
if (ret)
@@ -1214,7 +1221,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->mpa.ext_data.bits = 0;
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
- cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
ret = erdma_send_mpareqrep(cep, params->private_data,
params->private_data_len);
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index a3d8922d1ad1..b867aefe83b2 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -182,7 +182,6 @@ int erdma_cmdq_init(struct erdma_dev *dev)
int err;
cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
- cmdq->use_event = false;
sema_init(&cmdq->credits, cmdq->max_outstandings);
@@ -223,8 +222,6 @@ err_destroy_sq:
void erdma_finish_cmdq_init(struct erdma_dev *dev)
{
- /* after device init successfully, change cmdq to event mode. */
- dev->cmdq.use_event = true;
arm_cmdq_cq(&dev->cmdq);
}
@@ -312,8 +309,7 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
/* Copy 16B comp data after cqe hdr to outer */
be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
- if (cmdq->use_event)
- complete(&comp_wait->wait_event);
+ complete(&comp_wait->wait_event);
return 0;
}
@@ -332,9 +328,6 @@ static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
if (erdma_poll_single_cmd_completion(cmdq))
break;
- if (comp_num && cmdq->use_event)
- arm_cmdq_cq(cmdq);
-
spin_unlock_irqrestore(&cmdq->cq.lock, flags);
}
@@ -342,8 +335,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
{
int got_event = 0;
- if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
- !cmdq->use_event)
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return;
while (get_next_valid_eqe(&cmdq->eq)) {
@@ -354,6 +346,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
if (got_event) {
cmdq->cq.cmdsn++;
erdma_polling_cmd_completions(cmdq);
+ arm_cmdq_cq(cmdq);
}
notify_eq(&cmdq->eq);
@@ -372,7 +365,7 @@ static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
if (time_is_before_jiffies(comp_timeout))
return -ETIME;
- msleep(20);
+ udelay(20);
}
return 0;
@@ -403,7 +396,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
}
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
- u64 *resp0, u64 *resp1)
+ u64 *resp0, u64 *resp1, bool sleepable)
{
struct erdma_comp_wait *comp_wait;
int ret;
@@ -411,7 +404,12 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return -ENODEV;
- down(&cmdq->credits);
+ if (!sleepable) {
+ while (down_trylock(&cmdq->credits))
+ ;
+ } else {
+ down(&cmdq->credits);
+ }
comp_wait = get_comp_wait(cmdq);
if (IS_ERR(comp_wait)) {
@@ -425,7 +423,7 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
push_cmdq_sqe(cmdq, req, req_size, comp_wait);
spin_unlock(&cmdq->sq.lock);
- if (cmdq->use_event)
+ if (sleepable)
ret = erdma_wait_cmd_completion(comp_wait, cmdq,
ERDMA_CMDQ_TIMEOUT_MS);
else
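
With use_event gone, every command completes through complete() and the caller picks the wait style: sleepable callers block with down() and a completion timeout, while atomic callers busy-acquire the credit semaphore with down_trylock() and poll the CQ with udelay(). A userspace sketch of the credit-acquisition side only, modelled with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool try_take_credit(atomic_int *credits)
    {
            int c = atomic_load(credits);

            while (c > 0)
                    if (atomic_compare_exchange_weak(credits, &c, c - 1))
                            return true;
            return false;
    }

    /* atomic context: cannot sleep, so retry until a credit frees up;
     * the driver pairs this with udelay()-based completion polling */
    static void take_credit_polling(atomic_int *credits)
    {
            while (!try_take_credit(credits))
                    ;
    }

    int main(void)
    {
            atomic_int credits;

            atomic_init(&credits, 8); /* max-outstanding budget */
            take_credit_polling(&credits);
            printf("credits left: %d\n", atomic_load(&credits));
            return 0;
    }
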
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index 70f89f0162aa..1f456327e63c 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -105,6 +105,22 @@ static const struct {
{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};
+static void erdma_process_ud_cqe(struct erdma_cqe *cqe, struct ib_wc *wc)
+{
+ u32 ud_info;
+
+ wc->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+ ud_info = be32_to_cpu(cqe->ud.info);
+ wc->network_hdr_type = FIELD_GET(ERDMA_CQE_NTYPE_MASK, ud_info);
+ if (wc->network_hdr_type == ERDMA_NETWORK_TYPE_IPV4)
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ wc->src_qp = FIELD_GET(ERDMA_CQE_SQPN_MASK, ud_info);
+ wc->sl = FIELD_GET(ERDMA_CQE_SL_MASK, ud_info);
+ wc->pkey_index = 0;
+}
+
#define ERDMA_POLLCQ_NO_QP 1
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
@@ -168,6 +184,10 @@ static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
+ if (erdma_device_rocev2(dev) &&
+ (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_GSI))
+ erdma_process_ud_cqe(cqe, wc);
+
if (syndrome >= ERDMA_NUM_WC_STATUS)
syndrome = ERDMA_WC_GENERAL_ERR;
@@ -201,3 +221,48 @@ int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return npolled;
}
+
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_cqe *cqe, *dst_cqe;
+ u32 prev_cq_ci, cur_cq_ci;
+ u32 ncqe = 0, nqp_cqe = 0;
+ unsigned long flags;
+ u8 owner;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ prev_cq_ci = cq->kern_cq.ci;
+
+ while (ncqe < cq->depth && (cqe = get_next_valid_cqe(cq)) != NULL) {
+ ++cq->kern_cq.ci;
+ ++ncqe;
+ }
+
+ while (ncqe > 0) {
+ cur_cq_ci = prev_cq_ci + ncqe - 1;
+ cqe = get_queue_entry(cq->kern_cq.qbuf, cur_cq_ci, cq->depth,
+ CQE_SHIFT);
+
+ if (be32_to_cpu(cqe->qpn) == qpn) {
+ ++nqp_cqe;
+ } else if (nqp_cqe) {
+ dst_cqe = get_queue_entry(cq->kern_cq.qbuf,
+ cur_cq_ci + nqp_cqe,
+ cq->depth, CQE_SHIFT);
+ owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ be32_to_cpu(dst_cqe->hdr));
+ cqe->hdr = cpu_to_be32(
+ (be32_to_cpu(cqe->hdr) &
+ ~ERDMA_CQE_HDR_OWNER_MASK) |
+ FIELD_PREP(ERDMA_CQE_HDR_OWNER_MASK, owner));
+ memcpy(dst_cqe, cqe, sizeof(*cqe));
+ }
+
+ --ncqe;
+ }
+
+ cq->kern_cq.ci = prev_cq_ci + nqp_cqe;
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+}
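
The helper above first advances the CI past every valid CQE, then walks the window backwards, sliding CQEs of other QPs upward over the doomed QP's slots while carrying each destination slot's owner bit so the hardware-validity encoding stays intact. A simplified array model of the backward compaction, ignoring ring wrap and the owner-bit fixup:

    #include <stdio.h>

    /* compact [lo, lo+n) so entries matching 'drop' fall below the
     * new consumer index, as in the backward walk above */
    static int squeeze(int *q, int lo, int n, int drop)
    {
            int ndrop = 0;

            for (int i = lo + n - 1; i >= lo; i--) {
                    if (q[i] == drop)
                            ndrop++;                /* hole grows */
                    else if (ndrop)
                            q[i + ndrop] = q[i];    /* slide survivor up */
            }
            return lo + ndrop;                      /* new consumer index */
    }

    int main(void)
    {
            int q[] = { 7, 3, 7, 5, 3 };            /* qpn per CQE slot */
            int ci = squeeze(q, 0, 5, 3);           /* drop QP 3 */

            for (int i = ci; i < 5; i++)
                    printf("%d ", q[i]);            /* 7 7 5 */
            printf("(ci=%d)\n", ci);
            return 0;
    }
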
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 9a72fec6d5cc..6486234a2360 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -236,7 +236,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
}
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -278,7 +279,8 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
req.qtype = ERDMA_EQ_TYPE_CEQ;
req.vector_idx = ceqn + 1;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
if (err)
return;
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 05978f3b1475..ea4db53901a4 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/if_ether.h>
/* PCIe device related definition. */
#define ERDMA_PCI_WIDTH 64
@@ -21,8 +22,21 @@
#define ERDMA_NUM_MSIX_VEC 32U
#define ERDMA_MSIX_VECTOR_CMDQ 0
+/* RoCEv2 related */
+#define ERDMA_ROCEV2_GID_SIZE 16
+#define ERDMA_MAX_PKEYS 1
+#define ERDMA_DEFAULT_PKEY 0xFFFF
+
+/* erdma device protocol type */
+enum erdma_proto_type {
+ ERDMA_PROTO_IWARP = 0,
+ ERDMA_PROTO_ROCEV2 = 1,
+ ERDMA_PROTO_COUNT = 2,
+};
+
/* PCIe Bar0 Registers. */
#define ERDMA_REGS_VERSION_REG 0x0
+#define ERDMA_REGS_DEV_PROTO_REG 0xC
#define ERDMA_REGS_DEV_CTRL_REG 0x10
#define ERDMA_REGS_DEV_ST_REG 0x14
#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
@@ -136,7 +150,11 @@ enum CMDQ_RDMA_OPCODE {
CMDQ_OPCODE_DESTROY_CQ = 5,
CMDQ_OPCODE_REFLUSH = 6,
CMDQ_OPCODE_REG_MR = 8,
- CMDQ_OPCODE_DEREG_MR = 9
+ CMDQ_OPCODE_DEREG_MR = 9,
+ CMDQ_OPCODE_SET_GID = 14,
+ CMDQ_OPCODE_CREATE_AH = 15,
+ CMDQ_OPCODE_DESTROY_AH = 16,
+ CMDQ_OPCODE_QUERY_QP = 17,
};
enum CMDQ_COMMON_OPCODE {
@@ -284,6 +302,36 @@ struct erdma_cmdq_dereg_mr_req {
u32 cfg;
};
+/* create_av cfg0 */
+#define ERDMA_CMD_CREATE_AV_FL_MASK GENMASK(19, 0)
+#define ERDMA_CMD_CREATE_AV_NTYPE_MASK BIT(20)
+
+struct erdma_av_cfg {
+ u32 cfg0;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 sl;
+ u8 rsvd;
+ u16 udp_sport;
+ u16 sgid_index;
+ u8 dmac[ETH_ALEN];
+ u8 padding[2];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+};
+
+struct erdma_cmdq_create_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+ struct erdma_av_cfg av_cfg;
+};
+
+struct erdma_cmdq_destroy_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+};
+
/* modify qp cfg */
#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
@@ -301,6 +349,36 @@ struct erdma_cmdq_modify_qp_req {
u32 recv_nxt;
};
+/* modify qp cfg1 for roce device */
+#define ERDMA_CMD_MODIFY_QP_DQPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_mod_qp_req_rocev2 {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 attr_mask;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ struct erdma_av_cfg av_cfg;
+};
+
+/* query qp response mask */
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK GENMASK_ULL(23, 0)
+#define ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK GENMASK_ULL(47, 24)
+#define ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK GENMASK_ULL(55, 48)
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK GENMASK_ULL(56, 56)
+
+struct erdma_cmdq_query_qp_req_rocev2 {
+ u64 hdr;
+ u32 qpn;
+};
+
+enum erdma_qp_type {
+ ERDMA_QPT_RC = 0,
+ ERDMA_QPT_UD = 1,
+};
+
/* create qp cfg0 */
#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
@@ -309,6 +387,9 @@ struct erdma_cmdq_modify_qp_req {
#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
+/* create qp cfg2 */
+#define ERDMA_CMD_CREATE_QP_TYPE_MASK GENMASK(3, 0)
+
/* create qp cqn_mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
#define ERDMA_CMD_CREATE_QP_DB_CFG_MASK BIT(25)
@@ -342,6 +423,7 @@ struct erdma_cmdq_create_qp_req {
u64 rq_mtt_entry[3];
u32 db_cfg;
+ u32 cfg2;
};
struct erdma_cmdq_destroy_qp_req {
@@ -394,10 +476,33 @@ struct erdma_cmdq_query_stats_resp {
u64 rx_pps_meter_drop_packets_cnt;
};
+enum erdma_network_type {
+ ERDMA_NETWORK_TYPE_IPV4 = 0,
+ ERDMA_NETWORK_TYPE_IPV6 = 1,
+};
+
+enum erdma_set_gid_op {
+ ERDMA_SET_GID_OP_ADD = 0,
+ ERDMA_SET_GID_OP_DEL = 1,
+};
+
+/* set gid cfg */
+#define ERDMA_CMD_SET_GID_SGID_IDX_MASK GENMASK(15, 0)
+#define ERDMA_CMD_SET_GID_NTYPE_MASK BIT(16)
+#define ERDMA_CMD_SET_GID_OP_MASK BIT(31)
+
+struct erdma_cmdq_set_gid_req {
+ u64 hdr;
+ u32 cfg;
+ u8 gid[ERDMA_ROCEV2_GID_SIZE];
+};
+
/* cap qword 0 definition */
+#define ERDMA_CMD_DEV_CAP_MAX_GID_MASK GENMASK_ULL(51, 48)
#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
#define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_AH_MASK GENMASK_ULL(15, 8)
#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
/* cap qword 1 definition */
@@ -426,6 +531,10 @@ enum {
#define ERDMA_CQE_QTYPE_RQ 1
#define ERDMA_CQE_QTYPE_CMDQ 2
+#define ERDMA_CQE_NTYPE_MASK BIT(31)
+#define ERDMA_CQE_SL_MASK GENMASK(27, 20)
+#define ERDMA_CQE_SQPN_MASK GENMASK(19, 0)
+
struct erdma_cqe {
__be32 hdr;
__be32 qe_idx;
@@ -435,7 +544,16 @@ struct erdma_cqe {
__be32 inv_rkey;
};
__be32 size;
- __be32 rsvd[3];
+ union {
+ struct {
+ __be32 rsvd[3];
+ } rc;
+
+ struct {
+ __be32 rsvd[2];
+ __be32 info;
+ } ud;
+ };
};
struct erdma_sge {
@@ -487,7 +605,7 @@ struct erdma_write_sqe {
struct erdma_sge sgl[];
};
-struct erdma_send_sqe {
+struct erdma_send_sqe_rc {
__le64 hdr;
union {
__be32 imm_data;
@@ -498,6 +616,17 @@ struct erdma_send_sqe {
struct erdma_sge sgl[];
};
+struct erdma_send_sqe_ud {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+ __le32 qkey;
+ __le32 dst_qpn;
+ __le32 ahn;
+ __le32 rsvd;
+ struct erdma_sge sgl[];
+};
+
struct erdma_readreq_sqe {
__le64 hdr;
__le32 invalid_stag;
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 62f497a71004..f35b30235018 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -26,14 +26,6 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
goto done;
switch (event) {
- case NETDEV_UP:
- dev->state = IB_PORT_ACTIVE;
- erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
- break;
- case NETDEV_DOWN:
- dev->state = IB_PORT_DOWN;
- erdma_port_event(dev, IB_EVENT_PORT_ERR);
- break;
case NETDEV_CHANGEMTU:
if (dev->mtu != netdev->mtu) {
erdma_set_mtu(dev, netdev->mtu);
@@ -172,6 +164,8 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;
+ dev->proto = erdma_reg_read32(dev, ERDMA_REGS_DEV_PROTO_REG);
+
dev->resp_pool = dma_pool_create("erdma_resp_pool", &pdev->dev,
ERDMA_HW_RESP_SIZE, ERDMA_HW_RESP_SIZE,
0);
@@ -390,7 +384,7 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
CMDQ_OPCODE_QUERY_DEVICE);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
- &cap1);
+ &cap1, true);
if (err)
return err;
@@ -398,6 +392,8 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
+ dev->attrs.max_gid = 1 << ERDMA_GET_CAP(MAX_GID, cap0);
+ dev->attrs.max_ah = 1 << ERDMA_GET_CAP(MAX_AH, cap0);
dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
@@ -415,12 +411,13 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+ dev->res_cb[ERDMA_RES_TYPE_AH].max_cap = dev->attrs.max_ah;
erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_QUERY_FW_INFO);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
- &cap1);
+ &cap1, true);
if (!err)
dev->attrs.fw_version =
FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
@@ -441,7 +438,8 @@ static int erdma_device_config(struct erdma_dev *dev)
req.cfg = FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK, PAGE_SHIFT) |
FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK, 1);
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_res_cb_init(struct erdma_dev *dev)
@@ -474,6 +472,29 @@ static void erdma_res_cb_free(struct erdma_dev *dev)
bitmap_free(dev->res_cb[i].bitmap);
}
+static const struct ib_device_ops erdma_device_ops_rocev2 = {
+ .get_link_layer = erdma_get_link_layer,
+ .add_gid = erdma_add_gid,
+ .del_gid = erdma_del_gid,
+ .query_pkey = erdma_query_pkey,
+ .create_ah = erdma_create_ah,
+ .destroy_ah = erdma_destroy_ah,
+ .query_ah = erdma_query_ah,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, erdma_ah, ibah),
+};
+
+static const struct ib_device_ops erdma_device_ops_iwarp = {
+ .iw_accept = erdma_accept,
+ .iw_add_ref = erdma_qp_get_ref,
+ .iw_connect = erdma_connect,
+ .iw_create_listen = erdma_create_listen,
+ .iw_destroy_listen = erdma_destroy_listen,
+ .iw_get_qp = erdma_get_ibqp,
+ .iw_reject = erdma_reject,
+ .iw_rem_ref = erdma_qp_put_ref,
+};
+
static const struct ib_device_ops erdma_device_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_ERDMA,
@@ -494,18 +515,9 @@ static const struct ib_device_ops erdma_device_ops = {
.get_dma_mr = erdma_get_dma_mr,
.get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
- .iw_accept = erdma_accept,
- .iw_add_ref = erdma_qp_get_ref,
- .iw_connect = erdma_connect,
- .iw_create_listen = erdma_create_listen,
- .iw_destroy_listen = erdma_destroy_listen,
- .iw_get_qp = erdma_get_ibqp,
- .iw_reject = erdma_reject,
- .iw_rem_ref = erdma_qp_put_ref,
.map_mr_sg = erdma_map_mr_sg,
.mmap = erdma_mmap,
.mmap_free = erdma_mmap_free,
- .modify_qp = erdma_modify_qp,
.post_recv = erdma_post_recv,
.post_send = erdma_post_send,
.poll_cq = erdma_poll_cq,
@@ -515,6 +527,7 @@ static const struct ib_device_ops erdma_device_ops = {
.query_qp = erdma_query_qp,
.req_notify_cq = erdma_req_notify_cq,
.reg_user_mr = erdma_reg_user_mr,
+ .modify_qp = erdma_modify_qp,
INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
@@ -537,7 +550,14 @@ static int erdma_ib_device_add(struct pci_dev *pdev)
if (ret)
return ret;
- ibdev->node_type = RDMA_NODE_RNIC;
+ if (erdma_device_iwarp(dev)) {
+ ibdev->node_type = RDMA_NODE_RNIC;
+ ib_set_device_ops(ibdev, &erdma_device_ops_iwarp);
+ } else {
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ib_set_device_ops(ibdev, &erdma_device_ops_rocev2);
+ }
+
memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
/*
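
Splitting the ops tables lets one driver register as an iWARP RNIC or a RoCEv2 HCA at probe time: the common table goes in first and ib_set_device_ops() layers protocol-specific callbacks on top. A reduced sketch of that layering; the flag and table names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_ops {
            void (*common)(void);
            void (*proto)(void);
    };

    static void common_op(void) { puts("common verbs"); }
    static void iwarp_op(void)  { puts("iw_connect/iw_accept"); }
    static void roce_op(void)   { puts("create_ah/add_gid"); }

    /* later tables fill only the slots they define, like ib_set_device_ops() */
    static void set_ops(struct dev_ops *dst, const struct dev_ops *src)
    {
            if (src->common)
                    dst->common = src->common;
            if (src->proto)
                    dst->proto = src->proto;
    }

    int main(void)
    {
            bool is_iwarp = false; /* from ERDMA_REGS_DEV_PROTO_REG */
            struct dev_ops ops = { 0 };
            const struct dev_ops common = { .common = common_op };
            const struct dev_ops iwarp  = { .proto = iwarp_op };
            const struct dev_ops roce   = { .proto = roce_op };

            set_ops(&ops, &common);
            set_ops(&ops, is_iwarp ? &iwarp : &roce);
            ops.common();
            ops.proto();
            return 0;
    }
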
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 4d1f9114cd97..25f6c49aec77 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -11,20 +11,20 @@
void erdma_qp_llp_close(struct erdma_qp *qp)
{
- struct erdma_qp_attrs qp_attrs;
+ struct erdma_mod_qp_params_iwarp params;
down_write(&qp->state_lock);
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_RTS:
- case ERDMA_QP_STATE_RTR:
- case ERDMA_QP_STATE_IDLE:
- case ERDMA_QP_STATE_TERMINATE:
- qp_attrs.state = ERDMA_QP_STATE_CLOSING;
- erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_RTS:
+ case ERDMA_QPS_IWARP_RTR:
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_TERMINATE:
+ params.state = ERDMA_QPS_IWARP_CLOSING;
+ erdma_modify_qp_state_iwarp(qp, &params, ERDMA_QPA_IWARP_STATE);
break;
- case ERDMA_QP_STATE_CLOSING:
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ case ERDMA_QPS_IWARP_CLOSING:
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
break;
default:
break;
@@ -48,9 +48,10 @@ struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
return NULL;
}
-static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
- struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+static int
+erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
{
int ret;
struct erdma_dev *dev = qp->dev;
@@ -59,12 +60,15 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
struct erdma_cep *cep = qp->cep;
struct sockaddr_storage local_addr, remote_addr;
- if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
+ if (!(mask & ERDMA_QPA_IWARP_LLP_HANDLE))
return -EINVAL;
- if (!(mask & ERDMA_QP_ATTR_MPA))
+ if (!(mask & ERDMA_QPA_IWARP_MPA))
return -EINVAL;
+ if (!(mask & ERDMA_QPA_IWARP_CC))
+ params->cc = qp->attrs.cc;
+
ret = getname_local(cep->sock, &local_addr);
if (ret < 0)
return ret;
@@ -73,18 +77,16 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
if (ret < 0)
return ret;
- qp->attrs.state = ERDMA_QP_STATE_RTS;
-
tp = tcp_sk(qp->cep->sock->sk);
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
- req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
- FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, params->cc) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
+ req.cookie = be32_to_cpu(cep->mpa.ext_data.cookie);
req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
req.dport = to_sockaddr_in(remote_addr).sin_port;
@@ -92,33 +94,57 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
req.send_nxt = tp->snd_nxt;
/* rsvd tcp seq for mpa-rsp in server. */
- if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
- req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
+ if (params->qp_type == ERDMA_QP_PASSIVE)
+ req.send_nxt += MPA_DEFAULT_HDR_LEN + params->pd_len;
req.recv_nxt = tp->rcv_nxt;
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ if (mask & ERDMA_QPA_IWARP_IRD)
+ qp->attrs.irq_size = params->irq_size;
+
+ if (mask & ERDMA_QPA_IWARP_ORD)
+ qp->attrs.orq_size = params->orq_size;
+
+ if (mask & ERDMA_QPA_IWARP_CC)
+ qp->attrs.cc = params->cc;
+
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_RTS;
+
+ return 0;
}
-static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
- struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+static int
+erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
{
struct erdma_dev *dev = qp->dev;
struct erdma_cmdq_modify_qp_req req;
-
- qp->attrs.state = attrs->state;
+ int ret;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
- req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ qp->attrs.iwarp.state = params->state;
+
+ return 0;
}
-int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask)
{
bool need_reflush = false;
int drop_conn, ret = 0;
@@ -126,31 +152,31 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
if (!mask)
return 0;
- if (!(mask & ERDMA_QP_ATTR_STATE))
+ if (!(mask & ERDMA_QPA_IWARP_STATE))
return 0;
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_IDLE:
- case ERDMA_QP_STATE_RTR:
- if (attrs->state == ERDMA_QP_STATE_RTS) {
- ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
- } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_RTR:
+ if (params->state == ERDMA_QPS_IWARP_RTS) {
+ ret = erdma_modify_qp_state_to_rts(qp, params, mask);
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
need_reflush = true;
if (qp->cep) {
erdma_cep_put(qp->cep);
qp->cep = NULL;
}
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
}
break;
- case ERDMA_QP_STATE_RTS:
+ case ERDMA_QPS_IWARP_RTS:
drop_conn = 0;
- if (attrs->state == ERDMA_QP_STATE_CLOSING ||
- attrs->state == ERDMA_QP_STATE_TERMINATE ||
- attrs->state == ERDMA_QP_STATE_ERROR) {
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ if (params->state == ERDMA_QPS_IWARP_CLOSING ||
+ params->state == ERDMA_QPS_IWARP_TERMINATE ||
+ params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
drop_conn = 1;
need_reflush = true;
}
@@ -159,17 +185,17 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
erdma_qp_cm_drop(qp);
break;
- case ERDMA_QP_STATE_TERMINATE:
- if (attrs->state == ERDMA_QP_STATE_ERROR)
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ case ERDMA_QPS_IWARP_TERMINATE:
+ if (params->state == ERDMA_QPS_IWARP_ERROR)
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
break;
- case ERDMA_QP_STATE_CLOSING:
- if (attrs->state == ERDMA_QP_STATE_IDLE) {
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
- } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
- } else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
+ case ERDMA_QPS_IWARP_CLOSING:
+ if (params->state == ERDMA_QPS_IWARP_IDLE) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ } else if (params->state != ERDMA_QPS_IWARP_CLOSING) {
return -ECONNABORTED;
}
break;
@@ -186,6 +212,98 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
return ret;
}
+static int modify_qp_cmd_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ enum erdma_qpa_mask_rocev2 attr_mask)
+{
+ struct erdma_cmdq_mod_qp_req_rocev2 req;
+
+ memset(&req, 0, sizeof(req));
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK,
+ params->state);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_DQPN_MASK,
+ params->dst_qpn);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ req.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ erdma_set_av_cfg(&req.av_cfg, &params->av);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_SQ_PSN)
+ req.sq_psn = params->sq_psn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_RQ_PSN)
+ req.rq_psn = params->rq_psn;
+
+ req.attr_mask = attr_mask;
+
+ return erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL,
+ NULL, true);
+}
+
+static void erdma_reset_qp(struct erdma_qp *qp)
+{
+ qp->kern_qp.sq_pi = 0;
+ qp->kern_qp.sq_ci = 0;
+ qp->kern_qp.rq_pi = 0;
+ qp->kern_qp.rq_ci = 0;
+ memset(qp->kern_qp.swr_tbl, 0, qp->attrs.sq_size * sizeof(u64));
+ memset(qp->kern_qp.rwr_tbl, 0, qp->attrs.rq_size * sizeof(u64));
+ memset(qp->kern_qp.sq_buf, 0, qp->attrs.sq_size << SQEBB_SHIFT);
+ memset(qp->kern_qp.rq_buf, 0, qp->attrs.rq_size << RQE_SHIFT);
+ erdma_remove_cqes_of_qp(&qp->scq->ibcq, QP_ID(qp));
+ if (qp->rcq != qp->scq)
+ erdma_remove_cqes_of_qp(&qp->rcq->ibcq, QP_ID(qp));
+}
+
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask)
+{
+ struct erdma_dev *dev = to_edev(qp->ibqp.device);
+ int ret;
+
+ ret = modify_qp_cmd_rocev2(qp, params, attr_mask);
+ if (ret)
+ return ret;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ qp->attrs.rocev2.state = params->state;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ qp->attrs.rocev2.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ qp->attrs.rocev2.dst_qpn = params->dst_qpn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ memcpy(&qp->attrs.rocev2.av, &params->av,
+ sizeof(struct erdma_av));
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_RESET)
+ erdma_reset_qp(qp);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_ERROR) {
+ qp->flags |= ERDMA_QP_IN_FLUSHING;
+ mod_delayed_work(dev->reflush_wq, &qp->reflush_dwork,
+ usecs_to_jiffies(100));
+ }
+
+ return 0;
+}
+
static void erdma_qp_safe_free(struct kref *ref)
{
struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
@@ -282,17 +400,57 @@ static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
return 0;
}
+static void init_send_sqe_rc(struct erdma_qp *qp, struct erdma_send_sqe_rc *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ } else if (wr->opcode == IB_WR_SEND_WITH_INV) {
+ op = ERDMA_OP_SEND_WITH_INV;
+ sqe->invalid_stag = cpu_to_le32(wr->ex.invalidate_rkey);
+ }
+
+ *hw_op = op;
+}
+
+static void init_send_sqe_ud(struct erdma_qp *qp, struct erdma_send_sqe_ud *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ const struct ib_ud_wr *uwr = ud_wr(wr);
+ struct erdma_ah *ah = to_eah(uwr->ah);
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ }
+
+ *hw_op = op;
+
+ sqe->ahn = cpu_to_le32(ah->ahn);
+ sqe->dst_qpn = cpu_to_le32(uwr->remote_qpn);
+ /* Not allowed to send control qkey */
+ if (uwr->remote_qkey & 0x80000000)
+ sqe->qkey = cpu_to_le32(qp->attrs.rocev2.qkey);
+ else
+ sqe->qkey = cpu_to_le32(uwr->remote_qkey);
+}
+
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
const struct ib_send_wr *send_wr)
{
u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
u32 idx = *pi & (qp->attrs.sq_size - 1);
enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_send_sqe_rc *rc_send_sqe;
+ struct erdma_send_sqe_ud *ud_send_sqe;
struct erdma_atomic_sqe *atomic_sqe;
struct erdma_readreq_sqe *read_sqe;
struct erdma_reg_mr_sqe *regmr_sge;
struct erdma_write_sqe *write_sqe;
- struct erdma_send_sqe *send_sqe;
struct ib_rdma_wr *rdma_wr;
struct erdma_sge *sge;
__le32 *length_field;
@@ -301,6 +459,10 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
u32 attrs;
int ret;
+ if (qp->ibqp.qp_type != IB_QPT_RC && send_wr->opcode != IB_WR_SEND &&
+ send_wr->opcode != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+
entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
SQEBB_SHIFT);
@@ -374,21 +536,20 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
case IB_WR_SEND_WITH_INV:
- send_sqe = (struct erdma_send_sqe *)entry;
- hw_op = ERDMA_OP_SEND;
- if (op == IB_WR_SEND_WITH_IMM) {
- hw_op = ERDMA_OP_SEND_WITH_IMM;
- send_sqe->imm_data = send_wr->ex.imm_data;
- } else if (op == IB_WR_SEND_WITH_INV) {
- hw_op = ERDMA_OP_SEND_WITH_INV;
- send_sqe->invalid_stag =
- cpu_to_le32(send_wr->ex.invalidate_rkey);
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ rc_send_sqe = (struct erdma_send_sqe_rc *)entry;
+ init_send_sqe_rc(qp, rc_send_sqe, send_wr, &hw_op);
+ length_field = &rc_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_rc);
+ } else {
+ ud_send_sqe = (struct erdma_send_sqe_ud *)entry;
+ init_send_sqe_ud(qp, ud_send_sqe, send_wr, &hw_op);
+ length_field = &ud_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_ud);
}
- wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
- length_field = &send_sqe->length;
- wqe_size = sizeof(struct erdma_send_sqe);
- sgl_offset = wqe_size;
+ sgl_offset = wqe_size;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
break;
case IB_WR_REG_MR:
wqe_hdr |=
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 51d619edb6c5..af36a8d2df22 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -55,6 +55,13 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
ilog2(qp->attrs.rq_size)) |
FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_RC);
+ else
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_UD);
+
if (rdma_is_kernel_res(&qp->ibqp.res)) {
u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
@@ -119,10 +126,10 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
}
}
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
- &resp1);
- if (!err)
- qp->attrs.cookie =
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1,
+ true);
+ if (!err && erdma_device_iwarp(dev))
+ qp->attrs.iwarp.cookie =
FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
return err;
@@ -178,7 +185,8 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
}
post_cmd:
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
@@ -240,7 +248,8 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
}
}
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
@@ -336,6 +345,11 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
+ if (erdma_device_rocev2(dev)) {
+ attr->max_pkeys = ERDMA_MAX_PKEYS;
+ attr->max_ah = dev->attrs.max_ah;
+ }
+
if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
attr->atomic_cap = IB_ATOMIC_GLOB;
@@ -367,7 +381,14 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
memset(attr, 0, sizeof(*attr));
- attr->gid_tbl_len = 1;
+ if (erdma_device_iwarp(dev)) {
+ attr->gid_tbl_len = 1;
+ } else {
+ attr->gid_tbl_len = dev->attrs.max_gid;
+ attr->ip_gids = true;
+ attr->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
+
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
attr->max_msg_sz = -1;
@@ -377,14 +398,10 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
- if (netif_running(ndev) && netif_carrier_ok(ndev))
- dev->state = IB_PORT_ACTIVE;
- else
- dev->state = IB_PORT_DOWN;
- attr->state = dev->state;
+ attr->state = ib_get_curr_port_state(ndev);
out:
- if (dev->state == IB_PORT_ACTIVE)
+ if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
@@ -395,8 +412,18 @@ out:
int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
struct ib_port_immutable *port_immutable)
{
- port_immutable->gid_tbl_len = 1;
- port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ if (erdma_device_iwarp(dev)) {
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ port_immutable->gid_tbl_len = 1;
+ } else {
+ port_immutable->core_cap_flags =
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ port_immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ port_immutable->gid_tbl_len = dev->attrs.max_gid;
+ port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
return 0;
}
@@ -438,7 +465,8 @@ static void erdma_flush_worker(struct work_struct *work)
req.qpn = QP_ID(qp);
req.sq_pi = qp->kern_qp.sq_pi;
req.rq_pi = qp->kern_qp.rq_pi;
- erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL);
+ erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_qp_validate_cap(struct erdma_dev *dev,
@@ -459,7 +487,11 @@ static int erdma_qp_validate_cap(struct erdma_dev *dev,
static int erdma_qp_validate_attr(struct erdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
- if (attrs->qp_type != IB_QPT_RC)
+ if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+
+ if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC &&
+ attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI)
return -EOPNOTSUPP;
if (attrs->srq)
@@ -937,7 +969,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
udata, struct erdma_ucontext, ibucontext);
struct erdma_ureq_create_qp ureq;
struct erdma_uresp_create_qp uresp;
- int ret;
+ void *old_entry;
+ int ret = 0;
ret = erdma_qp_validate_cap(dev, attrs);
if (ret)
@@ -956,9 +989,16 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
kref_init(&qp->ref);
init_completion(&qp->safe_free);
- ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
- XA_LIMIT(1, dev->attrs.max_qp - 1),
- &dev->next_alloc_qpn, GFP_KERNEL);
+ if (qp->ibqp.qp_type == IB_QPT_GSI) {
+ old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
+ if (xa_is_err(old_entry))
+ ret = xa_err(old_entry);
+ } else {
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+ &dev->next_alloc_qpn, GFP_KERNEL);
+ }
+
if (ret < 0) {
ret = -ENOMEM;
goto err_out;
@@ -995,7 +1035,12 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
qp->attrs.max_send_sge = attrs->cap.max_send_sge;
qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
+
+ if (erdma_device_iwarp(qp->dev))
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ else
+ qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET;
+
INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);
ret = create_qp_cmd(uctx, qp);
@@ -1219,7 +1264,8 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (ret)
return ret;
@@ -1244,7 +1290,8 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_CQ);
req.cqn = cq->cqn;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
return err;
@@ -1269,13 +1316,20 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct erdma_dev *dev = to_edev(ibqp->device);
struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
udata, struct erdma_ucontext, ibucontext);
- struct erdma_qp_attrs qp_attrs;
- int err;
struct erdma_cmdq_destroy_qp_req req;
+ union erdma_mod_qp_params params;
+ int err;
down_write(&qp->state_lock);
- qp_attrs.state = ERDMA_QP_STATE_ERROR;
- erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ if (erdma_device_iwarp(dev)) {
+ params.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ ERDMA_QPA_IWARP_STATE);
+ } else {
+ params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR;
+ erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ ERDMA_QPA_ROCEV2_STATE);
+ }
up_write(&qp->state_lock);
cancel_delayed_work_sync(&qp->reflush_dwork);
@@ -1284,7 +1338,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_QP);
req.qpn = QP_ID(qp);
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
return err;
@@ -1382,7 +1437,8 @@ static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1,
+ true);
if (ret)
return ret;
@@ -1417,7 +1473,8 @@ static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
req.rdb_off = ctx->ext_db.rdb_off;
req.cdb_off = ctx->ext_db.cdb_off;
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (ret)
ibdev_err_ratelimited(&dev->ibdev,
"free db resources failed %d", ret);
@@ -1506,69 +1563,248 @@ void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
atomic_dec(&dev->num_ctx);
}
-static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
- [IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
- [IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
- [IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
- [IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
- [IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
- [IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
+static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr,
+ struct erdma_av *av, u16 sport)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
+ av->port = rdma_ah_get_port_num(ah_attr);
+ av->sgid_index = grh->sgid_index;
+ av->hop_limit = grh->hop_limit;
+ av->traffic_class = grh->traffic_class;
+ av->sl = rdma_ah_get_sl(ah_attr);
+
+ av->flow_label = grh->flow_label;
+ av->udp_sport = sport;
+
+ ether_addr_copy(av->dmac, ah_attr->roce.dmac);
+ memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);
+
+ if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
+ av->ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ av->ntype = ERDMA_NETWORK_TYPE_IPV6;
+}
+
+static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr)
+{
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+
+ rdma_ah_set_sl(ah_attr, av->sl);
+ rdma_ah_set_port_num(ah_attr, av->port);
+ rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);
+
+ rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
+ av->hop_limit, av->traffic_class);
+ rdma_ah_set_dgid_raw(ah_attr, av->dgid);
+}
+
+static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = {
+ [ERDMA_PROTO_IWARP] = {
+ [IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING,
+ [IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE,
+ [IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET,
+ [IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT,
+ [IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD,
+ [IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE,
+ [IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR,
+ },
};
-int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- struct ib_udata *udata)
+static int erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = {
+ [ERDMA_PROTO_IWARP] = {
+ [ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT,
+ [ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET,
+ [ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT,
+ [ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD,
+ [ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE,
+ [ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR,
+ },
+};
+
+static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state)
{
- struct erdma_qp_attrs new_attrs;
- enum erdma_qp_attr_mask erdma_attr_mask = 0;
- struct erdma_qp *qp = to_eqp(ibqp);
- int ret = 0;
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state];
+}
- if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
- return -EOPNOTSUPP;
+static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state)
+{
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state];
+}
- memset(&new_attrs, 0, sizeof(new_attrs));
+static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state];
+}
- if (attr_mask & IB_QP_STATE) {
- new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];
+static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state];
+}
- erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
+static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ enum ib_qp_state cur_state, nxt_state;
+ struct erdma_dev *dev = qp->dev;
+ int ret = -EINVAL;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(&dev->ibdev, attr->port_num))
+ goto out;
+
+ if (erdma_device_rocev2(dev)) {
+ cur_state = (attr_mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state :
+ rocev2_to_ib_qps(qp->attrs.rocev2.state);
+
+ nxt_state = (attr_mask & IB_QP_STATE) ? attr->qp_state :
+ cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type,
+ attr_mask))
+ goto out;
+
+ if ((attr_mask & IB_QP_AV) &&
+ erdma_check_gid_attr(
+ rdma_ah_read_grh(&attr->ah_attr)->sgid_attr))
+ goto out;
+
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= ERDMA_MAX_PKEYS)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void erdma_init_mod_qp_params_rocev2(
+ struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params,
+ int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask)
+{
+ enum erdma_qpa_mask_rocev2 to_modify_attrs = 0;
+ enum erdma_qps_rocev2 cur_state, nxt_state;
+ u16 udp_sport;
+
+ if (ib_attr_mask & IB_QP_CUR_STATE)
+ cur_state = ib_to_rocev2_qps(attr->cur_qp_state);
+ else
+ cur_state = qp->attrs.rocev2.state;
+
+ if (ib_attr_mask & IB_QP_STATE)
+ nxt_state = ib_to_rocev2_qps(attr->qp_state);
+ else
+ nxt_state = cur_state;
+
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE;
+ params->state = nxt_state;
+
+ if (ib_attr_mask & IB_QP_QKEY) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY;
+ params->qkey = attr->qkey;
+ }
+
+ if (ib_attr_mask & IB_QP_SQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN;
+ params->sq_psn = attr->sq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_RQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN;
+ params->rq_psn = attr->rq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_DEST_QPN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN;
+ params->dst_qpn = attr->dest_qp_num;
}
+ if (ib_attr_mask & IB_QP_AV) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_AV;
+ udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ QP_ID(qp), params->dst_qpn);
+ erdma_attr_to_av(&attr->ah_attr, &params->av, udp_sport);
+ }
+
+ *erdma_attr_mask = to_modify_attrs;
+}
+
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ union erdma_mod_qp_params params;
+ int ret = 0, erdma_attr_mask = 0;
+
down_write(&qp->state_lock);
- ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);
+ ret = erdma_check_qp_attrs(qp, attr, attr_mask);
+ if (ret)
+ goto out;
- up_write(&qp->state_lock);
+ if (erdma_device_iwarp(qp->dev)) {
+ if (attr_mask & IB_QP_STATE) {
+ erdma_attr_mask |= ERDMA_QPA_IWARP_STATE;
+ params.iwarp.state = ib_to_iwarp_qps(attr->qp_state);
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ erdma_attr_mask);
+ } else {
+ erdma_init_mod_qp_params_rocev2(
+ qp, &params.rocev2, &erdma_attr_mask, attr, attr_mask);
+
+ ret = erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ erdma_attr_mask);
+ }
+out:
+ up_write(&qp->state_lock);
return ret;
}
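
[Editor's note: the lookup tables replace the old open-coded switch. The RoCEv2 mapping is bijective, while the iWARP one is deliberately lossy (IB_QPS_RESET and IB_QPS_INIT both fold into IDLE, and the error-side states all report IB_QPS_ERR). A sketch checking the round-trip property the RoCEv2 code relies on:]

static bool demo_rocev2_tables_round_trip(void)
{
        int s;

        for (s = IB_QPS_RESET; s <= IB_QPS_ERR; s++)
                if (rocev2_to_ib_qps(ib_to_rocev2_qps(s)) != s)
                        return false;
        return true;
}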
static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
{
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_IDLE:
- return IB_QPS_INIT;
- case ERDMA_QP_STATE_RTR:
- return IB_QPS_RTR;
- case ERDMA_QP_STATE_RTS:
- return IB_QPS_RTS;
- case ERDMA_QP_STATE_CLOSING:
- return IB_QPS_ERR;
- case ERDMA_QP_STATE_TERMINATE:
- return IB_QPS_ERR;
- case ERDMA_QP_STATE_ERROR:
- return IB_QPS_ERR;
- default:
- return IB_QPS_ERR;
- }
+ if (erdma_device_iwarp(qp->dev))
+ return iwarp_to_ib_qps(qp->attrs.iwarp.state);
+ else
+ return rocev2_to_ib_qps(qp->attrs.rocev2.state);
}
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
+ struct erdma_cmdq_query_qp_req_rocev2 req;
struct erdma_dev *dev;
struct erdma_qp *qp;
+ u64 resp0, resp1;
+ int ret;
if (ibqp && qp_attr && qp_init_attr) {
qp = to_eqp(ibqp);
@@ -1595,8 +1831,37 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
- qp_attr->qp_state = query_qp_state(qp);
- qp_attr->cur_qp_state = query_qp_state(qp);
+ if (erdma_device_rocev2(dev)) {
+		/* Query the hardware for the live QP state, PSNs and SQ drain status */
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_QP);
+ req.qpn = QP_ID(qp);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
+ &resp1, true);
+ if (ret)
+ return ret;
+
+ qp_attr->sq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0);
+ qp_attr->rq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0);
+ qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0));
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->sq_draining = FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0);
+
+ qp_attr->pkey_index = 0;
+ qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ erdma_av_to_attr(&qp->attrs.rocev2.av,
+ &qp_attr->ah_attr);
+ } else {
+ qp_attr->qp_state = query_qp_state(qp);
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ }
return 0;
}
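
[Editor's note: the query path above unpacks both response words with FIELD_GET(), which shifts and masks in one step given a GENMASK()-style constant. A self-contained sketch — the masks below are illustrative, not the real hardware layout:]

#include <linux/bitfield.h>

#define DEMO_SQ_PSN_MASK        GENMASK_ULL(23, 0)
#define DEMO_QP_STATE_MASK      GENMASK_ULL(31, 24)

static void demo_unpack_resp(u64 resp0, u32 *sq_psn, u8 *state)
{
        *sq_psn = FIELD_GET(DEMO_SQ_PSN_MASK, resp0);
        *state = FIELD_GET(DEMO_QP_STATE_MASK, resp0);
}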
@@ -1736,7 +2001,7 @@ void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
CMDQ_OPCODE_CONF_MTU);
req.mtu = mtu;
- erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
}
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
@@ -1806,7 +2071,8 @@ static int erdma_query_hw_stats(struct erdma_dev *dev,
req.target_addr = dma_addr;
req.target_length = ERDMA_HW_RESP_SIZE;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
goto out;
@@ -1839,3 +2105,159 @@ int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
return stats->num_counters;
}
+
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
+ const union ib_gid *gid)
+{
+ struct erdma_cmdq_set_gid_req req;
+ u8 ntype;
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
+ FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);
+
+ if (op == ERDMA_SET_GID_OP_ADD) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)gid))
+ ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ ntype = ERDMA_NETWORK_TYPE_IPV6;
+
+ req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);
+
+ memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
+ }
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_SET_GID);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct erdma_dev *dev = to_edev(attr->device);
+ int ret;
+
+ ret = erdma_check_gid_attr(attr);
+ if (ret)
+ return ret;
+
+ return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
+ &attr->gid);
+}
+
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
+ attr->index, NULL);
+}
+
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index >= ERDMA_MAX_PKEYS)
+ return -EINVAL;
+
+ *pkey = ERDMA_DEFAULT_PKEY;
+ return 0;
+}
+
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
+{
+ av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
+ FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);
+
+ av_cfg->traffic_class = av->traffic_class;
+ av_cfg->hop_limit = av->hop_limit;
+ av_cfg->sl = av->sl;
+
+ av_cfg->udp_sport = av->udp_sport;
+ av_cfg->sgid_index = av->sgid_index;
+
+ ether_addr_copy(av_cfg->dmac, av->dmac);
+ memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
+}
+
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(init_attr->ah_attr);
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_create_ah_req req;
+ u32 udp_sport;
+ int ret;
+
+ ret = erdma_check_gid_attr(grh->sgid_attr);
+ if (ret)
+ return ret;
+
+ ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
+ if (ret < 0)
+ return ret;
+
+ ah->ahn = ret;
+
+ if (grh->flow_label)
+ udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
+ else
+ udp_sport =
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);
+
+ erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+ erdma_set_av_cfg(&req.av_cfg, &ah->av);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+ if (ret) {
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+ return ret;
+ }
+
+ return 0;
+}
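
[Editor's note: the source-port choice above — like the rdma_get_udp_sport() call in erdma_modify_qp() — keeps RoCEv2 flows spread across ECMP paths: hash the flow label when the consumer supplied one, otherwise derive entropy from the AH number within the valid encap range. As a sketch:]

static u16 demo_pick_udp_sport(u32 flow_label, u32 ahn)
{
        if (flow_label)
                return rdma_flow_label_to_udp_sport(flow_label);

        /* 0xC000 plus 14 bits stays inside the valid encap port range */
        return IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ahn & 0x3FFF);
}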
+
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_destroy_ah_req req;
+ int ret;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ flags & RDMA_DESTROY_AH_SLEEPABLE);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+
+ return 0;
+}
+
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct erdma_ah *ah = to_eah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ erdma_av_to_attr(&ah->av, ah_attr);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index c998acd39a78..f9408ccc8bad 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -136,6 +136,25 @@ struct erdma_user_dbrecords_page {
int refcnt;
};
+struct erdma_av {
+ u8 port;
+ u8 hop_limit;
+ u8 traffic_class;
+ u8 sl;
+ u8 sgid_index;
+ u16 udp_sport;
+ u32 flow_label;
+ u8 dmac[ETH_ALEN];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+ enum erdma_network_type ntype;
+};
+
+struct erdma_ah {
+ struct ib_ah ibah;
+ struct erdma_av av;
+ u32 ahn;
+};
+
struct erdma_uqp {
struct erdma_mem sq_mem;
struct erdma_mem rq_mem;
@@ -176,33 +195,91 @@ struct erdma_kqp {
u8 sig_all;
};
-enum erdma_qp_state {
- ERDMA_QP_STATE_IDLE = 0,
- ERDMA_QP_STATE_RTR = 1,
- ERDMA_QP_STATE_RTS = 2,
- ERDMA_QP_STATE_CLOSING = 3,
- ERDMA_QP_STATE_TERMINATE = 4,
- ERDMA_QP_STATE_ERROR = 5,
- ERDMA_QP_STATE_UNDEF = 7,
- ERDMA_QP_STATE_COUNT = 8
+enum erdma_qps_iwarp {
+ ERDMA_QPS_IWARP_IDLE = 0,
+ ERDMA_QPS_IWARP_RTR = 1,
+ ERDMA_QPS_IWARP_RTS = 2,
+ ERDMA_QPS_IWARP_CLOSING = 3,
+ ERDMA_QPS_IWARP_TERMINATE = 4,
+ ERDMA_QPS_IWARP_ERROR = 5,
+ ERDMA_QPS_IWARP_UNDEF = 6,
+ ERDMA_QPS_IWARP_COUNT = 7,
+};
+
+enum erdma_qpa_mask_iwarp {
+ ERDMA_QPA_IWARP_STATE = (1 << 0),
+ ERDMA_QPA_IWARP_LLP_HANDLE = (1 << 2),
+ ERDMA_QPA_IWARP_ORD = (1 << 3),
+ ERDMA_QPA_IWARP_IRD = (1 << 4),
+ ERDMA_QPA_IWARP_SQ_SIZE = (1 << 5),
+ ERDMA_QPA_IWARP_RQ_SIZE = (1 << 6),
+ ERDMA_QPA_IWARP_MPA = (1 << 7),
+ ERDMA_QPA_IWARP_CC = (1 << 8),
};
-enum erdma_qp_attr_mask {
- ERDMA_QP_ATTR_STATE = (1 << 0),
- ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
- ERDMA_QP_ATTR_ORD = (1 << 3),
- ERDMA_QP_ATTR_IRD = (1 << 4),
- ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
- ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
- ERDMA_QP_ATTR_MPA = (1 << 7)
+enum erdma_qps_rocev2 {
+ ERDMA_QPS_ROCEV2_RESET = 0,
+ ERDMA_QPS_ROCEV2_INIT = 1,
+ ERDMA_QPS_ROCEV2_RTR = 2,
+ ERDMA_QPS_ROCEV2_RTS = 3,
+ ERDMA_QPS_ROCEV2_SQD = 4,
+ ERDMA_QPS_ROCEV2_SQE = 5,
+ ERDMA_QPS_ROCEV2_ERROR = 6,
+ ERDMA_QPS_ROCEV2_COUNT = 7,
+};
+
+enum erdma_qpa_mask_rocev2 {
+ ERDMA_QPA_ROCEV2_STATE = (1 << 0),
+ ERDMA_QPA_ROCEV2_QKEY = (1 << 1),
+ ERDMA_QPA_ROCEV2_AV = (1 << 2),
+ ERDMA_QPA_ROCEV2_SQ_PSN = (1 << 3),
+ ERDMA_QPA_ROCEV2_RQ_PSN = (1 << 4),
+ ERDMA_QPA_ROCEV2_DST_QPN = (1 << 5),
};
enum erdma_qp_flags {
ERDMA_QP_IN_FLUSHING = (1 << 0),
};
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+
+struct erdma_mod_qp_params_iwarp {
+ enum erdma_qps_iwarp state;
+ enum erdma_cc_alg cc;
+ u8 qp_type;
+ u8 pd_len;
+ u32 irq_size;
+ u32 orq_size;
+};
+
+struct erdma_qp_attrs_iwarp {
+ enum erdma_qps_iwarp state;
+ u32 cookie;
+};
+
+struct erdma_mod_qp_params_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 sq_psn;
+ u32 rq_psn;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
+union erdma_mod_qp_params {
+ struct erdma_mod_qp_params_iwarp iwarp;
+ struct erdma_mod_qp_params_rocev2 rocev2;
+};
+
+struct erdma_qp_attrs_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
struct erdma_qp_attrs {
- enum erdma_qp_state state;
enum erdma_cc_alg cc; /* Congestion control algorithm */
u32 sq_size;
u32 rq_size;
@@ -210,11 +287,10 @@ struct erdma_qp_attrs {
u32 irq_size;
u32 max_send_sge;
u32 max_recv_sge;
- u32 cookie;
-#define ERDMA_QP_ACTIVE 0
-#define ERDMA_QP_PASSIVE 1
- u8 qp_type;
- u8 pd_len;
+ union {
+ struct erdma_qp_attrs_iwarp iwarp;
+ struct erdma_qp_attrs_rocev2 rocev2;
+ };
};
struct erdma_qp {
@@ -286,11 +362,25 @@ static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
-int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask);
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask);
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);
+static inline bool erdma_device_iwarp(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_IWARP;
+}
+
+static inline bool erdma_device_rocev2(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_ROCEV2;
+}
+
static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
return container_of(ibctx, struct erdma_ucontext, ibucontext);
@@ -316,6 +406,21 @@ static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
return container_of(ibcq, struct erdma_cq, ibcq);
}
+static inline struct erdma_ah *to_eah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct erdma_ah, ibah);
+}
+
+static inline int erdma_check_gid_attr(const struct ib_gid_attr *attr)
+{
+ u8 ntype = rdma_gid_attr_network_type(attr);
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6)
+ return -EINVAL;
+
+ return 0;
+}
+
static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
@@ -360,6 +465,7 @@ int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
@@ -370,5 +476,15 @@ struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
u32 port_num);
int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port, int index);
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev,
+ u32 port_num);
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av);
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags);
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
#endif
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index eb38f81aeeb1..cb630551cf1a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -2339,20 +2339,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
-/*
- * this is used for formatting hw error messages...
- */
-struct hfi1_hwerror_msgs {
- u64 mask;
- const char *msg;
- size_t sz;
-};
-
-/* in intr.c... */
-void hfi1_format_hwerrors(u64 hwerrs,
- const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t lmsg);
-
#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 3737f632d62a..d8dd1a599631 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -47,37 +47,6 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
hfi1_event_pkey_change(ppd->dd, ppd->port);
}
-/**
- * format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * hfi1_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t msgl)
-{
- int i;
-
- for (i = 0; i < nhwerrmsgs; i++)
- if (hwerrs & hwerrmsgs[i].mask)
- format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-}
-
static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
{
struct ib_event event;
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index d62ba5fdd80c..d94216c7d576 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -27,8 +27,8 @@ static struct hfi1_pportdata *hfi1_get_pportdata_kobj(struct kobject *kobj)
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
int ret;
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -57,7 +57,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -65,7 +65,7 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -93,9 +93,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static struct bin_attribute *port_cc_bin_attributes[] = {
+static const struct bin_attribute *const port_cc_bin_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL
@@ -134,7 +134,7 @@ static struct attribute *port_cc_attributes[] = {
static const struct attribute_group port_cc_group = {
.name = "CCMgtA",
.attrs = port_cc_attributes,
- .bin_attrs = port_cc_bin_attributes,
+ .bin_attrs_new = port_cc_bin_attributes,
};
/* Start sc2vl */
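
[Editor's note: the hfi1 hunk above is one instance of the tree-wide constification of sysfs binary attributes — read callbacks take a const struct bin_attribute, the attribute objects become const, and groups publish them through .bin_attrs_new. The full pattern, as a sketch with hypothetical names:]

static ssize_t demo_bin_read(struct file *filp, struct kobject *kobj,
                             const struct bin_attribute *attr, char *buf,
                             loff_t pos, size_t count)
{
        return 0; /* nothing to copy in this sketch */
}
static const BIN_ATTR_RO(demo_bin, PAGE_SIZE);

static const struct bin_attribute *const demo_bin_attrs[] = {
        &bin_attr_demo_bin,
        NULL,
};

static const struct attribute_group demo_group = {
        .bin_attrs_new = demo_bin_attrs,
};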
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index ab3fbba70789..44cdb706fe27 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,21 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_HNS
- tristate "HNS RoCE Driver"
- depends on NET_VENDOR_HISILICON
- depends on ARM64 || (COMPILE_TEST && 64BIT)
- depends on (HNS_DSAF && HNS_ENET) || HNS3
- help
- This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
-
- To compile HIP08 driver as module, choose M here.
-
config INFINIBAND_HNS_HIP08
- bool "Hisilicon Hip08 Family RoCE support"
- depends on INFINIBAND_HNS && PCI && HNS3
- depends on INFINIBAND_HNS=m || HNS3=y
+ tristate "Hisilicon Hip08 Family RoCE support"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on PCI && HNS3
help
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v2.
+ To compile this driver, choose M here. This module will be called
+ hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index be1e1cdbcfa8..7917af8e6380 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -5,12 +5,9 @@
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
+hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
- hns_roce_debugfs.o
+ hns_roce_debugfs.o hns_roce_hw_v2.o
-ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0144e7210d05..dded339802b3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -7185,9 +7185,22 @@ static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
return ret;
}
+static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle,
+ bool linkup)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct net_device *netdev = handle->rinfo.netdev;
+
+ if (linkup || !hr_dev)
+ return;
+
+ ib_dispatch_port_state_event(&hr_dev->ib_dev, netdev);
+}
+
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
.init_instance = hns_roce_hw_v2_init_instance,
.uninit_instance = hns_roce_hw_v2_uninit_instance,
+ .link_status_change = hns_roce_hw_v2_link_status_change,
.reset_notify = hns_roce_hw_v2_reset_notify,
};
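
[Editor's note: ib_dispatch_port_state_event() is the new core helper this hunk (and the mlx4 rework below) adopts. Roughly, it replaces the hand-rolled dispatch mlx4 used to carry — sketch modeled on the code removed later in this diff, with port lookup from the netdev elided:]

static void demo_dispatch_port_state(struct ib_device *ibdev, u32 port_num,
                                     bool up)
{
        struct ib_event ibev = {
                .device = ibdev,
                .element.port_num = port_num,
                .event = up ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR,
        };

        ib_dispatch_event(&ibev);
}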
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index e1e3d3ae72b7..ddf02a462efa 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -59,10 +59,6 @@ int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index d7c8ea948bcd..c0c9441885d3 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -85,10 +85,6 @@ int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
int irdma_process_bh(struct irdma_sc_dev *dev);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 0422787592d8..0e594122baa7 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -320,9 +320,6 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
case NETDEV_DOWN:
iwdev->iw_status = 0;
fallthrough;
- case NETDEV_UP:
- irdma_port_ibevent(iwdev);
- break;
default:
break;
}
@@ -972,74 +969,6 @@ void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
}
/**
- * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
- * @dev: function device struct
- * @val_mem: buffer for fpm
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- struct irdma_pci_f *rf = dev_to_rf(dev);
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
- cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
- cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
- cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
- cqp_info->post_sq = 1;
- cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
- * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
- * @dev: hardware control device structure
- * @val_mem: buffer with fpm values
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- struct irdma_pci_f *rf = dev_to_rf(dev);
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
- cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
- cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
- cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
- cqp_info->post_sq = 1;
- cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
* irdma_cqp_cq_create_cmd - create a cq for the cqp
* @dev: device pointer
* @cq: pointer to created cq
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index aa9ea6ba26e5..c592374f4a58 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -150,8 +150,12 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
return PTR_ERR(*umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
if (err)
goto err_buf;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 529db874d67c..dd35e03402ab 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -351,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
struct mlx4_port_gid_table *port_gid_table;
int ret = 0;
int hw_update = 0;
- struct gid_entry *gids;
+ struct gid_entry *gids = NULL;
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -389,10 +389,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
}
spin_unlock_bh(&iboe->lock);
- if (!ret && hw_update) {
+ if (gids)
ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
- kfree(gids);
- }
+
+ kfree(gids);
return ret;
}
@@ -2341,37 +2341,38 @@ static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev,
iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
- if (event == NETDEV_UP || event == NETDEV_DOWN) {
- enum ib_port_state port_state;
- struct ib_event ibev = { };
+ spin_unlock_bh(&iboe->lock);
- if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1,
- &port_state))
- goto iboe_out;
+ if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER)
+ mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
+}
- if (event == NETDEV_UP &&
- (port_state != IB_PORT_ACTIVE ||
- iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN))
- goto iboe_out;
- if (event == NETDEV_DOWN &&
- (port_state != IB_PORT_DOWN ||
- iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE))
- goto iboe_out;
- iboe->last_port_state[dev->dev_port] = port_state;
+static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev,
+ unsigned long event)
+{
+ struct mlx4_ib_dev *mlx4_ibdev =
+ container_of(ibdev, struct mlx4_ib_dev, ib_dev);
+ struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe;
- ibev.device = &ibdev->ib_dev;
- ibev.element.port_num = dev->dev_port + 1;
- ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
- IB_EVENT_PORT_ERR;
- ib_dispatch_event(&ibev);
- }
+ if (!net_eq(dev_net(ndev), &init_net))
+ return;
+
+ ASSERT_RTNL();
+
+ if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent)
+ return;
+
+ spin_lock_bh(&iboe->lock);
+
+ iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL;
+
+ if (event == NETDEV_UP || event == NETDEV_DOWN)
+ ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev);
-iboe_out:
spin_unlock_bh(&iboe->lock);
- if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
- event == NETDEV_UP || event == NETDEV_CHANGE)
- mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
+ if (event == NETDEV_UP || event == NETDEV_CHANGE)
+ mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -2569,6 +2570,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.req_notify_cq = mlx4_ib_arm_cq,
.rereg_user_mr = mlx4_ib_rereg_user_mr,
.resize_cq = mlx4_ib_resize_cq,
+ .report_port_event = mlx4_ib_port_event,
INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index b52bceff7d97..f53b1846594c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -667,6 +667,9 @@ struct mlx4_uverbs_ex_query_device {
__u32 reserved;
};
+/* 4k - 2G */
+#define MLX4_PAGE_SIZE_SUPPORTED ((unsigned long)GENMASK_ULL(31, 12))
+
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -936,8 +939,19 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
{
return 0;
}
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
- int *num_of_mtts);
+static inline int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
+ u64 start,
+ int *num_of_mtts)
+{
+ unsigned long pg_sz;
+
+ pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, start);
+ if (!pg_sz)
+ return -EOPNOTSUPP;
+
+ *num_of_mtts = ib_umem_num_dma_blocks(umem, pg_sz);
+ return order_base_2(pg_sz);
+}
int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);
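
[Editor's note: the new inline above collapses roughly 200 lines of manual contiguity analysis into ib_umem_find_best_pgsz(), which returns the largest admissible page size that tiles the umem without crossing a DMA discontinuity. A sketch of consuming its result:]

static int demo_mtt_params(struct ib_umem *umem, u64 iova)
{
        unsigned long pg_sz;

        pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, iova);
        if (!pg_sz)
                return -EOPNOTSUPP;     /* no admissible page size */

        /* the shift feeds mlx4_mtt_init(); the count, mlx4_write_mtt() */
        pr_debug("mtt shift=%d entries=%zu\n", order_base_2(pg_sz),
                 ib_umem_num_dma_blocks(umem, pg_sz));
        return 0;
}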
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index a40bf58bcdd3..e77645a673fb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,286 +87,20 @@ err_free:
return ERR_PTR(err);
}
-enum {
- MLX4_MAX_MTT_SHIFT = 31
-};
-
-static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
- struct mlx4_mtt *mtt,
- u64 mtt_size, u64 mtt_shift, u64 len,
- u64 cur_start_addr, u64 *pages,
- int *start_index, int *npages)
-{
- u64 cur_end_addr = cur_start_addr + len;
- u64 cur_end_addr_aligned = 0;
- u64 mtt_entries;
- int err = 0;
- int k;
-
- len += (cur_start_addr & (mtt_size - 1ULL));
- cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
- len += (cur_end_addr_aligned - cur_end_addr);
- if (len & (mtt_size - 1ULL)) {
- pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
- len, mtt_size);
- return -EINVAL;
- }
-
- mtt_entries = (len >> mtt_shift);
-
- /*
- * Align the MTT start address to the mtt_size.
- * Required to handle cases when the MR starts in the middle of an MTT
- * record. Was not required in old code since the physical addresses
- * provided by the dma subsystem were page aligned, which was also the
- * MTT size.
- */
- cur_start_addr = round_down(cur_start_addr, mtt_size);
- /* A new block is started ... */
- for (k = 0; k < mtt_entries; ++k) {
- pages[*npages] = cur_start_addr + (mtt_size * k);
- (*npages)++;
- /*
- * Be friendly to mlx4_write_mtt() and pass it chunks of
- * appropriate size.
- */
- if (*npages == PAGE_SIZE / sizeof(u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, *start_index,
- *npages, pages);
- if (err)
- return err;
-
- (*start_index) += *npages;
- *npages = 0;
- }
- }
-
- return 0;
-}
-
-static inline u64 alignment_of(u64 ptr)
-{
- return ilog2(ptr & (~(ptr - 1)));
-}
-
-static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
- u64 current_block_end,
- u64 block_shift)
-{
- /* Check whether the alignment of the new block is aligned as well as
- * the previous block.
- * Block address must start with zeros till size of entity_size.
- */
- if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the previous block-reduce the
- * mtt size accordingly. Here we take the last right bit which
- * is 1.
- */
- block_shift = alignment_of(next_block_start);
-
- /*
- * Check whether the alignment of the end of previous block - is it
- * aligned as well as the start of the block
- */
- if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the start of the block -
- * reduce the mtt size accordingly.
- */
- block_shift = alignment_of(current_block_end);
-
- return block_shift;
-}
-
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
- u64 *pages;
- u64 len = 0;
- int err = 0;
- u64 mtt_size;
- u64 cur_start_addr = 0;
- u64 mtt_shift;
- int start_index = 0;
- int npages = 0;
- struct scatterlist *sg;
- int i;
-
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- mtt_shift = mtt->page_shift;
- mtt_size = 1ULL << mtt_shift;
+ struct ib_block_iter biter;
+ int err, i = 0;
+ u64 addr;
- for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- if (cur_start_addr + len == sg_dma_address(sg)) {
- /* still the same block */
- len += sg_dma_len(sg);
- continue;
- }
- /*
- * A new block is started ...
- * If len is malaligned, write an extra mtt entry to cover the
- * misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr,
- pages, &start_index,
- &npages);
- if (err)
- goto out;
-
- cur_start_addr = sg_dma_address(sg);
- len = sg_dma_len(sg);
- }
-
- /* Handle the last block */
- if (len > 0) {
- /*
- * If len is malaligned, write an extra mtt entry to cover
- * the misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr, pages,
- &start_index, &npages);
+ rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
+ addr = rdma_block_iter_dma_address(&biter);
+ err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr);
if (err)
- goto out;
- }
-
- if (npages)
- err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
-
-out:
- free_page((unsigned long) pages);
- return err;
-}
-
-/*
- * Calculate optimal mtt size based on contiguous pages.
- * Function will return also the number of pages that are not aligned to the
- * calculated mtt_size to be added to total number of pages. For that we should
- * check the first chunk length & last chunk length and if not aligned to
- * mtt_size we should increment the non_aligned_pages number. All chunks in the
- * middle already handled as part of mtt shift calculation for both their start
- * & end addresses.
- */
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
- int *num_of_mtts)
-{
- u64 block_shift = MLX4_MAX_MTT_SHIFT;
- u64 min_shift = PAGE_SHIFT;
- u64 last_block_aligned_end = 0;
- u64 current_block_start = 0;
- u64 first_block_start = 0;
- u64 current_block_len = 0;
- u64 last_block_end = 0;
- struct scatterlist *sg;
- u64 current_block_end;
- u64 misalignment_bits;
- u64 next_block_start;
- u64 total_len = 0;
- int i;
-
- *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-
- for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- /*
- * Initialization - save the first chunk start as the
- * current_block_start - block means contiguous pages.
- */
- if (current_block_len == 0 && current_block_start == 0) {
- current_block_start = sg_dma_address(sg);
- first_block_start = current_block_start;
- /*
- * Find the bits that are different between the physical
- * address and the virtual address for the start of the
- * MR.
- * umem_get aligned the start_va to a page boundary.
- * Therefore, we need to align the start va to the same
- * boundary.
- * misalignment_bits is needed to handle the case of a
- * single memory region. In this case, the rest of the
- * logic will not reduce the block size. If we use a
- * block size which is bigger than the alignment of the
- * misalignment bits, we might use the virtual page
- * number instead of the physical page number, resulting
- * in access to the wrong data.
- */
- misalignment_bits =
- (start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
- current_block_start;
- block_shift = min(alignment_of(misalignment_bits),
- block_shift);
- }
-
- /*
- * Go over the scatter entries and check if they continue the
- * previous scatter entry.
- */
- next_block_start = sg_dma_address(sg);
- current_block_end = current_block_start + current_block_len;
- /* If we have a split (non-contig.) between two blocks */
- if (current_block_end != next_block_start) {
- block_shift = mlx4_ib_umem_calc_block_mtt
- (next_block_start,
- current_block_end,
- block_shift);
-
- /*
- * If we reached the minimum shift for 4k page we stop
- * the loop.
- */
- if (block_shift <= min_shift)
- goto end;
-
- /*
- * If not saved yet we are in first block - we save the
- * length of first block to calculate the
- * non_aligned_pages number at the end.
- */
- total_len += current_block_len;
-
- /* Start a new block */
- current_block_start = next_block_start;
- current_block_len = sg_dma_len(sg);
- continue;
- }
- /* The scatter entry is another part of the current block,
- * increase the block size.
- * An entry in the scatter can be larger than 4k (page) as of
- * dma mapping which merge some blocks together.
- */
- current_block_len += sg_dma_len(sg);
+ return err;
}
-
- /* Account for the last block in the total len */
- total_len += current_block_len;
- /* Add to the first block the misalignment that it suffers from. */
- total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
- last_block_end = current_block_start + current_block_len;
- last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
- total_len += (last_block_aligned_end - last_block_end);
-
- if (total_len & ((1ULL << block_shift) - 1ULL))
- pr_warn("misaligned total length detected (%llu, %llu)!",
- total_len, block_shift);
-
- *num_of_mtts = total_len >> block_shift;
-end:
- if (block_shift < min_shift) {
- /*
- * If shift is less than the min we set a warning and return the
- * min shift.
- */
- pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
-
- block_shift = min_shift;
- }
- return block_shift;
+ return 0;
}
static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
@@ -424,6 +158,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_umem;
+ }
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9d08aa99f3cb..50fd407103c7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -925,8 +925,12 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
@@ -1108,8 +1112,12 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 520034acf73a..162814ae8cb4 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -943,7 +943,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
}
dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst.counter_id = mlx5_fc_id(opfc->fc);
+ dst.counter = opfc->fc;
flow_act.action =
MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -1113,8 +1113,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
handler->ibcounters = flow_act.counters;
dest_arr[dest_num].type =
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest_arr[dest_num].counter_id =
- mlx5_fc_id(mcounters->hw_cntrs_hndl);
+ dest_arr[dest_num].counter =
+ mcounters->hw_cntrs_hndl;
dest_num++;
}
@@ -1603,7 +1603,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
static struct mlx5_ib_flow_handler *raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
- u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
+ struct mlx5_fc *counter, void *cmd_in, int inlen, int dest_id, int dest_type)
{
struct mlx5_flow_destination *dst;
struct mlx5_ib_flow_prio *ft_prio;
@@ -1652,8 +1652,12 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
}
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (WARN_ON(!counter)) {
+ err = -EINVAL;
+ goto unlock;
+ }
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst[dst_num].counter_id = counter_id;
+ dst[dst_num].counter = counter;
dst_num++;
}
@@ -1878,7 +1882,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
return 0;
}
-static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
+static bool
+is_flow_counter(void *obj, u32 offset, u32 *counter_id, u32 *fc_bulk_size)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
@@ -1888,6 +1893,7 @@ static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
if (offset && offset >= devx_obj->flow_counter_bulk_size)
return false;
+ *fc_bulk_size = devx_obj->flow_counter_bulk_size;
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
@@ -1904,13 +1910,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
{
struct mlx5_flow_context flow_context = {.flow_tag =
MLX5_FS_DEFAULT_FLOW_TAG};
- u32 *offset_attr, offset = 0, counter_id = 0;
int dest_id, dest_type = -1, inlen, len, ret, i;
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
struct ib_uobject **arr_flow_actions;
struct ib_uflow_resources *uflow_res;
struct mlx5_flow_act flow_act = {};
+ struct mlx5_fc *counter = NULL;
struct ib_qp *qp = NULL;
void *devx_obj, *cmd_in;
struct ib_uobject *uobj;
@@ -1937,6 +1943,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
if (len) {
+ u32 *offset_attr, fc_bulk_size, offset = 0, counter_id = 0;
devx_obj = arr_flow_actions[0]->object;
if (uverbs_attr_is_valid(attrs,
@@ -1956,8 +1963,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
offset = *offset_attr;
}
- if (!is_flow_counter(devx_obj, offset, &counter_id))
+ if (!is_flow_counter(devx_obj, offset, &counter_id, &fc_bulk_size))
return -EINVAL;
+ counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -1968,8 +1978,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
- if (!uflow_res)
- return -ENOMEM;
+ if (!uflow_res) {
+ ret = -ENOMEM;
+ goto destroy_counter;
+ }
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
@@ -1996,7 +2008,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
flow_handler =
raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
- counter_id, cmd_in, inlen, dest_id, dest_type);
+ counter, cmd_in, inlen, dest_id, dest_type);
if (IS_ERR(flow_handler)) {
ret = PTR_ERR(flow_handler);
goto err_out;
@@ -2007,6 +2019,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
return 0;
err_out:
ib_uverbs_flow_resources_free(uflow_res);
+destroy_counter:
+ if (counter)
+ mlx5_fc_local_destroy(counter);
return ret;
}
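
[Editor's note: the counter rework changes ownership — a struct mlx5_fc wrapper is materialized up front with mlx5_fc_local_create() and must be torn down on every failure path after that point, while success transfers it to the flow handler. A sketch of the rule; demo_create_rule() is a hypothetical stand-in for raw_fs_rule_add():]

static int demo_create_rule(struct mlx5_fc *counter)
{
        return 0; /* stand-in for raw_fs_rule_add() */
}

static int demo_attach_counter(u32 counter_id, u32 offset, u32 bulk_size)
{
        struct mlx5_fc *counter;
        int ret;

        counter = mlx5_fc_local_create(counter_id, offset, bulk_size);
        if (IS_ERR(counter))
                return PTR_ERR(counter);

        ret = demo_create_rule(counter);
        if (ret) /* every failure after creation must destroy it */
                mlx5_fc_local_destroy(counter);

        return ret;
}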
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f5b59d02f4d3..81849eb671a1 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -242,6 +242,10 @@ static int mlx5_netdev_event(struct notifier_block *this,
case NETDEV_DOWN: {
struct net_device *upper = NULL;
+ if (!netif_is_lag_master(ndev) && !netif_is_lag_port(ndev) &&
+ !mlx5_core_mp_enabled(mdev))
+ return NOTIFY_DONE;
+
if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
struct net_device *lag_ndev;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a01b592aa716..974a45c92fbb 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -669,6 +669,12 @@ struct mlx5_ib_mkey {
#define mlx5_update_odp_stats(mr, counter_name, value) \
atomic64_add(value, &((mr)->odp_stats.counter_name))
+#define mlx5_update_odp_stats_with_handled(mr, counter_name, value) \
+ do { \
+ mlx5_update_odp_stats(mr, counter_name, value); \
+ atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
+ } while (0)
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_ib_mkey mmkey;
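
[Editor's note: for counter_name == faults, the new macro expands to a paired update — the existing total plus the per-event *_handled counter this series adds — wrapped in do { } while (0) so it behaves as a single statement. Expanded by hand:]

static void demo_update_faults(struct mlx5_ib_mr *mr, s64 value)
{
        atomic64_add(value, &mr->odp_stats.faults);
        atomic64_add(1, &mr->odp_stats.faults_handled);
}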
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 45d9dc9c6c8f..bb02b6adbf2c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -2021,6 +2021,11 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ bool is_odp = is_odp_mr(mr);
+ int ret = 0;
+
+ if (is_odp)
+ mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
@@ -2032,7 +2037,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
- return 0;
+ goto out;
}
if (ent) {
@@ -2041,7 +2046,15 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
mr->mmkey.cache_ent = NULL;
spin_unlock_irq(&ent->mkeys_queue.lock);
}
- return destroy_mkey(dev, mr);
+ ret = destroy_mkey(dev, mr);
+out:
+ if (is_odp) {
+ if (!ret)
+ to_ib_umem_odp(mr->umem)->private = NULL;
+ mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ }
+
+ return ret;
}
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
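
[Editor's note: the mlx5_revoke_mr() change serializes MR teardown with the MMU-notifier path — the ODP umem_mutex is held across revocation, and clearing umem_odp->private under it guarantees a racing mlx5_ib_invalidate_range() (which now checks for NULL, see the odp.c hunk below) cannot touch the dying MR. The locking shape, as a sketch with a hypothetical revoke callback:]

static int demo_revoke_odp(struct ib_umem_odp *odp,
                           int (*revoke)(void *mr), void *mr)
{
        int ret;

        mutex_lock(&odp->umem_mutex);
        ret = revoke(mr);
        if (!ret)
                odp->private = NULL; /* invalidate_range now sees no MR */
        mutex_unlock(&odp->umem_mutex);

        return ret;
}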
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4b37446758fd..f1e23583e6c0 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -228,13 +228,27 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *imr = mr->parent;
+ /*
+	 * If userspace is racing to free the parent implicit ODP MR then we
+	 * can lose the race with parent destruction. In this case
+ * mlx5_ib_free_odp_mr() will free everything in the implicit_children
+ * xarray so NOP is fine. This child MR cannot be destroyed here because
+ * we are under its umem_mutex.
+ */
if (!refcount_inc_not_zero(&imr->mmkey.usecount))
return;
- xa_erase(&imr->implicit_children, idx);
+ xa_lock(&imr->implicit_children);
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
+ mr) {
+ xa_unlock(&imr->implicit_children);
+ return;
+ }
+
if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
- xa_erase(&mr_to_mdev(mr)->odp_mkeys,
- mlx5_base_mkey(mr->mmkey.key));
+ __xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
+ xa_unlock(&imr->implicit_children);
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
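
[Editor's note: the erase-only-if-still-ours idiom above closes the destroy race — only the thread whose pointer is still in the slot proceeds, and holding xa_lock makes the compare-exchange and the dependent __xa_erase() atomic as a unit. Reduced to its core:]

static bool demo_detach_child(struct xarray *children, unsigned long idx,
                              void *mr)
{
        bool ours;

        xa_lock(children);
        /* storing NULL cannot allocate, so GFP_KERNEL is safe under lock */
        ours = __xa_cmpxchg(children, idx, mr, NULL, GFP_KERNEL) == mr;
        xa_unlock(children);

        return ours; /* false: another thread is already freeing it */
}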
@@ -268,6 +282,8 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
if (!umem_odp->npages)
goto out;
mr = umem_odp->private;
+ if (!mr)
+ goto out;
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
@@ -313,7 +329,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
- mlx5_update_odp_stats(mr, invalidations, invalidations);
+ mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
/*
* We are now sure that the device will not access the
@@ -500,18 +516,18 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
refcount_inc(&ret->mmkey.usecount);
goto out_lock;
}
- xa_unlock(&imr->implicit_children);
if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
- ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_KERNEL);
+ ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
- xa_erase(&imr->implicit_children, idx);
- goto out_mr;
+ __xa_erase(&imr->implicit_children, idx);
+ goto out_lock;
}
mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
}
+ xa_unlock(&imr->implicit_children);
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
@@ -944,8 +960,7 @@ out:
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
- * Returns number of OS pages retrieved on success. The caller may continue to
- * the next data segment.
+ * Returns zero on success. The caller may continue to the next data segment.
* Can return the following error codes:
* -EAGAIN to designate a temporary error. The caller will abort handling the
* page fault and resolve it.
@@ -958,7 +973,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
u32 *bytes_committed,
u32 *bytes_mapped)
{
- int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
+ int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
struct pf_frame *head = NULL, *frame;
struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -993,13 +1008,20 @@ next_mr:
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
+ (io_virt & PAGE_MASK)) >>
+ PAGE_SHIFT;
ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
if (ret < 0)
goto end;
- mlx5_update_odp_stats(mr, faults, ret);
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
+
+ if (ret < pages_in_range) {
+ ret = -EFAULT;
+ goto end;
+ }
- npages += ret;
ret = 0;
break;
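
The new pages_in_range computation above widens [io_virt, io_virt + bcnt) to page boundaries before counting, which is what lets a short pagefault_mr() result be detected as -EFAULT. A standalone check of that arithmetic (4 KiB pages assumed for the sketch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static int pages_in_range(uint64_t addr, uint32_t len)
{
	return (ALIGN(addr + len, PAGE_SIZE) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	/* 2 bytes straddling a page boundary touch two pages */
	printf("%d\n", pages_in_range(0x0fff, 2));	/* -> 2 */
	printf("%d\n", pages_in_range(0x1000, 4096));	/* -> 1 */
	return 0;
}
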
@@ -1090,7 +1112,7 @@ end:
kfree(out);
*bytes_committed = 0;
- return ret ? ret : npages;
+ return ret;
}
/*
@@ -1109,8 +1131,7 @@ end:
* the committed bytes).
* @receive_queue: receive WQE end of sg list
*
- * Returns the number of pages loaded if positive, zero for an empty WQE, or a
- * negative error code.
+ * Returns zero for success or a negative error code.
*/
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
@@ -1118,7 +1139,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
void *wqe_end, u32 *bytes_mapped,
u32 *total_wqe_bytes, bool receive_queue)
{
- int ret = 0, npages = 0;
+ int ret = 0;
u64 io_virt;
__be32 key;
u32 byte_count;
@@ -1175,10 +1196,9 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
bytes_mapped);
if (ret < 0)
break;
- npages += ret;
}
- return ret < 0 ? ret : npages;
+ return ret;
}
/*
@@ -1414,12 +1434,6 @@ resolve_page_fault:
free_page((unsigned long)wqe_start);
}
-static int pages_in_range(u64 address, u32 length)
-{
- return (ALIGN(address + length, PAGE_SIZE) -
- (address & PAGE_MASK)) >> PAGE_SHIFT;
-}
-
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{
@@ -1458,7 +1472,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
- } else if (ret < 0 || pages_in_range(address, length) > ret) {
+ } else if (ret < 0) {
mlx5_ib_page_fault_resume(dev, pfault, 1);
if (ret != -ENOENT)
mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
@@ -1529,7 +1543,7 @@ static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
goto err;
}
- mlx5_update_odp_stats(mr, faults, ret);
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
mlx5r_deref_odp_mkey(mmkey);
if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
index affcf8fe943c..67841922c7b8 100644
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -96,9 +96,18 @@ static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
atomic64_read(&mr->odp_stats.faults)))
goto err_table;
if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_faults_handled",
+ atomic64_read(&mr->odp_stats.faults_handled)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
msg, "page_invalidations",
atomic64_read(&mr->odp_stats.invalidations)))
goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations_handled",
+ atomic64_read(&mr->odp_stats.invalidations_handled)))
+ goto err_table;
+
if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
atomic64_read(&mr->odp_stats.prefetch)))
goto err_table;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index ba2cd68b53e6..805e37dc7621 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -214,8 +214,8 @@ static const struct attribute_group port_linkcontrol_group = {
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -241,7 +241,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -249,8 +249,8 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -274,9 +274,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static struct bin_attribute *port_ccmgta_attributes[] = {
+static const struct bin_attribute *const port_ccmgta_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL,
@@ -295,7 +295,7 @@ static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
static const struct attribute_group port_ccmgta_attribute_group = {
.name = "CCMgtA",
.is_bin_visible = qib_ccmgta_is_bin_visible,
- .bin_attrs = port_ccmgta_attributes,
+ .bin_attrs_new = port_ccmgta_attributes,
};
/* Start sl2vl */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 13b654ddd3cc..4ddcd5860e0f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -151,34 +151,6 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
break;
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- if (!us_ibdev->ufdev->link_up &&
- netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_up(us_ibdev->ufdev);
- usnic_info("Link UP on %s\n",
- dev_name(&us_ibdev->ib_dev.dev));
- ib_event.event = IB_EVENT_PORT_ACTIVE;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else if (us_ibdev->ufdev->link_up &&
- !netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_down(us_ibdev->ufdev);
- usnic_info("Link DOWN on %s\n",
- dev_name(&us_ibdev->ib_dev.dev));
- usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
- ib_event.event = IB_EVENT_PORT_ERR;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else {
- usnic_dbg("Ignoring %s on %s\n",
- netdev_cmd_to_name(event),
- dev_name(&us_ibdev->ib_dev.dev));
- }
- break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
@@ -218,6 +190,50 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
mutex_unlock(&us_ibdev->usdev_lock);
}
+static void usnic_ib_handle_port_event(struct ib_device *ibdev,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ struct ib_event ib_event;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ if (!us_ibdev->ufdev->link_up &&
+ netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+ usnic_info("Link UP on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ ib_event.event = IB_EVENT_PORT_ACTIVE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else if (us_ibdev->ufdev->link_up &&
+ !netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_down(us_ibdev->ufdev);
+ usnic_info("Link DOWN on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else {
+ usnic_dbg("Ignoring %s on %s\n",
+ netdev_cmd_to_name(event),
+ dev_name(&us_ibdev->ib_dev.dev));
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+}
+
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
@@ -358,6 +374,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.query_port = usnic_ib_query_port,
.query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr,
+ .report_port_event = usnic_ib_handle_port_event,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
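
The .report_port_event addition above moves per-driver netdev handling behind an optional entry in the const ops table, so the core dispatches only to drivers that opt in. A sketch of that dispatch shape (illustrative names, not the ib_core internals):

#include <stdio.h>

struct device_ops {
	void (*report_port_event)(unsigned long event);
};

static void my_port_event(unsigned long event)
{
	printf("driver saw event %lu\n", event);
}

static const struct device_ops my_ops = {
	.report_port_event = my_port_event,
};

/* Core-side dispatch: call the hook only if the driver provided one. */
static void core_notify(const struct device_ops *ops, unsigned long event)
{
	if (ops->report_port_event)
		ops->report_port_event(event);
}

int main(void)
{
	core_notify(&my_ops, 1 /* say, a link-up event */);
	return 0;
}
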
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 768aad364c89..1664d1d7d969 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -143,6 +143,46 @@ static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
}
+static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
+ enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+ ib_event.device = &dev->ib_dev;
+ ib_event.element.port_num = port;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+}
+
+static void pvrdma_report_event_handle(struct ib_device *ibdev,
+ struct net_device *ndev,
+ unsigned long event)
+{
+ struct pvrdma_dev *dev = container_of(ibdev, struct pvrdma_dev, ib_dev);
+
+ switch (event) {
+ case NETDEV_DOWN:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_UP:
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL,
+ PVRDMA_DEVICE_CTL_UNQUIESCE);
+
+ mb();
+
+ if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
+ dev_err(&dev->pdev->dev,
+ "failed to activate device during link up\n");
+ else
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+
+ default:
+ break;
+ }
+}
+
static const struct ib_device_ops pvrdma_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_VMW_PVRDMA,
@@ -181,6 +221,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq,
+ .report_port_event = pvrdma_report_event_handle,
INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
@@ -362,18 +403,6 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
}
}
-static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
- enum ib_event_type event)
-{
- struct ib_event ib_event;
-
- memset(&ib_event, 0, sizeof(ib_event));
- ib_event.device = &dev->ib_dev;
- ib_event.element.port_num = port;
- ib_event.event = event;
- ib_dispatch_event(&ib_event);
-}
-
static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
@@ -666,21 +695,8 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
switch (event) {
case NETDEV_REBOOT:
- case NETDEV_DOWN:
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
break;
- case NETDEV_UP:
- pvrdma_write_reg(dev, PVRDMA_REG_CTL,
- PVRDMA_DEVICE_CTL_UNQUIESCE);
-
- mb();
-
- if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
- dev_err(&dev->pdev->dev,
- "failed to activate device during link up\n");
- else
- pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
- break;
case NETDEV_UNREGISTER:
ib_device_set_netdev(&dev->ib_dev, NULL, 1);
dev_put(dev->netdev);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 8cc64ceeb356..132a87e52d5c 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -571,11 +571,6 @@ static void rxe_port_event(struct rxe_dev *rxe,
/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_ACTIVE;
-
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
dev_info(&rxe->ib_dev.dev, "set active\n");
}
@@ -583,11 +578,6 @@ void rxe_port_up(struct rxe_dev *rxe)
/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_DOWN;
-
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
dev_info(&rxe->ib_dev.dev, "set down\n");
@@ -601,7 +591,7 @@ void rxe_set_port_state(struct rxe_dev *rxe)
if (!ndev)
return;
- if (netif_running(ndev) && netif_carrier_ok(ndev))
+ if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE)
rxe_port_up(rxe);
else
rxe_port_down(rxe);
@@ -623,18 +613,14 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_UNREGISTER:
ib_unregister_device_queued(&rxe->ib_dev);
break;
- case NETDEV_UP:
- rxe_port_up(rxe);
- break;
- case NETDEV_DOWN:
- rxe_port_down(rxe);
- break;
case NETDEV_CHANGEMTU:
rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
- rxe_set_port_state(rxe);
+ if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN)
+ rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
break;
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index d2f57ead78ad..003f681e5dc0 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -129,7 +129,7 @@ enum rxe_device_param {
enum rxe_port_param {
RXE_PORT_GID_TBL_LEN = 1024,
RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP,
- RXE_PORT_MAX_MSG_SZ = 0x800000,
+ RXE_PORT_MAX_MSG_SZ = (1UL << 31),
RXE_PORT_BAD_PKEY_CNTR = 0,
RXE_PORT_QKEY_VIOL_CNTR = 0,
RXE_PORT_LID = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 67567d62195e..d9cb682fd71f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -178,7 +178,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
{
struct rxe_pool *pool = elem->pool;
struct xarray *xa = &pool->xa;
- static int timeout = RXE_POOL_TIMEOUT;
int ret, err = 0;
void *xa_ret;
@@ -202,19 +201,19 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
* return to rdma-core
*/
if (sleepable) {
- if (!completion_done(&elem->complete) && timeout) {
+ if (!completion_done(&elem->complete)) {
ret = wait_for_completion_timeout(&elem->complete,
- timeout);
+ msecs_to_jiffies(50000));
/* Shouldn't happen. There are still references to
* the object but, rather than deadlock, free the
* object or pass back to rdma-core.
*/
if (WARN_ON(!ret))
- err = -EINVAL;
+ err = -ETIMEDOUT;
}
} else {
- unsigned long until = jiffies + timeout;
+ unsigned long until = jiffies + RXE_POOL_TIMEOUT;
/* AH objects are unique in that the destroy_ah verb
* can be called in atomic context. This delay
@@ -226,7 +225,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
mdelay(1);
if (WARN_ON(!completion_done(&elem->complete)))
- err = -EINVAL;
+ err = -ETIMEDOUT;
}
if (pool->cleanup)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 8a5fc20fd186..6152a0fdfc8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -62,6 +62,7 @@ static int rxe_query_port(struct ib_device *ibdev,
ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
&attr->active_width);
+ attr->state = ib_get_curr_port_state(ndev);
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else if (dev_get_flags(ndev) & IFF_UP)
@@ -696,7 +697,7 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
for (i = 0; i < ibwr->num_sge; i++)
length += ibwr->sg_list[i].length;
- if (length > (1UL << 31)) {
+ if (length > RXE_PORT_MAX_MSG_SZ) {
rxe_err_qp(qp, "message length too long\n");
break;
}
@@ -980,8 +981,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- /* IBA max message size is 2^31 */
- if (length >= (1UL<<31)) {
+ if (length > RXE_PORT_MAX_MSG_SZ) {
err = -EINVAL;
rxe_dbg("message length too long\n");
goto err_out;
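
Both validate_send_wr() and post_one_recv() now bound the summed SG length by the same RXE_PORT_MAX_MSG_SZ constant, with the recv check relaxed from >= to > so exactly 2^31 bytes is accepted. A minimal sketch of the shared check (a 64-bit accumulator is assumed so the sum itself cannot wrap):

#include <stdint.h>
#include <stdio.h>

#define RXE_PORT_MAX_MSG_SZ (1UL << 31)

static int validate_len(const uint32_t *sge_len, int num_sge)
{
	uint64_t length = 0;

	for (int i = 0; i < num_sge; i++)
		length += sge_len[i];

	return length > RXE_PORT_MAX_MSG_SZ ? -1 : 0;
}

int main(void)
{
	uint32_t ok[2] = { 1u << 30, 1u << 30 };	/* exactly 2^31: allowed */
	uint32_t bad[3] = { 1u << 30, 1u << 30, 1 };

	printf("%d %d\n", validate_len(ok, 2), validate_len(bad, 3));
	return 0;
}
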
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 14d3103aee6f..b17752bd1ecc 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -379,14 +379,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
sdev = to_siw_dev(base_dev);
switch (event) {
- case NETDEV_UP:
- siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
- break;
-
- case NETDEV_DOWN:
- siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
- break;
-
case NETDEV_REGISTER:
/*
* Device registration now handled only by
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 7ca0297d68a4..5ac8bd450d24 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -189,10 +189,9 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
- attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
+ attr->state = ib_get_curr_port_state(ndev);
+ attr->phys_state = attr->state == IB_PORT_ACTIVE ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
- attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
/*
* All zero
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 4e17d546d4cc..bf38ac6f87c4 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -584,6 +584,9 @@ static void dev_free(struct kref *ref)
list_del(&dev->entry);
mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
ib_dealloc_pd(dev->ib_pd);
kfree(dev);
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2916e77f589b..7289ae0b83ac 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3978,7 +3978,6 @@ static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
return host;
put_host:
- device_del(&host->dev);
put_device(&host->dev);
return NULL;
}
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 3bdbd34314b3..88ecdf5218ee 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -152,20 +152,6 @@ config INPUT_EVDEV
To compile this driver as a module, choose M here: the
module will be called evdev.
-config INPUT_EVBUG
- tristate "Event debugging"
- help
- Say Y here if you have a problem with the input subsystem and
- want all events (keypresses, mouse movements), to be output to
- the system log. While this is useful for debugging, it's also
- a security threat - your keypresses include your passwords, of
- course.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called evbug.
-
config INPUT_KUNIT_TEST
tristate "KUnit tests for Input" if !KUNIT_ALL_TESTS
depends on INPUT && KUNIT
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index c78753274921..930b64d2115e 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_INPUT_LEDS) += input-leds.o
obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o
obj-$(CONFIG_INPUT_JOYDEV) += joydev.o
obj-$(CONFIG_INPUT_EVDEV) += evdev.o
-obj-$(CONFIG_INPUT_EVBUG) += evbug.o
obj-$(CONFIG_INPUT_KEYBOARD) += keyboard/
obj-$(CONFIG_INPUT_MOUSE) += mouse/
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
deleted file mode 100644
index e47bdf92088a..000000000000
--- a/drivers/input/evbug.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 1999-2001 Vojtech Pavlik
- */
-
-/*
- * Input driver event debug module - dumps all events into syslog
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/init.h>
-#include <linux/device.h>
-
-MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
-MODULE_DESCRIPTION("Input driver event debug module");
-MODULE_LICENSE("GPL");
-
-static void evbug_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
-{
- printk(KERN_DEBUG pr_fmt("Event. Dev: %s, Type: %d, Code: %d, Value: %d\n"),
- dev_name(&handle->dev->dev), type, code, value);
-}
-
-static int evbug_connect(struct input_handler *handler, struct input_dev *dev,
- const struct input_device_id *id)
-{
- struct input_handle *handle;
- int error;
-
- handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->dev = dev;
- handle->handler = handler;
- handle->name = "evbug";
-
- error = input_register_handle(handle);
- if (error)
- goto err_free_handle;
-
- error = input_open_device(handle);
- if (error)
- goto err_unregister_handle;
-
- printk(KERN_DEBUG pr_fmt("Connected device: %s (%s at %s)\n"),
- dev_name(&dev->dev),
- dev->name ?: "unknown",
- dev->phys ?: "unknown");
-
- return 0;
-
- err_unregister_handle:
- input_unregister_handle(handle);
- err_free_handle:
- kfree(handle);
- return error;
-}
-
-static void evbug_disconnect(struct input_handle *handle)
-{
- printk(KERN_DEBUG pr_fmt("Disconnected device: %s\n"),
- dev_name(&handle->dev->dev));
-
- input_close_device(handle);
- input_unregister_handle(handle);
- kfree(handle);
-}
-
-static const struct input_device_id evbug_ids[] = {
- { .driver_info = 1 }, /* Matches all devices */
- { }, /* Terminating zero entry */
-};
-
-MODULE_DEVICE_TABLE(input, evbug_ids);
-
-static struct input_handler evbug_handler = {
- .event = evbug_event,
- .connect = evbug_connect,
- .disconnect = evbug_disconnect,
- .name = "evbug",
- .id_table = evbug_ids,
-};
-
-static int __init evbug_init(void)
-{
- return input_register_handler(&evbug_handler);
-}
-
-static void __exit evbug_exit(void)
-{
- input_unregister_handler(&evbug_handler);
-}
-
-module_init(evbug_init);
-module_exit(evbug_exit);
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 609a5f01761b..b527308cb52e 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -93,7 +93,7 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
{
struct ff_device *ff = dev->ff;
struct ff_effect *old;
- int ret = 0;
+ int error;
int id;
if (!test_bit(EV_FF, dev->evbit))
@@ -114,22 +114,20 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
}
if (!test_bit(effect->type, ff->ffbit)) {
- ret = compat_effect(ff, effect);
- if (ret)
- return ret;
+ error = compat_effect(ff, effect);
+ if (error)
+ return error;
}
- mutex_lock(&ff->mutex);
+ guard(mutex)(&ff->mutex);
if (effect->id == -1) {
for (id = 0; id < ff->max_effects; id++)
if (!ff->effect_owners[id])
break;
- if (id >= ff->max_effects) {
- ret = -ENOSPC;
- goto out;
- }
+ if (id >= ff->max_effects)
+ return -ENOSPC;
effect->id = id;
old = NULL;
@@ -137,30 +135,26 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
} else {
id = effect->id;
- ret = check_effect_access(ff, id, file);
- if (ret)
- goto out;
+ error = check_effect_access(ff, id, file);
+ if (error)
+ return error;
old = &ff->effects[id];
- if (!check_effects_compatible(effect, old)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!check_effects_compatible(effect, old))
+ return -EINVAL;
}
- ret = ff->upload(dev, effect, old);
- if (ret)
- goto out;
+ error = ff->upload(dev, effect, old);
+ if (error)
+ return error;
- spin_lock_irq(&dev->event_lock);
- ff->effects[id] = *effect;
- ff->effect_owners[id] = file;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ ff->effects[id] = *effect;
+ ff->effect_owners[id] = file;
+ }
- out:
- mutex_unlock(&ff->mutex);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
@@ -178,17 +172,16 @@ static int erase_effect(struct input_dev *dev, int effect_id,
if (error)
return error;
- spin_lock_irq(&dev->event_lock);
- ff->playback(dev, effect_id, 0);
- ff->effect_owners[effect_id] = NULL;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ ff->playback(dev, effect_id, 0);
+ ff->effect_owners[effect_id] = NULL;
+ }
if (ff->erase) {
error = ff->erase(dev, effect_id);
if (error) {
- spin_lock_irq(&dev->event_lock);
- ff->effect_owners[effect_id] = file;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock)
+ ff->effect_owners[effect_id] = file;
return error;
}
@@ -210,16 +203,12 @@ static int erase_effect(struct input_dev *dev, int effect_id,
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
struct ff_device *ff = dev->ff;
- int ret;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
- mutex_lock(&ff->mutex);
- ret = erase_effect(dev, effect_id, file);
- mutex_unlock(&ff->mutex);
-
- return ret;
+ guard(mutex)(&ff->mutex);
+ return erase_effect(dev, effect_id, file);
}
EXPORT_SYMBOL_GPL(input_ff_erase);
@@ -239,13 +228,11 @@ int input_ff_flush(struct input_dev *dev, struct file *file)
dev_dbg(&dev->dev, "flushing now\n");
- mutex_lock(&ff->mutex);
+ guard(mutex)(&ff->mutex);
for (i = 0; i < ff->max_effects; i++)
erase_effect(dev, i, file);
- mutex_unlock(&ff->mutex);
-
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_flush);
@@ -303,8 +290,6 @@ EXPORT_SYMBOL_GPL(input_ff_event);
*/
int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
- struct ff_device *ff;
- size_t ff_dev_size;
int i;
if (!max_effects) {
@@ -317,25 +302,19 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
return -EINVAL;
}
- ff_dev_size = struct_size(ff, effect_owners, max_effects);
- if (ff_dev_size == SIZE_MAX) /* overflow */
- return -EINVAL;
-
- ff = kzalloc(ff_dev_size, GFP_KERNEL);
+ struct ff_device *ff __free(kfree) =
+ kzalloc(struct_size(ff, effect_owners, max_effects),
+ GFP_KERNEL);
if (!ff)
return -ENOMEM;
- ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
- GFP_KERNEL);
- if (!ff->effects) {
- kfree(ff);
+ ff->effects = kcalloc(max_effects, sizeof(*ff->effects), GFP_KERNEL);
+ if (!ff->effects)
return -ENOMEM;
- }
ff->max_effects = max_effects;
mutex_init(&ff->mutex);
- dev->ff = ff;
dev->flush = input_ff_flush;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
@@ -348,6 +327,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
if (test_bit(FF_PERIODIC, ff->ffbit))
__set_bit(FF_RUMBLE, dev->ffbit);
+ dev->ff = no_free_ptr(ff);
+
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
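
The input conversions above lean on the kernel's cleanup.h helpers: guard()/scoped_guard() release a lock when the scope ends, __free(kfree) frees an allocation on any early return, and no_free_ptr() hands ownership out of the scope. A hedged userspace analogue built directly on the GCC/Clang cleanup attribute those helpers wrap (illustrative names; requires GCC or Clang):

#include <stdio.h>
#include <stdlib.h>

static void free_ptr(void *p)
{
	void **pp = p;
	free(*pp);		/* free(NULL) is a no-op, like kfree(NULL) */
}
#define __free_on_exit __attribute__((cleanup(free_ptr)))

static int create(size_t n)
{
	char *buf __free_on_exit = malloc(n);
	if (!buf)
		return -1;	/* early return: cleanup frees buf */

	/* ... initialize buf ... */

	/* Success: null the local to transfer ownership, the moral
	 * equivalent of no_free_ptr(). */
	char *owned = buf;
	buf = NULL;
	printf("ownership transferred: %p\n", (void *)owned);
	free(owned);
	return 0;
}

int main(void) { return create(16); }
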
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index c321cdabd214..e9120ba6bae0 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -401,13 +401,11 @@ static void ml_effect_timer(struct timer_list *t)
{
struct ml_device *ml = from_timer(ml, t, timer);
struct input_dev *dev = ml->dev;
- unsigned long flags;
pr_debug("timer: updating effects\n");
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
ml_play_effects(ml);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
/*
@@ -465,7 +463,7 @@ static int ml_ff_upload(struct input_dev *dev,
struct ml_device *ml = dev->ff->private;
struct ml_effect_state *state = &ml->states[effect->id];
- spin_lock_irq(&dev->event_lock);
+ guard(spinlock_irq)(&dev->event_lock);
if (test_bit(FF_EFFECT_STARTED, &state->flags)) {
__clear_bit(FF_EFFECT_PLAYING, &state->flags);
@@ -477,8 +475,6 @@ static int ml_ff_upload(struct input_dev *dev,
ml_schedule_timer(ml);
}
- spin_unlock_irq(&dev->event_lock);
-
return 0;
}
@@ -507,12 +503,11 @@ static void ml_ff_destroy(struct ff_device *ff)
int input_ff_create_memless(struct input_dev *dev, void *data,
int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
{
- struct ml_device *ml;
struct ff_device *ff;
int error;
int i;
- ml = kzalloc(sizeof(struct ml_device), GFP_KERNEL);
+ struct ml_device *ml __free(kfree) = kzalloc(sizeof(*ml), GFP_KERNEL);
if (!ml)
return -ENOMEM;
@@ -525,13 +520,10 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
set_bit(FF_GAIN, dev->ffbit);
error = input_ff_create(dev, FF_MEMLESS_EFFECTS);
- if (error) {
- kfree(ml);
+ if (error)
return error;
- }
ff = dev->ff;
- ff->private = ml;
ff->upload = ml_ff_upload;
ff->playback = ml_ff_playback;
ff->set_gain = ml_ff_set_gain;
@@ -548,6 +540,8 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
for (i = 0; i < FF_MEMLESS_EFFECTS; i++)
ml->states[i].effect = &ff->effects[i];
+ ff->private = no_free_ptr(ml);
+
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create_memless);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 6b04a674f832..337006dd9dcf 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -39,20 +39,20 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
unsigned int flags)
{
- struct input_mt *mt = dev->mt;
- int i;
-
if (!num_slots)
return 0;
- if (mt)
- return mt->num_slots != num_slots ? -EINVAL : 0;
+
+ if (dev->mt)
+ return dev->mt->num_slots != num_slots ? -EINVAL : 0;
+
/* Arbitrary limit for avoiding too large memory allocation. */
if (num_slots > 1024)
return -EINVAL;
- mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
+ struct input_mt *mt __free(kfree) =
+ kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
if (!mt)
- goto err_mem;
+ return -ENOMEM;
mt->num_slots = num_slots;
mt->flags = flags;
@@ -86,21 +86,18 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
unsigned int n2 = num_slots * num_slots;
mt->red = kcalloc(n2, sizeof(*mt->red), GFP_KERNEL);
if (!mt->red)
- goto err_mem;
+ return -ENOMEM;
}
/* Mark slots as 'inactive' */
- for (i = 0; i < num_slots; i++)
+ for (unsigned int i = 0; i < num_slots; i++)
input_mt_set_value(&mt->slots[i], ABS_MT_TRACKING_ID, -1);
/* Mark slots as 'unused' */
mt->frame = 1;
- dev->mt = mt;
+ dev->mt = no_free_ptr(mt);
return 0;
-err_mem:
- kfree(mt);
- return -ENOMEM;
}
EXPORT_SYMBOL(input_mt_init_slots);
@@ -285,14 +282,10 @@ void input_mt_drop_unused(struct input_dev *dev)
struct input_mt *mt = dev->mt;
if (mt) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
__input_mt_drop_unused(dev, mt);
mt->frame++;
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_mt_drop_unused);
@@ -339,11 +332,8 @@ void input_mt_sync_frame(struct input_dev *dev)
return;
if (mt->flags & INPUT_MT_DROP_UNUSED) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
__input_mt_drop_unused(dev, mt);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
diff --git a/drivers/input/input-poller.c b/drivers/input/input-poller.c
index 688e3cb1c2a0..9c57713a6151 100644
--- a/drivers/input/input-poller.c
+++ b/drivers/input/input-poller.c
@@ -162,7 +162,7 @@ static ssize_t input_dev_set_poll_interval(struct device *dev,
if (interval > poller->poll_interval_max)
return -EINVAL;
- mutex_lock(&input->mutex);
+ guard(mutex)(&input->mutex);
poller->poll_interval = interval;
@@ -172,8 +172,6 @@ static ssize_t input_dev_set_poll_interval(struct device *dev,
input_dev_poller_queue_work(poller);
}
- mutex_unlock(&input->mutex);
-
return count;
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7f0477e04ad2..c9e3ac64bcd0 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -115,23 +115,23 @@ static void input_pass_values(struct input_dev *dev,
lockdep_assert_held(&dev->event_lock);
- rcu_read_lock();
+ scoped_guard(rcu) {
+ handle = rcu_dereference(dev->grab);
+ if (handle) {
+ count = handle->handle_events(handle, vals, count);
+ break;
+ }
- handle = rcu_dereference(dev->grab);
- if (handle) {
- count = handle->handle_events(handle, vals, count);
- } else {
- list_for_each_entry_rcu(handle, &dev->h_list, d_node)
+ list_for_each_entry_rcu(handle, &dev->h_list, d_node) {
if (handle->open) {
count = handle->handle_events(handle, vals,
count);
if (!count)
break;
}
+ }
}
- rcu_read_unlock();
-
/* trigger auto repeat for key events */
if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
for (v = vals; v != vals + count; v++) {
@@ -390,13 +390,9 @@ void input_handle_event(struct input_dev *dev,
void input_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
- unsigned long flags;
-
if (is_event_supported(type, dev->evbit, EV_MAX)) {
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
input_handle_event(dev, type, code, value);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_event);
@@ -417,18 +413,15 @@ void input_inject_event(struct input_handle *handle,
{
struct input_dev *dev = handle->dev;
struct input_handle *grab;
- unsigned long flags;
if (is_event_supported(type, dev->evbit, EV_MAX)) {
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
+ guard(rcu)();
- rcu_read_lock();
grab = rcu_dereference(dev->grab);
if (!grab || grab == handle)
input_handle_event(dev, type, code, value);
- rcu_read_unlock();
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_inject_event);
@@ -526,22 +519,15 @@ EXPORT_SYMBOL(input_copy_abs);
int input_grab_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- int retval;
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->grab)
+ return -EBUSY;
- if (dev->grab) {
- retval = -EBUSY;
- goto out;
+ rcu_assign_pointer(dev->grab, handle);
}
- rcu_assign_pointer(dev->grab, handle);
-
- out:
- mutex_unlock(&dev->mutex);
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_grab_device);
@@ -576,9 +562,8 @@ void input_release_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
__input_release_device(handle);
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);
@@ -592,67 +577,57 @@ EXPORT_SYMBOL(input_release_device);
int input_open_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- int retval;
-
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
-
- if (dev->going_away) {
- retval = -ENODEV;
- goto out;
- }
+ int error;
- handle->open++;
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->going_away)
+ return -ENODEV;
- if (handle->handler->passive_observer)
- goto out;
+ handle->open++;
- if (dev->users++ || dev->inhibited) {
- /*
- * Device is already opened and/or inhibited,
- * so we can exit immediately and report success.
- */
- goto out;
- }
+ if (handle->handler->passive_observer)
+ return 0;
- if (dev->open) {
- retval = dev->open(dev);
- if (retval) {
- dev->users--;
- handle->open--;
+ if (dev->users++ || dev->inhibited) {
/*
- * Make sure we are not delivering any more events
- * through this handle
+ * Device is already opened and/or inhibited,
+ * so we can exit immediately and report success.
*/
- synchronize_rcu();
- goto out;
+ return 0;
}
- }
- if (dev->poller)
- input_dev_poller_start(dev->poller);
+ if (dev->open) {
+ error = dev->open(dev);
+ if (error) {
+ dev->users--;
+ handle->open--;
+ /*
+ * Make sure we are not delivering any more
+ * events through this handle.
+ */
+ synchronize_rcu();
+ return error;
+ }
+ }
- out:
- mutex_unlock(&dev->mutex);
- return retval;
+ if (dev->poller)
+ input_dev_poller_start(dev->poller);
+ }
+
+ return 0;
}
EXPORT_SYMBOL(input_open_device);
int input_flush_device(struct input_handle *handle, struct file *file)
{
struct input_dev *dev = handle->dev;
- int retval;
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
-
- if (dev->flush)
- retval = dev->flush(dev, file);
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->flush)
+ return dev->flush(dev, file);
+ }
- mutex_unlock(&dev->mutex);
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_flush_device);
@@ -667,7 +642,7 @@ void input_close_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
__input_release_device(handle);
@@ -688,8 +663,6 @@ void input_close_device(struct input_handle *handle)
*/
synchronize_rcu();
}
-
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);
@@ -726,11 +699,10 @@ static void input_disconnect_device(struct input_dev *dev)
* not to protect access to dev->going_away but rather to ensure
* that there are no threads in the middle of input_open_device()
*/
- mutex_lock(&dev->mutex);
- dev->going_away = true;
- mutex_unlock(&dev->mutex);
+ scoped_guard(mutex, &dev->mutex)
+ dev->going_away = true;
- spin_lock_irq(&dev->event_lock);
+ guard(spinlock_irq)(&dev->event_lock);
/*
* Simulate keyup events for all pressed keys so that handlers
@@ -743,8 +715,6 @@ static void input_disconnect_device(struct input_dev *dev)
list_for_each_entry(handle, &dev->h_list, d_node)
handle->open = 0;
-
- spin_unlock_irq(&dev->event_lock);
}
/**
@@ -901,14 +871,9 @@ static int input_default_setkeycode(struct input_dev *dev,
*/
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
- unsigned long flags;
- int retval;
+ guard(spinlock_irqsave)(&dev->event_lock);
- spin_lock_irqsave(&dev->event_lock, flags);
- retval = dev->getkeycode(dev, ke);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return retval;
+ return dev->getkeycode(dev, ke);
}
EXPORT_SYMBOL(input_get_keycode);
@@ -923,18 +888,17 @@ EXPORT_SYMBOL(input_get_keycode);
int input_set_keycode(struct input_dev *dev,
const struct input_keymap_entry *ke)
{
- unsigned long flags;
unsigned int old_keycode;
- int retval;
+ int error;
if (ke->keycode > KEY_MAX)
return -EINVAL;
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
- retval = dev->setkeycode(dev, ke, &old_keycode);
- if (retval)
- goto out;
+ error = dev->setkeycode(dev, ke, &old_keycode);
+ if (error)
+ return error;
/* Make sure KEY_RESERVED did not get enabled. */
__clear_bit(KEY_RESERVED, dev->keybit);
@@ -962,10 +926,7 @@ int input_set_keycode(struct input_dev *dev,
EV_SYN, SYN_REPORT, 1);
}
- out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_set_keycode);
@@ -1799,26 +1760,21 @@ static void input_dev_toggle(struct input_dev *dev, bool activate)
*/
void input_reset_device(struct input_dev *dev)
{
- unsigned long flags;
-
- mutex_lock(&dev->mutex);
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(mutex)(&dev->mutex);
+ guard(spinlock_irqsave)(&dev->event_lock);
input_dev_toggle(dev, true);
if (input_dev_release_keys(dev))
input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);
static int input_inhibit_device(struct input_dev *dev)
{
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
if (dev->inhibited)
- goto out;
+ return 0;
if (dev->users) {
if (dev->close)
@@ -1827,54 +1783,50 @@ static int input_inhibit_device(struct input_dev *dev)
input_dev_poller_stop(dev->poller);
}
- spin_lock_irq(&dev->event_lock);
- input_mt_release_slots(dev);
- input_dev_release_keys(dev);
- input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
- input_dev_toggle(dev, false);
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ input_mt_release_slots(dev);
+ input_dev_release_keys(dev);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_dev_toggle(dev, false);
+ }
dev->inhibited = true;
-out:
- mutex_unlock(&dev->mutex);
return 0;
}
static int input_uninhibit_device(struct input_dev *dev)
{
- int ret = 0;
+ int error;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
if (!dev->inhibited)
- goto out;
+ return 0;
if (dev->users) {
if (dev->open) {
- ret = dev->open(dev);
- if (ret)
- goto out;
+ error = dev->open(dev);
+ if (error)
+ return error;
}
if (dev->poller)
input_dev_poller_start(dev->poller);
}
dev->inhibited = false;
- spin_lock_irq(&dev->event_lock);
- input_dev_toggle(dev, true);
- spin_unlock_irq(&dev->event_lock);
-out:
- mutex_unlock(&dev->mutex);
- return ret;
+ scoped_guard(spinlock_irq, &dev->event_lock)
+ input_dev_toggle(dev, true);
+
+ return 0;
}
static int input_dev_suspend(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/*
* Keys that are pressed now are unlikely to be
@@ -1886,8 +1838,6 @@ static int input_dev_suspend(struct device *dev)
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1895,13 +1845,11 @@ static int input_dev_resume(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/* Restore state of LEDs and sounds, if any were active. */
input_dev_toggle(input_dev, true);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1909,7 +1857,7 @@ static int input_dev_freeze(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/*
* Keys that are pressed now are unlikely to be
@@ -1918,8 +1866,6 @@ static int input_dev_freeze(struct device *dev)
if (input_dev_release_keys(input_dev))
input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1927,13 +1873,11 @@ static int input_dev_poweroff(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -2274,18 +2218,16 @@ static void __input_unregister_device(struct input_dev *dev)
input_disconnect_device(dev);
- mutex_lock(&input_mutex);
-
- list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
- handle->handler->disconnect(handle);
- WARN_ON(!list_empty(&dev->h_list));
+ scoped_guard(mutex, &input_mutex) {
+ list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
+ handle->handler->disconnect(handle);
+ WARN_ON(!list_empty(&dev->h_list));
- del_timer_sync(&dev->timer);
- list_del_init(&dev->node);
+ del_timer_sync(&dev->timer);
+ list_del_init(&dev->node);
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
+ input_wakeup_procfs_readers();
+ }
device_del(&dev->dev);
}
@@ -2308,9 +2250,8 @@ static void devm_input_device_unregister(struct device *dev, void *res)
static void input_repeat_key(struct timer_list *t)
{
struct input_dev *dev = from_timer(dev, t, timer);
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
if (!dev->inhibited &&
test_bit(dev->repeat_key, dev->key) &&
@@ -2324,8 +2265,6 @@ static void input_repeat_key(struct timer_list *t)
mod_timer(&dev->timer, jiffies +
msecs_to_jiffies(dev->rep[REP_PERIOD]));
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
@@ -2370,10 +2309,10 @@ static int input_device_tune_vals(struct input_dev *dev)
if (!vals)
return -ENOMEM;
- spin_lock_irq(&dev->event_lock);
- dev->max_vals = max_vals;
- swap(dev->vals, vals);
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ dev->max_vals = max_vals;
+ swap(dev->vals, vals);
+ }
/* Because of swap() above, this frees the old vals memory */
kfree(vals);
@@ -2465,18 +2404,15 @@ int input_register_device(struct input_dev *dev)
path ? path : "N/A");
kfree(path);
- error = mutex_lock_interruptible(&input_mutex);
- if (error)
- goto err_device_del;
+ error = -EINTR;
+ scoped_cond_guard(mutex_intr, goto err_device_del, &input_mutex) {
+ list_add_tail(&dev->node, &input_dev_list);
- list_add_tail(&dev->node, &input_dev_list);
+ list_for_each_entry(handler, &input_handler_list, node)
+ input_attach_handler(dev, handler);
- list_for_each_entry(handler, &input_handler_list, node)
- input_attach_handler(dev, handler);
-
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
+ input_wakeup_procfs_readers();
+ }
if (dev->devres_managed) {
dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
@@ -2556,20 +2492,17 @@ int input_register_handler(struct input_handler *handler)
if (error)
return error;
- INIT_LIST_HEAD(&handler->h_list);
+ scoped_cond_guard(mutex_intr, return -EINTR, &input_mutex) {
+ INIT_LIST_HEAD(&handler->h_list);
- error = mutex_lock_interruptible(&input_mutex);
- if (error)
- return error;
-
- list_add_tail(&handler->node, &input_handler_list);
+ list_add_tail(&handler->node, &input_handler_list);
- list_for_each_entry(dev, &input_dev_list, node)
- input_attach_handler(dev, handler);
+ list_for_each_entry(dev, &input_dev_list, node)
+ input_attach_handler(dev, handler);
- input_wakeup_procfs_readers();
+ input_wakeup_procfs_readers();
+ }
- mutex_unlock(&input_mutex);
return 0;
}
EXPORT_SYMBOL(input_register_handler);
@@ -2585,7 +2518,7 @@ void input_unregister_handler(struct input_handler *handler)
{
struct input_handle *handle, *next;
- mutex_lock(&input_mutex);
+ guard(mutex)(&input_mutex);
list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
handler->disconnect(handle);
@@ -2594,8 +2527,6 @@ void input_unregister_handler(struct input_handler *handler)
list_del_init(&handler->node);
input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);
@@ -2615,19 +2546,17 @@ int input_handler_for_each_handle(struct input_handler *handler, void *data,
int (*fn)(struct input_handle *, void *))
{
struct input_handle *handle;
- int retval = 0;
+ int retval;
- rcu_read_lock();
+ guard(rcu)();
list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
retval = fn(handle, data);
if (retval)
- break;
+ return retval;
}
- rcu_read_unlock();
-
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_handler_for_each_handle);
@@ -2715,27 +2644,22 @@ int input_register_handle(struct input_handle *handle)
{
struct input_handler *handler = handle->handler;
struct input_dev *dev = handle->dev;
- int error;
input_handle_setup_event_handler(handle);
/*
* We take dev->mutex here to prevent race with
* input_release_device().
*/
- error = mutex_lock_interruptible(&dev->mutex);
- if (error)
- return error;
-
- /*
- * Filters go to the head of the list, normal handlers
- * to the tail.
- */
- if (handler->filter)
- list_add_rcu(&handle->d_node, &dev->h_list);
- else
- list_add_tail_rcu(&handle->d_node, &dev->h_list);
-
- mutex_unlock(&dev->mutex);
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ /*
+ * Filters go to the head of the list, normal handlers
+ * to the tail.
+ */
+ if (handler->filter)
+ list_add_rcu(&handle->d_node, &dev->h_list);
+ else
+ list_add_tail_rcu(&handle->d_node, &dev->h_list);
+ }
/*
* Since we are supposed to be called from ->connect()
@@ -2771,9 +2695,8 @@ void input_unregister_handle(struct input_handle *handle)
/*
* Take dev->mutex to prevent race with input_release_device().
*/
- mutex_lock(&dev->mutex);
- list_del_rcu(&handle->d_node);
- mutex_unlock(&dev->mutex);
+ scoped_guard(mutex, &dev->mutex)
+ list_del_rcu(&handle->d_node);
synchronize_rcu();
}
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index f6e92db4d789..3a5873e5fcb3 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -14,6 +14,7 @@
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
+#include <linux/string_choices.h>
#define DRIVER_DESC "Microsoft SideWinder joystick family driver"
@@ -677,7 +678,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
case 48: /* Ambiguous */
if (j == 14) { /* ID length 14*3 -> FFP */
sw->type = SW_ID_FFP;
- sprintf(comment, " [AC %s]", sw_get_bits(idbuf,38,1,3) ? "off" : "on");
+ sprintf(comment, " [AC %s]", str_off_on(sw_get_bits(idbuf,38,1,3)));
} else
sw->type = SW_ID_PP;
break;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ff9bc87f2f70..8fe2a51df649 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -150,6 +150,7 @@ static const struct xpad_device {
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
{ 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x02a9, "Xbox 360 Wireless Receiver (Unofficial)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
@@ -305,6 +306,7 @@ static const struct xpad_device {
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
+ { 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -373,16 +375,19 @@ static const struct xpad_device {
{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
- { 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ { 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -514,6 +519,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
+ XPAD_XBOX360_VENDOR(0x1a86), /* QH Electronics */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */
@@ -530,6 +536,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
+ XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
{ }
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index ec94fcfa4cde..adf0f311996c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -89,7 +89,7 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = {
0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183,
0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185,
0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0,
- 0, 89, 40, 0, 26, 13, 0, 0, 58, 54, 28, 27, 0, 43, 0, 85,
+ 0, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0, 85,
0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0,
82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c
index 993cdbda509e..4184dd2eaeeb 100644
--- a/drivers/input/keyboard/dlink-dir685-touchkeys.c
+++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/bitops.h>
struct dir685_touchkeys {
@@ -48,7 +49,7 @@ static irqreturn_t dir685_tk_irq_thread(int irq, void *data)
changed = tk->cur_key ^ key;
for_each_set_bit(i, &changed, num_bits) {
dev_dbg(tk->dev, "key %d is %s\n", i,
- test_bit(i, &key) ? "down" : "up");
+ str_down_up(test_bit(i, &key)));
input_report_key(tk->input, tk->codes[i], test_bit(i, &key));
}
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index e26bf2956344..e19442c6f80f 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -21,6 +21,7 @@
#include <linux/platform_data/lm8323.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/* Commands to send to the chip. */
#define LM8323_CMD_READ_ID 0x80 /* Read chip ID. */
@@ -269,7 +270,7 @@ static void process_keys(struct lm8323_chip *lm)
unsigned short keycode = lm->keymap[key];
dev_vdbg(&lm->client->dev, "key 0x%02x %s\n",
- key, isdown ? "down" : "up");
+ key, str_down_up(isdown));
if (lm->kp_enabled) {
input_event(lm->idev, EV_MSC, MSC_SCAN, key);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 6a852c76331b..13d135257e06 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -917,6 +917,18 @@ config INPUT_HISI_POWERKEY
To compile this driver as a module, choose M here: the
module will be called hisi_powerkey.
+config INPUT_QNAP_MCU
+ tristate "Input Support for QNAP MCU controllers"
+ depends on MFD_QNAP_MCU
+ help
+ This option enables support for input elements available on
+ embedded controllers used in QNAP NAS devices.
+
+ This includes a polled power-button as well as a beeper.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qnap-mcu-input.
+
config INPUT_RAVE_SP_PWRBUTTON
tristate "RAVE SP Power button Driver"
depends on RAVE_SP_CORE
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 4f7f736831ba..6d91804d0a6f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
obj-$(CONFIG_INPUT_PWM_VIBRA) += pwm-vibra.o
+obj-$(CONFIG_INPUT_QNAP_MCU) += qnap-mcu-input.o
obj-$(CONFIG_INPUT_RAVE_SP_PWRBUTTON) += rave-sp-pwrbutton.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o
diff --git a/drivers/input/misc/ideapad_slidebar.c b/drivers/input/misc/ideapad_slidebar.c
index f6e5fc807b4d..ab2e0a401904 100644
--- a/drivers/input/misc/ideapad_slidebar.c
+++ b/drivers/input/misc/ideapad_slidebar.c
@@ -121,7 +121,7 @@ static void slidebar_mode_set(u8 mode)
}
static bool slidebar_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended = false;
@@ -219,7 +219,7 @@ static int __init ideapad_probe(struct platform_device* pdev)
input_set_capability(slidebar_input_dev, EV_ABS, ABS_X);
input_set_abs_params(slidebar_input_dev, ABS_X, 0, 0xff, 0, 0);
- err = i8042_install_filter(slidebar_i8042_filter);
+ err = i8042_install_filter(slidebar_i8042_filter, NULL);
if (err) {
dev_err(&pdev->dev,
"Failed to install i8042 filter: %d\n", err);
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 0e646f1b257b..cdb9be737e48 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/max77693.h>
@@ -94,7 +95,7 @@ static int max77843_haptic_bias(struct max77693_haptic *haptic, bool on)
on << MAINCTRL1_BIASEN_SHIFT);
if (error) {
dev_err(haptic->dev, "failed to %s bias: %d\n",
- on ? "enable" : "disable", error);
+ str_enable_disable(on), error);
return error;
}
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 08412239b8e6..0c661140fb88 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -38,6 +38,8 @@
#define MMA8450_CTRL_REG1 0x38
#define MMA8450_CTRL_REG2 0x39
+#define MMA8450_ID 0xc6
+#define MMA8450_WHO_AM_I 0x0f
static int mma8450_read(struct i2c_client *c, unsigned int off)
{
@@ -148,8 +150,20 @@ static void mma8450_close(struct input_dev *input)
*/
static int mma8450_probe(struct i2c_client *c)
{
+ struct i2c_adapter *adapter = c->adapter;
struct input_dev *input;
- int err;
+ int err, client_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ return dev_err_probe(&c->dev, -EINVAL,
+ "I2C adapter doesn't support SMBUS BYTE");
+
+ client_id = i2c_smbus_read_byte_data(c, MMA8450_WHO_AM_I);
+ if (client_id != MMA8450_ID)
+ return dev_err_probe(&c->dev, -EINVAL,
+ "unexpected chip ID 0x%x (vs 0x%x)\n",
+ client_id, MMA8450_ID);
input = devm_input_allocate_device(&c->dev);
if (!input)
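The new ID check relies on dev_err_probe(), which logs the message and
returns the error code in a single statement (and, for -EPROBE_DEFER, logs
at debug level and records the deferral reason instead). A hedged sketch of
the pattern, using a hypothetical sensor_check_id() helper that is not part
of this patch:

	static int sensor_check_id(struct i2c_client *c)
	{
		int id = i2c_smbus_read_byte_data(c, MMA8450_WHO_AM_I);

		if (id < 0)
			return dev_err_probe(&c->dev, id, "cannot read chip ID\n");
		if (id != MMA8450_ID)
			return dev_err_probe(&c->dev, -EINVAL,
					     "unexpected chip ID %#x\n", id);
		return 0;
	}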
diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
index eb4173f9c820..7ba8d166d68c 100644
--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
+++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
@@ -187,6 +187,12 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev)
return 0;
}
+static void bbnsm_pwrkey_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -223,6 +229,8 @@ static struct platform_driver bbnsm_pwrkey_driver = {
.of_match_table = bbnsm_pwrkey_ids,
},
.probe = bbnsm_pwrkey_probe,
+ .remove = bbnsm_pwrkey_remove,
};
module_platform_driver(bbnsm_pwrkey_driver);
diff --git a/drivers/input/misc/qnap-mcu-input.c b/drivers/input/misc/qnap-mcu-input.c
new file mode 100644
index 000000000000..76e62f0816c1
--- /dev/null
+++ b/drivers/input/misc/qnap-mcu-input.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Driver for input events on QNAP-MCUs
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/input.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <uapi/linux/input-event-codes.h>
+
+/*
+ * The power-key needs to be pressed for a while to create an event,
+ * so there is no use for overly frequent polling.
+ */
+#define POLL_INTERVAL 500
+
+struct qnap_mcu_input_dev {
+ struct input_dev *input;
+ struct qnap_mcu *mcu;
+ struct device *dev;
+
+ struct work_struct beep_work;
+ int beep_type;
+};
+
+static void qnap_mcu_input_poll(struct input_dev *input)
+{
+ struct qnap_mcu_input_dev *idev = input_get_drvdata(input);
+ static const u8 cmd[] = { '@', 'C', 'V' };
+ u8 reply[4];
+ int state, ret;
+
+ /* poll the power button */
+ ret = qnap_mcu_exec(idev->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return;
+
+ /* First bytes must mirror the sent command */
+ if (memcmp(cmd, reply, sizeof(cmd))) {
+ dev_err(idev->dev, "malformed data received\n");
+ return;
+ }
+
+ state = reply[3] - 0x30;
+ input_event(input, EV_KEY, KEY_POWER, state);
+ input_sync(input);
+}
+
+static void qnap_mcu_input_beeper_work(struct work_struct *work)
+{
+ struct qnap_mcu_input_dev *idev =
+ container_of(work, struct qnap_mcu_input_dev, beep_work);
+ const u8 cmd[] = { '@', 'C', (idev->beep_type == SND_TONE) ? '3' : '2' };
+
+ qnap_mcu_exec_with_ack(idev->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_input_event(struct input_dev *input, unsigned int type,
+ unsigned int code, int value)
+{
+ struct qnap_mcu_input_dev *idev = input_get_drvdata(input);
+
+ if (type != EV_SND || (code != SND_BELL && code != SND_TONE))
+ return -EOPNOTSUPP;
+
+ if (value < 0)
+ return -EINVAL;
+
+ /* beep runtime is determined by the MCU */
+ if (value == 0)
+ return 0;
+
+ /* Schedule work to actually turn the beeper on */
+ idev->beep_type = code;
+ schedule_work(&idev->beep_work);
+
+ return 0;
+}
+
+static void qnap_mcu_input_close(struct input_dev *input)
+{
+ struct qnap_mcu_input_dev *idev = input_get_drvdata(input);
+
+ cancel_work_sync(&idev->beep_work);
+}
+
+static int qnap_mcu_input_probe(struct platform_device *pdev)
+{
+ struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ struct qnap_mcu_input_dev *idev;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ int ret;
+
+ idev = devm_kzalloc(dev, sizeof(*idev), GFP_KERNEL);
+ if (!idev)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return dev_err_probe(dev, -ENOMEM, "no memory for input device\n");
+
+ idev->input = input;
+ idev->dev = dev;
+ idev->mcu = mcu;
+
+ input_set_drvdata(input, idev);
+
+ input->name = "qnap-mcu";
+ input->phys = "qnap-mcu-input/input0";
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+ input->event = qnap_mcu_input_event;
+ input->close = qnap_mcu_input_close;
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ input_set_capability(input, EV_SND, SND_BELL);
+ input_set_capability(input, EV_SND, SND_TONE);
+
+ INIT_WORK(&idev->beep_work, qnap_mcu_input_beeper_work);
+
+ ret = input_setup_polling(input, qnap_mcu_input_poll);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to set up polling\n");
+
+ input_set_poll_interval(input, POLL_INTERVAL);
+
+ ret = input_register_device(input);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to register input device\n");
+
+ return 0;
+}
+
+static struct platform_driver qnap_mcu_input_driver = {
+ .probe = qnap_mcu_input_probe,
+ .driver = {
+ .name = "qnap-mcu-input",
+ },
+};
+module_platform_driver(qnap_mcu_input_driver);
+
+MODULE_ALIAS("platform:qnap-mcu-input");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU input driver");
+MODULE_LICENSE("GPL");
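Because the driver advertises EV_SND with SND_BELL and SND_TONE and handles
them in qnap_mcu_input_event(), the beeper can be exercised from userspace
through the evdev interface. A minimal sketch (the /dev/input/event0 path is
an assumption; the actual node depends on the system):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_event ev;
		int fd = open("/dev/input/event0", O_WRONLY); /* assumed node */

		if (fd < 0)
			return 1;

		memset(&ev, 0, sizeof(ev));
		ev.type = EV_SND;
		ev.code = SND_BELL;
		ev.value = 1;	/* non-zero schedules the beep work */
		write(fd, &ev, sizeof(ev));
		close(fd);
		return 0;
	}

A zero value is accepted but ignored, since the beep duration is determined
by the MCU itself, as noted in qnap_mcu_input_event() above.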
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 3666ba6d1f30..9711f5c7c78a 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#define MAX_MAGNITUDE_SHIFT 16
@@ -44,7 +45,7 @@ static int regulator_haptic_toggle(struct regulator_haptic *haptic, bool on)
if (error) {
dev_err(haptic->dev,
"failed to switch regulator %s: %d\n",
- on ? "on" : "off", error);
+ str_on_off(on), error);
return error;
}
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a841883660fb..fee1796da3d0 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/string_choices.h>
#include <linux/input.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
@@ -199,7 +200,7 @@ static int elan_set_power(struct elan_tp_data *data, bool on)
} while (--repeat > 0);
dev_err(&data->client->dev, "failed to set power %s: %d\n",
- on ? "on" : "off", error);
+ str_on_off(on), error);
return error;
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2735f86c23cc..aba57abe6978 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -665,23 +665,50 @@ static void synaptics_pt_stop(struct serio *serio)
priv->pt_port = NULL;
}
+static int synaptics_pt_open(struct serio *serio)
+{
+ struct psmouse *parent = psmouse_from_serio(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ guard(serio_pause_rx)(parent->ps2dev.serio);
+ priv->pt_port_open = true;
+
+ return 0;
+}
+
+static void synaptics_pt_close(struct serio *serio)
+{
+ struct psmouse *parent = psmouse_from_serio(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ guard(serio_pause_rx)(parent->ps2dev.serio);
+ priv->pt_port_open = false;
+}
+
static int synaptics_is_pt_packet(u8 *buf)
{
return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
}
-static void synaptics_pass_pt_packet(struct serio *ptport, u8 *packet)
+static void synaptics_pass_pt_packet(struct synaptics_data *priv, u8 *packet)
{
- struct psmouse *child = psmouse_from_serio(ptport);
+ struct serio *ptport;
- if (child && child->state == PSMOUSE_ACTIVATED) {
- serio_interrupt(ptport, packet[1], 0);
- serio_interrupt(ptport, packet[4], 0);
- serio_interrupt(ptport, packet[5], 0);
- if (child->pktsize == 4)
- serio_interrupt(ptport, packet[2], 0);
- } else {
- serio_interrupt(ptport, packet[1], 0);
+ ptport = priv->pt_port;
+ if (!ptport)
+ return;
+
+ serio_interrupt(ptport, packet[1], 0);
+
+ if (priv->pt_port_open) {
+ struct psmouse *child = psmouse_from_serio(ptport);
+
+ if (child->state == PSMOUSE_ACTIVATED) {
+ serio_interrupt(ptport, packet[4], 0);
+ serio_interrupt(ptport, packet[5], 0);
+ if (child->pktsize == 4)
+ serio_interrupt(ptport, packet[2], 0);
+ }
}
}
@@ -720,6 +747,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
serio->write = synaptics_pt_write;
serio->start = synaptics_pt_start;
serio->stop = synaptics_pt_stop;
+ serio->open = synaptics_pt_open;
+ serio->close = synaptics_pt_close;
serio->parent = psmouse->ps2dev.serio;
psmouse->pt_activate = synaptics_pt_activate;
@@ -1216,11 +1245,10 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
if (SYN_CAP_PASS_THROUGH(priv->info.capabilities) &&
synaptics_is_pt_packet(psmouse->packet)) {
- if (priv->pt_port)
- synaptics_pass_pt_packet(priv->pt_port,
- psmouse->packet);
- } else
+ synaptics_pass_pt_packet(priv, psmouse->packet);
+ } else {
synaptics_process_packet(psmouse);
+ }
return PSMOUSE_FULL_PACKET;
}
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 899aee598632..3853165b6b3a 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -188,6 +188,7 @@ struct synaptics_data {
bool disable_gesture; /* disable gestures */
struct serio *pt_port; /* Pass-through serio port */
+ bool pt_port_open;
/*
* Last received Advanced Gesture Mode (AGM) packet. An AGM packet
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 509330a27880..cab5a4c5baf5 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -179,8 +179,8 @@ static struct platform_device *i8042_platform_device;
static struct notifier_block i8042_kbd_bind_notifier_block;
static bool i8042_handle_data(int irq);
-static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
- struct serio *serio);
+static i8042_filter_t i8042_platform_filter;
+static void *i8042_platform_filter_context;
void i8042_lock_chip(void)
{
@@ -194,8 +194,7 @@ void i8042_unlock_chip(void)
}
EXPORT_SYMBOL(i8042_unlock_chip);
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+int i8042_install_filter(i8042_filter_t filter, void *context)
{
guard(spinlock_irqsave)(&i8042_lock);
@@ -203,12 +202,12 @@ int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
return -EBUSY;
i8042_platform_filter = filter;
+ i8042_platform_filter_context = context;
return 0;
}
EXPORT_SYMBOL(i8042_install_filter);
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *port))
+int i8042_remove_filter(i8042_filter_t filter)
{
guard(spinlock_irqsave)(&i8042_lock);
@@ -216,6 +215,7 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
return -EINVAL;
i8042_platform_filter = NULL;
+ i8042_platform_filter_context = NULL;
return 0;
}
EXPORT_SYMBOL(i8042_remove_filter);
@@ -480,7 +480,10 @@ static bool i8042_filter(unsigned char data, unsigned char str,
}
}
- if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
+ if (!i8042_platform_filter)
+ return false;
+
+ if (i8042_platform_filter(data, str, serio, i8042_platform_filter_context)) {
dbg("Filtered out by platform filter\n");
return true;
}
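The net effect of this change is that the platform filter now receives a
caller-supplied context pointer instead of having to rely on file-scope
state. A minimal sketch of a consumer under the new i8042_filter_t
signature (my_state and my_filter are hypothetical names, not part of this
patch):

	#include <linux/i8042.h>
	#include <linux/serio.h>

	struct my_state {
		bool extended;
	};

	static bool my_filter(unsigned char data, unsigned char str,
			      struct serio *serio, void *context)
	{
		struct my_state *state = context;

		/* Returning true swallows the byte before serio sees it. */
		if (data == 0xe0) {
			state->extended = true;
			return true;
		}
		return false;
	}

	/* registration now carries the context echoed back above:
	 *	err = i8042_install_filter(my_filter, state);
	 * and removal only needs the filter:
	 *	i8042_remove_filter(my_filter);
	 */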
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index f4e950920e84..eb3cc2befcdf 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -23,6 +23,7 @@
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/bitops.h>
#include <linux/input/mt.h>
@@ -102,7 +103,7 @@ static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, down);
dev_dbg(&client->dev, "%s id:%d x:%d y:%d z:%d",
- down ? "down" : "up", id, x, y, z);
+ str_down_up(down), id, x, y, z);
if (down) {
input_report_abs(input_dev, ABS_MT_POSITION_X, x);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 47c46e4b739e..ec1b5e32b972 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -367,6 +367,18 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
'arm-smmu.disable_bypass' will continue to override this
config.
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ depends on ARM_SMMU
+ default y
+ help
+ Say Y here (by default) to apply a workaround that disables the
+ MMU-500's next-page prefetcher for the sake of 4 known errata.
+
+ Say N here only if you are sure that none of the errata related
+ to prefetch enablement apply to your platform.
+ Refer to silicon-errata.rst for information on the errata IDs.
+
config ARM_SMMU_QCOM
def_tristate y
depends on ARM_SMMU && ARCH_QCOM
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1bef5d55b2f9..68debf5ee2d7 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -16,7 +16,6 @@ irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);
-void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
u8 cntrl_intr, u8 cntrl_log,
u32 status_run_mask, u32 status_overflow_mask);
@@ -41,13 +40,13 @@ void amd_iommu_disable(void);
int amd_iommu_reenable(int mode);
int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
-extern enum io_pgtable_fmt amd_iommu_pgtable;
+extern enum protection_domain_mode amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
extern unsigned long amd_iommu_pgsize_bitmap;
/* Protection domain ops */
void amd_iommu_init_identity_domain(void);
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
+struct protection_domain *protection_domain_alloc(void);
void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct mm_struct *mm);
@@ -89,7 +88,6 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
*/
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
-void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
@@ -184,3 +182,6 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
#endif
+
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index fdb0357e0bb9..0bbda60d3cdc 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -220,6 +220,8 @@
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
+#define DTE_DATA1_SYSMGT_MASK GENMASK_ULL(41, 40)
+
#define DEV_ENTRY_IRQ_TBL_EN 0x80
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
@@ -407,8 +409,7 @@
#define DTE_FLAG_HAD (3ULL << 7)
#define DTE_FLAG_GIOV BIT_ULL(54)
#define DTE_FLAG_GV BIT_ULL(55)
-#define DTE_GLX_SHIFT (56)
-#define DTE_GLX_MASK (3)
+#define DTE_GLX GENMASK_ULL(57, 56)
#define DTE_FLAG_IR BIT_ULL(61)
#define DTE_FLAG_IW BIT_ULL(62)
@@ -416,18 +417,18 @@
#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DEV_DOMID_MASK 0xffffULL
-#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
-#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
-
-#define DTE_GCR3_SHIFT_A 58
-#define DTE_GCR3_SHIFT_B 16
-#define DTE_GCR3_SHIFT_C 43
+#define DTE_GCR3_14_12 GENMASK_ULL(60, 58)
+#define DTE_GCR3_30_15 GENMASK_ULL(31, 16)
+#define DTE_GCR3_51_31 GENMASK_ULL(63, 43)
#define DTE_GPT_LEVEL_SHIFT 54
+#define DTE_GPT_LEVEL_MASK GENMASK_ULL(55, 54)
#define GCR3_VALID 0x01ULL
+/* DTE[128:179] | DTE[184:191] */
+#define DTE_DATA2_INTR_MASK ~GENMASK_ULL(55, 52)
+
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD)
@@ -468,7 +469,7 @@ extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
do { \
if (amd_iommu_dump) \
- pr_info("AMD-Vi: " format, ## arg); \
+ pr_info(format, ## arg); \
} while(0);
/* global flag if IOMMUs cache non-present entries */
@@ -516,6 +517,9 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
+#define for_each_ivhd_dte_flags(entry) \
+ list_for_each_entry((entry), &amd_ivhd_dev_flags_list, list)
+
struct amd_iommu;
struct iommu_domain;
struct irq_domain;
@@ -837,6 +841,7 @@ struct devid_map {
struct iommu_dev_data {
/*Protect against attach/detach races */
struct mutex mutex;
+ spinlock_t dte_lock; /* DTE lock for 256-bit access */
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
@@ -881,7 +886,21 @@ extern struct list_head amd_iommu_list;
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u64 data[4];
+ union {
+ u64 data[4];
+ u128 data128[2];
+ };
+};
+
+/*
+ * Structure to store persistent DTE flags from IVHD
+ */
+struct ivhd_dte_flags {
+ struct list_head list;
+ u16 segid;
+ u16 devid_first;
+ u16 devid_last;
+ struct dev_table_entry dte;
};
/*
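Two conventions in the reworked DTE definitions are worth unpacking. The
classic DEV_ENTRY_* values encode a flat bit position within the 256-bit
DTE, which set_dte_bit() in init.c below splits into a qword index and a
bit offset: for DEV_ENTRY_SYSMGT1 (0x68), 0x68 >> 6 == 1 and
0x68 & 0x3f == 0x28, i.e. data[1] bit 40, matching the new
DTE_DATA1_SYSMGT_MASK of GENMASK_ULL(41, 40). The GCR3 root pointer, in
turn, is now scattered into the DTE via FIELD_PREP() against the
GENMASK-based masks; a sketch of the packing, mirroring set_dte_gcr3_table()
later in this series (data0/data1 stand for dte.data[0]/dte.data[1]):

	u64 gcr3 = iommu_virt_to_phys(gcr3_tbl);	/* 4K-aligned physical address */

	/* gcr3 bits 14:12 -> DTE data[0] bits 60:58 */
	data0 |= FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
	/* gcr3 bits 30:15 -> DTE data[1] bits 31:16 */
	data1 |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15);
	/* gcr3 bits 51:31 -> DTE data[1] bits 63:43 */
	data1 |= FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);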
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 0e0a531042ac..c5cd92edada0 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -152,7 +152,7 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
-enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
/* Guest page table level */
int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
@@ -174,8 +174,8 @@ bool amd_iommu_snp_en;
EXPORT_SYMBOL(amd_iommu_snp_en);
LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
-LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
- system */
+LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */
+LIST_HEAD(amd_ivhd_dev_flags_list); /* list of all IVHD device entry settings */
/* Number of IOMMUs present in the system */
static int amd_iommus_present;
@@ -984,36 +984,12 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
}
/* sets a specific bit in the device table entry. */
-static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
- u16 devid, u8 bit)
+static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- dev_table[devid].data[i] |= (1UL << _bit);
-}
-
-static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
-{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- return __set_dev_entry_bit(dev_table, devid, bit);
-}
-
-static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
- u16 devid, u8 bit)
-{
- int i = (bit >> 6) & 0x03;
- int _bit = bit & 0x3f;
-
- return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
-}
-
-static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
-{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- return __get_dev_entry_bit(dev_table, devid, bit);
+ dte->data[i] |= (1UL << _bit);
}
static bool __copy_device_table(struct amd_iommu *iommu)
@@ -1081,11 +1057,9 @@ static bool __copy_device_table(struct amd_iommu *iommu)
}
/* If gcr3 table existed, mask it out */
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
- tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
- tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
- tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
- tmp |= DTE_FLAG_GV;
+ tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
}
}
@@ -1136,42 +1110,107 @@ static bool copy_device_table(void)
return true;
}
-void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid)
{
- int sysmgt;
+ struct ivhd_dte_flags *e;
+ unsigned int best_len = UINT_MAX;
+ struct dev_table_entry *dte = NULL;
- sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+ for_each_ivhd_dte_flags(e) {
+ /*
+ * Need to go through the whole list to find the smallest range
+ * that contains the devid.
+ */
+ if ((e->segid == segid) &&
+ (e->devid_first <= devid) && (devid <= e->devid_last)) {
+ unsigned int len = e->devid_last - e->devid_first;
+
+ if (len < best_len) {
+ dte = &(e->dte);
+ best_len = len;
+ }
+ }
+ }
+ return dte;
+}
+
+static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last)
+{
+ struct ivhd_dte_flags *e;
- if (sysmgt == 0x01)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
+ for_each_ivhd_dte_flags(e) {
+ if ((e->segid == segid) &&
+ (e->devid_first == first) &&
+ (e->devid_last == last))
+ return true;
+ }
+ return false;
}
/*
* This function takes the device specific flags read from the ACPI
* table and sets up the device table entry with that information
*/
-static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
- u16 devid, u32 flags, u32 ext_flags)
+static void __init
+set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last,
+ u32 flags, u32 ext_flags)
{
- if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
- if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
- if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
- if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
- if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
- if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
- if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
+ int i;
+ struct dev_table_entry dte = {};
- amd_iommu_apply_erratum_63(iommu, devid);
+ /* Parse IVHD DTE setting flags and store information */
+ if (flags) {
+ struct ivhd_dte_flags *d;
- amd_iommu_set_rlookup_table(iommu, devid);
+ if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last))
+ return;
+
+ d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL);
+ if (!d)
+ return;
+
+ pr_debug("%s: devid range %#x:%#x\n", __func__, first, last);
+
+ if (flags & ACPI_DEVFLAG_INITPASS)
+ set_dte_bit(&dte, DEV_ENTRY_INIT_PASS);
+ if (flags & ACPI_DEVFLAG_EXTINT)
+ set_dte_bit(&dte, DEV_ENTRY_EINT_PASS);
+ if (flags & ACPI_DEVFLAG_NMI)
+ set_dte_bit(&dte, DEV_ENTRY_NMI_PASS);
+ if (flags & ACPI_DEVFLAG_SYSMGT1)
+ set_dte_bit(&dte, DEV_ENTRY_SYSMGT1);
+ if (flags & ACPI_DEVFLAG_SYSMGT2)
+ set_dte_bit(&dte, DEV_ENTRY_SYSMGT2);
+ if (flags & ACPI_DEVFLAG_LINT0)
+ set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS);
+ if (flags & ACPI_DEVFLAG_LINT1)
+ set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS);
+
+ /* Apply erratum 63, which needs info in initial_dte */
+ if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1)
+ dte.data[0] |= DTE_FLAG_IW;
+
+ memcpy(&d->dte, &dte, sizeof(dte));
+ d->segid = iommu->pci_seg->id;
+ d->devid_first = first;
+ d->devid_last = last;
+ list_add_tail(&d->list, &amd_ivhd_dev_flags_list);
+ }
+
+ for (i = first; i <= last; i++) {
+ if (flags) {
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ memcpy(&dev_table[i], &dte, sizeof(dte));
+ }
+ amd_iommu_set_rlookup_table(iommu, i);
+ }
+}
+
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+ u16 devid, u32 flags, u32 ext_flags)
+{
+ set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags);
}
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
@@ -1239,7 +1278,7 @@ static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
entry->cmd_line = cmd_line;
entry->root_devid = (entry->devid & (~0x7));
- pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
+ pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n",
entry->cmd_line ? "cmd" : "ivrs",
entry->hid, entry->uid, entry->root_devid);
@@ -1331,15 +1370,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
switch (e->type) {
case IVHD_DEV_ALL:
- DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
-
- for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
- set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
+ DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags);
+ set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0);
break;
case IVHD_DEV_SELECT:
- DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x\n",
+ DUMP_printk(" DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1350,8 +1386,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_SELECT_RANGE_START:
- DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ DUMP_printk(" DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1364,8 +1399,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS:
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x devid_to: %02x:%02x.%x\n",
+ DUMP_printk(" DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1382,9 +1416,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS_RANGE:
- DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %04x:%02x:%02x.%x flags: %02x "
- "devid_to: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1401,8 +1433,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT:
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x ext: %08x\n",
+ DUMP_printk(" DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1414,8 +1445,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT_RANGE:
- DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ DUMP_printk(" DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1428,21 +1458,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_RANGE_END:
- DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias) {
+ if (alias)
pci_seg->alias_table[dev_i] = devid_to;
- set_dev_entry_from_acpi(iommu,
- devid_to, flags, ext_flags);
- }
- set_dev_entry_from_acpi(iommu, dev_i,
- flags, ext_flags);
}
+ set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -1461,11 +1488,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
else
var = "UNKNOWN";
- DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
var, (int)handle,
seg_id, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
- PCI_FUNC(devid));
+ PCI_FUNC(devid),
+ e->flags);
ret = add_special_device(type, handle, &devid, false);
if (ret)
@@ -1525,11 +1553,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
}
devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
- DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
hid, uid, seg_id,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
- PCI_FUNC(devid));
+ PCI_FUNC(devid),
+ e->flags);
flags = e->flags;
@@ -1757,13 +1786,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- /*
- * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
- * GAM also requires GA mode. Therefore, we need to
- * check cmpxchg16b support before enabling it.
- */
- if (!boot_cpu_has(X86_FEATURE_CX16) ||
- ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+ /* GAM requires GA mode. */
+ if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
@@ -1773,13 +1797,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- /*
- * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
- * XT, GAM also requires GA mode. Therefore, we need to
- * check cmpxchg16b support before enabling them.
- */
- if (!boot_cpu_has(X86_FEATURE_CX16) ||
- ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
+ /* XT and GAM require GA mode. */
+ if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
}
@@ -2145,7 +2164,7 @@ static void print_iommu_info(void)
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
- if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (amd_iommu_pgtable == PD_MODE_V2) {
pr_info("V2 page table enabled (Paging mode : %d level)\n",
amd_iommu_gpt_level);
}
@@ -2332,7 +2351,7 @@ static struct irq_chip intcapxt_controller = {
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = intcapxt_set_affinity,
.irq_set_wake = intcapxt_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED,
};
static const struct irq_domain_ops intcapxt_domain_ops = {
@@ -2575,9 +2594,9 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
return;
for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
- __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID);
if (!amd_iommu_snp_en)
- __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
+ set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION);
}
}
@@ -2605,8 +2624,7 @@ static void init_device_table(void)
for_each_pci_segment(pci_seg) {
for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
- __set_dev_entry_bit(pci_seg->dev_table,
- devid, DEV_ENTRY_IRQ_TBL_EN);
+ set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN);
}
}
@@ -3033,6 +3051,11 @@ static int __init early_amd_iommu_init(void)
return -EINVAL;
}
+ if (!boot_cpu_has(X86_FEATURE_CX16)) {
+ pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
+ return -EINVAL;
+ }
+
/*
* Validate checksum here so we don't need to do it when
* we actually parse the table
@@ -3059,10 +3082,10 @@ static int __init early_amd_iommu_init(void)
FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
- if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (amd_iommu_pgtable == PD_MODE_V2) {
if (!amd_iommu_v2_pgtbl_supported()) {
pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ amd_iommu_pgtable = PD_MODE_V1;
}
}
@@ -3185,7 +3208,7 @@ static void iommu_snp_enable(void)
goto disable_snp;
}
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ if (amd_iommu_pgtable != PD_MODE_V1) {
pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
goto disable_snp;
}
@@ -3398,25 +3421,23 @@ static bool amd_iommu_sme_check(void)
* IOMMUs
*
****************************************************************************/
-int __init amd_iommu_detect(void)
+void __init amd_iommu_detect(void)
{
int ret;
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
- return -ENODEV;
+ return;
if (!amd_iommu_sme_check())
- return -ENODEV;
+ return;
ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
if (ret)
- return ret;
+ return;
amd_iommu_detected = true;
iommu_detected = 1;
x86_init.iommu.iommu_init = amd_iommu_init;
-
- return 1;
}
/****************************************************************************
@@ -3464,9 +3485,9 @@ static int __init parse_amd_iommu_options(char *str)
} else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true;
} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ amd_iommu_pgtable = PD_MODE_V1;
} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
- amd_iommu_pgtable = AMD_IOMMU_V2;
+ amd_iommu_pgtable = PD_MODE_V2;
} else if (strncmp(str, "irtcachedis", 11) == 0) {
amd_iommu_irtcachedis = true;
} else if (strncmp(str, "nohugepages", 11) == 0) {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 16f40b8000d7..b48a72bd7b23 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -83,12 +83,142 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data);
+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
+
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
+
/****************************************************************************
*
* Helper functions
*
****************************************************************************/
+static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
+{
+ /*
+ * Note:
+ * We use arch_cmpxchg128_local() because:
+ * - The cmpxchg16b instruction is mainly needed for the 128-bit store
+ * to the DTE (the compare-exchange itself is not necessary since this
+ * function is already protected by a spin_lock for this DTE).
+ * - Neither LOCK_PREFIX nor a retry loop is needed because of the spin_lock.
+ */
+ arch_cmpxchg128_local(ptr, *ptr, val);
+}
+
+static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ struct dev_table_entry old;
+
+ old.data128[1] = ptr->data128[1];
+ /*
+ * Preserve DTE_DATA2_INTR_MASK. This needs to be
+ * done here since it requires to be inside
+ * spin_lock(&dev_data->dte_lock) context.
+ */
+ new->data[2] &= ~DTE_DATA2_INTR_MASK;
+ new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
+
+ amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
+}
+
+static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
+}
+
+/*
+ * Note:
+ * The IOMMU reads the entire Device Table entry in a single 256-bit
+ * transaction, but the driver programs the DTE using two 128-bit cmpxchg
+ * operations. So, the driver needs to ensure the following:
+ * - The DTE[V|GV] bit is written last when setting.
+ * - The DTE[V|GV] bit is written first when clearing.
+ *
+ * This function is used only by code that updates the DMA translation part
+ * of the DTE. So, only control bits related to DMA are considered when
+ * updating the entry.
+ */
+static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *new)
+{
+ unsigned long flags;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry *ptr = &dev_table[dev_data->devid];
+
+ spin_lock_irqsave(&dev_data->dte_lock, flags);
+
+ if (!(ptr->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is not valid. */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!(new->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is valid. New DTE is not valid. */
+ write_dte_lower128(ptr, new);
+ write_dte_upper128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
+ /*
+ * Both DTEs are valid.
+ * Existing DTE has no guest page table.
+ */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
+ /*
+ * Both DTEs are valid.
+ * Existing DTE has a guest page table,
+ * new DTE has no guest page table.
+ */
+ write_dte_lower128(ptr, new);
+ write_dte_upper128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
+ FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
+ /*
+ * Both DTEs are valid and have guest page tables,
+ * but with a different number of levels. So, we need
+ * to update both the upper and lower 128-bit values,
+ * which requires disabling and flushing.
+ */
+ struct dev_table_entry clear = {};
+
+ /* First disable DTE */
+ write_dte_lower128(ptr, &clear);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+
+ /* Then update DTE */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else {
+ /*
+ * Both DTEs are valid and have guest page tables
+ * with the same number of levels. We only need to
+ * update the lower 128 bits, so there is no need
+ * to disable the DTE.
+ */
+ write_dte_lower128(ptr, new);
+ }
+
+ spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
+
+static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *dte)
+{
+ unsigned long flags;
+ struct dev_table_entry *ptr;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ ptr = &dev_table[dev_data->devid];
+
+ spin_lock_irqsave(&dev_data->dte_lock, flags);
+ dte->data128[0] = ptr->data128[0];
+ dte->data128[1] = ptr->data128[1];
+ spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
+
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
return (pdom && (pdom->pd_mode == PD_MODE_V2));
@@ -209,6 +339,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return NULL;
mutex_init(&dev_data->mutex);
+ spin_lock_init(&dev_data->dte_lock);
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
@@ -216,7 +347,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return dev_data;
}
-static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
@@ -236,9 +367,11 @@ static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct dev_table_entry new;
struct amd_iommu *iommu;
- struct dev_table_entry *dev_table;
+ struct iommu_dev_data *dev_data, *alias_data;
u16 devid = pci_dev_id(pdev);
+ int ret = 0;
if (devid == alias)
return 0;
@@ -247,13 +380,27 @@ static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
if (!iommu)
return 0;
- amd_iommu_set_rlookup_table(iommu, alias);
- dev_table = get_dev_table(iommu);
- memcpy(dev_table[alias].data,
- dev_table[devid].data,
- sizeof(dev_table[alias].data));
+ /* Copy the data from pdev */
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ if (!dev_data) {
+ pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
+ ret = -EINVAL;
+ goto out;
+ }
+ get_dte256(iommu, dev_data, &new);
- return 0;
+ /* Setup alias */
+ alias_data = find_dev_data(iommu, alias);
+ if (!alias_data) {
+ pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
+ ret = -EINVAL;
+ goto out;
+ }
+ update_dte256(iommu, alias_data, &new);
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+out:
+ return ret;
}
static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
@@ -526,6 +673,12 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
return -ENOMEM;
dev_data->dev = dev;
+
+ /*
+ * The dev_iommu_priv_set() needs to be called before setup_aliases.
+ * Otherwise, a subsequent call to dev_iommu_priv_get() will fail.
+ */
+ dev_iommu_priv_set(dev, dev_data);
setup_aliases(iommu, dev);
/*
@@ -539,8 +692,6 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
dev_data->flags = pdev_get_caps(to_pci_dev(dev));
}
- dev_iommu_priv_set(dev, dev_data);
-
return 0;
}
@@ -571,10 +722,13 @@ static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry dte;
+ struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
+
+ get_dte256(iommu, dev_data, &dte);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -1261,6 +1415,15 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
return iommu_queue_command(iommu, &cmd);
}
+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
+{
+ int ret;
+
+ ret = iommu_flush_dte(iommu, devid);
+ if (!ret)
+ iommu_completion_wait(iommu);
+}
+
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
@@ -1603,15 +1766,6 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
domain_flush_complete(domain);
}
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
- /* Update device table */
- amd_iommu_update_and_flush_device_table(domain);
-
- /* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_all(domain);
-}
-
int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
struct iommu_dev_data *dev_data;
@@ -1826,90 +1980,109 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
return ret;
}
+static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr,
+ struct dev_table_entry *new)
+{
+ /* All existing DTE must have V bit set */
+ new->data128[0] = DTE_FLAG_V;
+ new->data128[1] = 0;
+}
+
+/*
+ * Note:
+ * The old values for the GCR3 table and GPT have been cleared by the caller.
+ */
+static void set_dte_gcr3_table(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data,
+ struct dev_table_entry *target)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ u64 gcr3;
+
+ if (!gcr3_info->gcr3_tbl)
+ return;
+
+ pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n",
+ __func__, dev_data->devid, gcr3_info->glx,
+ (unsigned long long)gcr3_info->gcr3_tbl);
+
+ gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+
+ target->data[0] |= DTE_FLAG_GV |
+ FIELD_PREP(DTE_GLX, gcr3_info->glx) |
+ FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
+ if (pdom_is_v2_pgtbl_mode(dev_data->domain))
+ target->data[0] |= DTE_FLAG_GIOV;
+
+ target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
+ FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
+
+ /* Guest page table can only support 4 and 5 levels */
+ if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
+ target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
+ else
+ target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
+}
+
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data)
{
- u64 pte_root = 0;
- u64 flags = 0;
- u32 old_domid;
- u16 devid = dev_data->devid;
u16 domid;
+ u32 old_domid;
+ struct dev_table_entry *initial_dte;
+ struct dev_table_entry new = {};
struct protection_domain *domain = dev_data->domain;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
if (gcr3_info && gcr3_info->gcr3_tbl)
domid = dev_data->gcr3_info.domid;
else
domid = domain->id;
+ make_clear_dte(dev_data, dte, &new);
+
if (domain->iop.mode != PAGE_MODE_NONE)
- pte_root = iommu_virt_to_phys(domain->iop.root);
+ new.data[0] = iommu_virt_to_phys(domain->iop.root);
- pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
+ new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+ new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
/*
- * When SNP is enabled, Only set TV bit when IOMMU
- * page translation is in use.
+ * When SNP is enabled, TV=1 is only supported with a non-zero domain ID.
+ * The domid == 0 case is prevented by the SNP-enable and
+ * IOMMU_DOMAIN_IDENTITY check in do_iommu_domain_alloc().
*/
- if (!amd_iommu_snp_en || (domid != 0))
- pte_root |= DTE_FLAG_TV;
-
- flags = dev_table[devid].data[1];
-
- if (dev_data->ats_enabled)
- flags |= DTE_FLAG_IOTLB;
+ WARN_ON(amd_iommu_snp_en && (domid == 0));
+ new.data[0] |= DTE_FLAG_TV;
if (dev_data->ppr)
- pte_root |= 1ULL << DEV_ENTRY_PPR;
+ new.data[0] |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
- pte_root |= DTE_FLAG_HAD;
-
- if (gcr3_info && gcr3_info->gcr3_tbl) {
- u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
- u64 glx = gcr3_info->glx;
- u64 tmp;
+ new.data[0] |= DTE_FLAG_HAD;
- pte_root |= DTE_FLAG_GV;
- pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
-
- /* First mask out possible old values for GCR3 table */
- tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
- flags &= ~tmp;
-
- tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
- flags &= ~tmp;
-
- /* Encode GCR3 table into DTE */
- tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
- pte_root |= tmp;
-
- tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
- flags |= tmp;
-
- tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
- flags |= tmp;
+ if (dev_data->ats_enabled)
+ new.data[1] |= DTE_FLAG_IOTLB;
- if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
- dev_table[devid].data[2] |=
- ((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
- }
+ old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK;
+ new.data[1] |= domid;
- /* GIOV is supported with V2 page table mode only */
- if (pdom_is_v2_pgtbl_mode(domain))
- pte_root |= DTE_FLAG_GIOV;
+ /*
+ * Restore cached persistent DTE bits, which may have been set from
+ * information in the IVRS table. See set_dev_entry_from_acpi().
+ */
+ initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid);
+ if (initial_dte) {
+ new.data128[0] |= initial_dte->data128[0];
+ new.data128[1] |= initial_dte->data128[1];
}
- flags &= ~DEV_DOMID_MASK;
- flags |= domid;
+ set_dte_gcr3_table(iommu, dev_data, &new);
- old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
- dev_table[devid].data[1] = flags;
- dev_table[devid].data[0] = pte_root;
+ update_dte256(iommu, dev_data, &new);
/*
* A kdump kernel might be replacing a domain ID that was copied from
@@ -1921,19 +2094,16 @@ static void set_dte_entry(struct amd_iommu *iommu,
}
}
-static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
+/*
+ * Clear DMA-remap related flags to block all DMA (blocked domain)
+ */
+static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- /* remove entry from the device table seen by the hardware */
- dev_table[devid].data[0] = DTE_FLAG_V;
-
- if (!amd_iommu_snp_en)
- dev_table[devid].data[0] |= DTE_FLAG_TV;
+ struct dev_table_entry new = {};
+ struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
- dev_table[devid].data[1] &= DTE_FLAG_MASK;
-
- amd_iommu_apply_erratum_63(iommu, devid);
+ make_clear_dte(dev_data, dte, &new);
+ update_dte256(iommu, dev_data, &new);
}
/* Update and flush DTE for the given device */
@@ -1944,7 +2114,7 @@ static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
if (set)
set_dte_entry(iommu, dev_data);
else
- clear_dte_entry(iommu, dev_data->devid);
+ clear_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
@@ -2007,7 +2177,6 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
struct protection_domain *pdom)
{
struct pdom_iommu_info *pdom_iommu_info, *curr;
- struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
unsigned long flags;
int ret = 0;
@@ -2036,10 +2205,6 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
goto out_unlock;
}
- /* Update NUMA Node ID */
- if (cfg->amd.nid == NUMA_NO_NODE)
- cfg->amd.nid = dev_to_node(&iommu->dev->dev);
-
out_unlock:
spin_unlock_irqrestore(&pdom->lock, flags);
return ret;
@@ -2276,16 +2441,15 @@ void protection_domain_free(struct protection_domain *domain)
kfree(domain);
}
-static void protection_domain_init(struct protection_domain *domain, int nid)
+static void protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
INIT_LIST_HEAD(&domain->dev_data_list);
xa_init(&domain->iommu_array);
- domain->iop.pgtbl.cfg.amd.nid = nid;
}
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
+struct protection_domain *protection_domain_alloc(void)
{
struct protection_domain *domain;
int domid;
@@ -2301,42 +2465,37 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
}
domain->id = domid;
- protection_domain_init(domain, nid);
+ protection_domain_init(domain);
return domain;
}
static int pdom_setup_pgtable(struct protection_domain *domain,
- unsigned int type, int pgtable)
+ struct device *dev)
{
struct io_pgtable_ops *pgtbl_ops;
+ enum io_pgtable_fmt fmt;
- /* No need to allocate io pgtable ops in passthrough mode */
- if (!(type & __IOMMU_DOMAIN_PAGING))
- return 0;
-
- switch (pgtable) {
- case AMD_IOMMU_V1:
- domain->pd_mode = PD_MODE_V1;
+ switch (domain->pd_mode) {
+ case PD_MODE_V1:
+ fmt = AMD_IOMMU_V1;
break;
- case AMD_IOMMU_V2:
- domain->pd_mode = PD_MODE_V2;
+ case PD_MODE_V2:
+ fmt = AMD_IOMMU_V2;
break;
- default:
- return -EINVAL;
}
- pgtbl_ops =
- alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
+ domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
if (!pgtbl_ops)
return -ENOMEM;
return 0;
}
-static inline u64 dma_max_address(int pgtable)
+static inline u64 dma_max_address(enum protection_domain_mode pgtable)
{
- if (pgtable == AMD_IOMMU_V1)
+ if (pgtable == PD_MODE_V1)
return ~0ULL;
/* V2 with 4/5 level page table */
@@ -2348,31 +2507,21 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
return iommu && (iommu->features & FEATURE_HDSUP);
}
-static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
- struct device *dev,
- u32 flags, int pgtable)
+static struct iommu_domain *
+do_iommu_domain_alloc(struct device *dev, u32 flags,
+ enum protection_domain_mode pgtable)
{
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
struct protection_domain *domain;
- struct amd_iommu *iommu = NULL;
int ret;
- if (dev)
- iommu = get_amd_iommu_from_dev(dev);
-
- /*
- * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
- * default to use IOMMU_DOMAIN_DMA[_FQ].
- */
- if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
- return ERR_PTR(-EINVAL);
-
- domain = protection_domain_alloc(type,
- dev ? dev_to_node(dev) : NUMA_NO_NODE);
+ domain = protection_domain_alloc();
if (!domain)
return ERR_PTR(-ENOMEM);
- ret = pdom_setup_pgtable(domain, type, pgtable);
+ domain->pd_mode = pgtable;
+ ret = pdom_setup_pgtable(domain, dev);
if (ret) {
pdom_id_free(domain->id);
kfree(domain);
@@ -2384,72 +2533,45 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
domain->domain.geometry.force_aperture = true;
domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
- if (iommu) {
- domain->domain.type = type;
- domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+ domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+ domain->domain.ops = iommu->iommu.ops->default_domain_ops;
- if (dirty_tracking)
- domain->domain.dirty_ops = &amd_dirty_ops;
- }
+ if (dirty_tracking)
+ domain->domain.dirty_ops = &amd_dirty_ops;
return &domain->domain;
}
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
-{
- struct iommu_domain *domain;
- int pgtable = amd_iommu_pgtable;
-
- /*
- * Force IOMMU v1 page table when allocating
- * domain for pass-through devices.
- */
- if (type == IOMMU_DOMAIN_UNMANAGED)
- pgtable = AMD_IOMMU_V1;
-
- domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
- if (IS_ERR(domain))
- return NULL;
-
- return domain;
-}
-
static struct iommu_domain *
amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
- unsigned int type = IOMMU_DOMAIN_UNMANAGED;
- struct amd_iommu *iommu = NULL;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID;
- if (dev)
- iommu = get_amd_iommu_from_dev(dev);
-
if ((flags & ~supported_flags) || user_data)
return ERR_PTR(-EOPNOTSUPP);
- /* Allocate domain with v2 page table if IOMMU supports PASID. */
- if (flags & IOMMU_HWPT_ALLOC_PASID) {
+ switch (flags & supported_flags) {
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+ /* Allocate domain with v1 page table for dirty tracking */
+ if (!amd_iommu_hd_support(iommu))
+ break;
+ return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
+ case IOMMU_HWPT_ALLOC_PASID:
+ /* Allocate domain with v2 page table if IOMMU supports PASID. */
if (!amd_iommu_pasid_supported())
- return ERR_PTR(-EOPNOTSUPP);
-
- return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
- }
-
- /* Allocate domain with v1 page table for dirty tracking */
- if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
- if (iommu && amd_iommu_hd_support(iommu)) {
- return do_iommu_domain_alloc(type, dev,
- flags, AMD_IOMMU_V1);
- }
-
- return ERR_PTR(-EOPNOTSUPP);
+ break;
+ return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
+ case 0:
+ /* If nothing specific is required, use the kernel command-line default */
+ return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
+ default:
+ break;
}
-
- /* If nothing specific is required use the kernel commandline default */
- return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
+ return ERR_PTR(-EOPNOTSUPP);
}
void amd_iommu_domain_free(struct iommu_domain *dom)
@@ -2475,10 +2597,19 @@ static int blocked_domain_attach_device(struct iommu_domain *domain,
return 0;
}
+static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
+{
+ amd_iommu_remove_dev_pasid(dev, pasid, old);
+ return 0;
+}
+
static struct iommu_domain blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocked_domain_attach_device,
+ .set_dev_pasid = blocked_domain_set_dev_pasid,
}
};
@@ -2498,7 +2629,7 @@ void amd_iommu_init_identity_domain(void)
identity_domain.id = pdom_id_alloc();
- protection_domain_init(&identity_domain, NUMA_NO_NODE);
+ protection_domain_init(&identity_domain);
}
/* Same as blocked domain except it supports only ops->attach_dev() */
@@ -2666,12 +2797,12 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct protection_domain *pdomain = to_pdomain(domain);
- struct dev_table_entry *dev_table;
+ struct dev_table_entry *dte;
struct iommu_dev_data *dev_data;
bool domain_flush = false;
struct amd_iommu *iommu;
unsigned long flags;
- u64 pte_root;
+ u64 new;
spin_lock_irqsave(&pdomain->lock, flags);
if (!(pdomain->dirty_tracking ^ enable)) {
@@ -2680,16 +2811,15 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+ spin_lock(&dev_data->dte_lock);
iommu = get_amd_iommu_from_dev_data(dev_data);
-
- dev_table = get_dev_table(iommu);
- pte_root = dev_table[dev_data->devid].data[0];
-
- pte_root = (enable ? pte_root | DTE_FLAG_HAD :
- pte_root & ~DTE_FLAG_HAD);
+ dte = &get_dev_table(iommu)[dev_data->devid];
+ new = dte->data[0];
+ new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
+ dte->data[0] = new;
+ spin_unlock(&dev_data->dte_lock);
/* Flush device DTE */
- dev_table[dev_data->devid].data[0] = pte_root;
device_flush_dte(dev_data);
domain_flush = true;
}
@@ -2890,7 +3020,6 @@ const struct iommu_ops amd_iommu_ops = {
.blocked_domain = &blocked_domain,
.release_domain = &release_domain,
.identity_domain = &identity_domain.domain,
- .domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = amd_iommu_domain_alloc_sva,
.probe_device = amd_iommu_probe_device,
@@ -2901,7 +3030,6 @@ const struct iommu_ops amd_iommu_ops = {
.def_domain_type = amd_iommu_def_domain_type,
.dev_enable_feat = amd_iommu_dev_enable_feature,
.dev_disable_feat = amd_iommu_dev_disable_feature,
- .remove_dev_pasid = amd_iommu_remove_dev_pasid,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -2956,17 +3084,23 @@ out:
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
- u64 dte;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
+ u64 new;
+ struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
+ struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
+
+ if (dev_data)
+ spin_lock(&dev_data->dte_lock);
- dte = dev_table[devid].data[2];
- dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
- dte |= iommu_virt_to_phys(table->table);
- dte |= DTE_IRQ_REMAP_INTCTL;
- dte |= DTE_INTTABLEN;
- dte |= DTE_IRQ_REMAP_ENABLE;
+ new = READ_ONCE(dte->data[2]);
+ new &= ~DTE_IRQ_PHYS_ADDR_MASK;
+ new |= iommu_virt_to_phys(table->table);
+ new |= DTE_IRQ_REMAP_INTCTL;
+ new |= DTE_INTTABLEN;
+ new |= DTE_IRQ_REMAP_ENABLE;
+ WRITE_ONCE(dte->data[2], new);
- dev_table[devid].data[2] = dte;
+ if (dev_data)
+ spin_unlock(&dev_data->dte_lock);
}
static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
@@ -3540,7 +3674,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
- irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
return 0;
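The DTE updates in this patch all follow one discipline: take the per-device dte_lock, read the live entry, modify it, and publish the whole word with a single store. A minimal sketch of that pattern, assuming the dte_lock and data[] fields from the hunks above (the helper name is illustrative, not part of the patch):

static void pdom_set_dte_bit(struct iommu_dev_data *dev_data,
			     struct dev_table_entry *dte, u64 bit)
{
	u64 new;

	spin_lock(&dev_data->dte_lock);		/* serialize per-device DTE updates */
	new = READ_ONCE(dte->data[0]);		/* snapshot the live entry */
	new |= bit;
	WRITE_ONCE(dte->data[0], new);		/* publish the word in one store */
	spin_unlock(&dev_data->dte_lock);
}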
diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
index 8c73a30c2800..11150cfd6718 100644
--- a/drivers/iommu/amd/pasid.c
+++ b/drivers/iommu/amd/pasid.c
@@ -185,12 +185,13 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct protection_domain *pdom;
int ret;
- pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev));
+ pdom = protection_domain_alloc();
if (!pdom)
return ERR_PTR(-ENOMEM);
pdom->domain.ops = &amd_sva_domain_ops;
pdom->mn.ops = &sva_mn;
+ pdom->domain.type = IOMMU_DOMAIN_SVA;
ret = mmu_notifier_register(&pdom->mn, mm);
if (ret) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index c7cc613050d9..5aa2e7af58b4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -178,18 +178,12 @@ arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
const struct iommu_user_data *user_data)
{
struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
- const u32 SUPPORTED_FLAGS = IOMMU_HWPT_FAULT_ID_VALID;
struct arm_smmu_nested_domain *nested_domain;
struct iommu_hwpt_arm_smmuv3 arg;
bool enable_ats = false;
int ret;
- /*
- * Faults delivered to the nested domain are faults that originated by
- * the S1 in the domain. The core code will match all PASIDs when
- * delivering the fault due to user_pasid_table
- */
- if (flags & ~SUPPORTED_FLAGS)
+ if (flags)
return ERR_PTR(-EOPNOTSUPP);
ret = iommu_copy_struct_from_user(&arg, user_data,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 1d3e71569775..9ba596430e7c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -112,6 +112,15 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
* from the current CPU register
*/
target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+
+ /*
+ * Note that we don't bother with S1PIE on the SMMU; we just rely on
+ * our default encoding scheme matching direct permissions anyway.
+ * SMMU has no notion of S1POE nor GCS, so make sure that is clear if
+ * either is enabled for CPUs, just in case anyone imagines otherwise.
+ */
+ if (system_supports_poe() || system_supports_gcs())
+ dev_warn_once(master->smmu->dev, "SVA devices ignore permission overlays and GCS\n");
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
@@ -206,8 +215,12 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
unsigned long asid_bits;
u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;
- if (vabits_actual == 52)
+ if (vabits_actual == 52) {
+ /* We don't support LPA2 */
+ if (PAGE_SIZE != SZ_64K)
+ return false;
feat_mask |= ARM_SMMU_FEAT_VAX;
+ }
if ((smmu->features & feat_mask) != feat_mask)
return false;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index a5c7002ff75b..358072b4e293 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <kunit/visibility.h>
#include <uapi/linux/iommufd.h>
@@ -83,8 +84,28 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
-static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_device *smmu, u32 flags);
+static const char * const event_str[] = {
+ [EVT_ID_BAD_STREAMID_CONFIG] = "C_BAD_STREAMID",
+ [EVT_ID_STE_FETCH_FAULT] = "F_STE_FETCH",
+ [EVT_ID_BAD_STE_CONFIG] = "C_BAD_STE",
+ [EVT_ID_STREAM_DISABLED_FAULT] = "F_STREAM_DISABLED",
+ [EVT_ID_BAD_SUBSTREAMID_CONFIG] = "C_BAD_SUBSTREAMID",
+ [EVT_ID_CD_FETCH_FAULT] = "F_CD_FETCH",
+ [EVT_ID_BAD_CD_CONFIG] = "C_BAD_CD",
+ [EVT_ID_TRANSLATION_FAULT] = "F_TRANSLATION",
+ [EVT_ID_ADDR_SIZE_FAULT] = "F_ADDR_SIZE",
+ [EVT_ID_ACCESS_FAULT] = "F_ACCESS",
+ [EVT_ID_PERMISSION_FAULT] = "F_PERMISSION",
+ [EVT_ID_VMS_FETCH_FAULT] = "F_VMS_FETCH",
+};
+
+static const char * const event_class_str[] = {
+ [0] = "CD fetch",
+ [1] = "Stage 1 translation table fetch",
+ [2] = "Input address caused fault",
+ [3] = "Reserved",
+};
+
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
static void parse_driver_options(struct arm_smmu_device *smmu)
@@ -1759,17 +1780,49 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
}
/* IRQ and event handlers */
-static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
+static void arm_smmu_decode_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *event)
+{
+ struct arm_smmu_master *master;
+
+ event->id = FIELD_GET(EVTQ_0_ID, raw[0]);
+ event->sid = FIELD_GET(EVTQ_0_SID, raw[0]);
+ event->ssv = FIELD_GET(EVTQ_0_SSV, raw[0]);
+ event->ssid = event->ssv ? FIELD_GET(EVTQ_0_SSID, raw[0]) : IOMMU_NO_PASID;
+ event->privileged = FIELD_GET(EVTQ_1_PnU, raw[1]);
+ event->instruction = FIELD_GET(EVTQ_1_InD, raw[1]);
+ event->s2 = FIELD_GET(EVTQ_1_S2, raw[1]);
+ event->read = FIELD_GET(EVTQ_1_RnW, raw[1]);
+ event->stag = FIELD_GET(EVTQ_1_STAG, raw[1]);
+ event->stall = FIELD_GET(EVTQ_1_STALL, raw[1]);
+ event->class = FIELD_GET(EVTQ_1_CLASS, raw[1]);
+ event->iova = FIELD_GET(EVTQ_2_ADDR, raw[2]);
+ event->ipa = raw[3] & EVTQ_3_IPA;
+ event->fetch_addr = raw[3] & EVTQ_3_FETCH_ADDR;
+ event->ttrnw = FIELD_GET(EVTQ_1_TT_READ, raw[1]);
+ event->class_tt = false;
+ event->dev = NULL;
+
+ if (event->id == EVT_ID_PERMISSION_FAULT)
+ event->class_tt = (event->class == EVTQ_1_CLASS_TT);
+
+ mutex_lock(&smmu->streams_mutex);
+ master = arm_smmu_find_master(smmu, event->sid);
+ if (master)
+ event->dev = get_device(master->dev);
+ mutex_unlock(&smmu->streams_mutex);
+}
+
+static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
+ struct arm_smmu_event *event)
{
int ret = 0;
u32 perm = 0;
struct arm_smmu_master *master;
- bool ssid_valid = evt[0] & EVTQ_0_SSV;
- u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
struct iopf_fault fault_evt = { };
struct iommu_fault *flt = &fault_evt.fault;
- switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
+ switch (event->id) {
case EVT_ID_TRANSLATION_FAULT:
case EVT_ID_ADDR_SIZE_FAULT:
case EVT_ID_ACCESS_FAULT:
@@ -1779,35 +1832,35 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
return -EOPNOTSUPP;
}
- if (!(evt[1] & EVTQ_1_STALL))
+ if (!event->stall)
return -EOPNOTSUPP;
- if (evt[1] & EVTQ_1_RnW)
+ if (event->read)
perm |= IOMMU_FAULT_PERM_READ;
else
perm |= IOMMU_FAULT_PERM_WRITE;
- if (evt[1] & EVTQ_1_InD)
+ if (event->instruction)
perm |= IOMMU_FAULT_PERM_EXEC;
- if (evt[1] & EVTQ_1_PnU)
+ if (event->privileged)
perm |= IOMMU_FAULT_PERM_PRIV;
flt->type = IOMMU_FAULT_PAGE_REQ;
flt->prm = (struct iommu_fault_page_request) {
.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .grpid = event->stag,
.perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ .addr = event->iova,
};
- if (ssid_valid) {
+ if (event->ssv) {
flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+ flt->prm.pasid = event->ssid;
}
mutex_lock(&smmu->streams_mutex);
- master = arm_smmu_find_master(smmu, sid);
+ master = arm_smmu_find_master(smmu, event->sid);
if (!master) {
ret = -EINVAL;
goto out_unlock;
@@ -1819,29 +1872,82 @@ out_unlock:
return ret;
}
+static void arm_smmu_dump_raw_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *event)
+{
+ int i;
+
+ dev_err(smmu->dev, "event 0x%02x received:\n", event->id);
+
+ for (i = 0; i < EVTQ_ENT_DWORDS; ++i)
+ dev_err(smmu->dev, "\t0x%016llx\n", raw[i]);
+}
+
+#define ARM_SMMU_EVT_KNOWN(e) ((e)->id < ARRAY_SIZE(event_str) && event_str[(e)->id])
+#define ARM_SMMU_LOG_EVT_STR(e) ARM_SMMU_EVT_KNOWN(e) ? event_str[(e)->id] : "UNKNOWN"
+#define ARM_SMMU_LOG_CLIENT(e) (e)->dev ? dev_name((e)->dev) : "(unassigned sid)"
+
+static void arm_smmu_dump_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *evt,
+ struct ratelimit_state *rs)
+{
+ if (!__ratelimit(rs))
+ return;
+
+ arm_smmu_dump_raw_event(smmu, raw, evt);
+
+ switch (evt->id) {
+ case EVT_ID_TRANSLATION_FAULT:
+ case EVT_ID_ADDR_SIZE_FAULT:
+ case EVT_ID_ACCESS_FAULT:
+ case EVT_ID_PERMISSION_FAULT:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x iova: %#llx ipa: %#llx",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid, evt->iova, evt->ipa);
+
+ dev_err(smmu->dev, "%s %s %s %s \"%s\"%s%s stag: %#x",
+ evt->privileged ? "priv" : "unpriv",
+ evt->instruction ? "inst" : "data",
+ str_read_write(evt->read),
+ evt->s2 ? "s2" : "s1", event_class_str[evt->class],
+ evt->class_tt ? (evt->ttrnw ? " ttd_read" : " ttd_write") : "",
+ evt->stall ? " stall" : "", evt->stag);
+
+ break;
+
+ case EVT_ID_STE_FETCH_FAULT:
+ case EVT_ID_CD_FETCH_FAULT:
+ case EVT_ID_VMS_FETCH_FAULT:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x fetch_addr: %#llx",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid, evt->fetch_addr);
+
+ break;
+
+ default:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid);
+ }
+}
+
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
- int i, ret;
+ u64 evt[EVTQ_ENT_DWORDS];
+ struct arm_smmu_event event = {0};
struct arm_smmu_device *smmu = dev;
struct arm_smmu_queue *q = &smmu->evtq.q;
struct arm_smmu_ll_queue *llq = &q->llq;
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- u64 evt[EVTQ_ENT_DWORDS];
do {
while (!queue_remove_raw(q, evt)) {
- u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
-
- ret = arm_smmu_handle_evt(smmu, evt);
- if (!ret || !__ratelimit(&rs))
- continue;
-
- dev_info(smmu->dev, "event 0x%02x received:\n", id);
- for (i = 0; i < ARRAY_SIZE(evt); ++i)
- dev_info(smmu->dev, "\t0x%016llx\n",
- (unsigned long long)evt[i]);
+ arm_smmu_decode_event(smmu, evt, &event);
+ if (arm_smmu_handle_event(smmu, &event))
+ arm_smmu_dump_event(smmu, evt, &event, &rs);
+ put_device(event.dev);
cond_resched();
}
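The dump path above relies on the ratelimit state declared in the IRQ thread rather than per-call checks. A self-contained sketch of that standard kernel idiom, not specific to this driver:

#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void report_fault(struct device *dev, const char *msg)
{
	if (__ratelimit(&rs))	/* true while within the burst budget */
		dev_err(dev, "%s\n", msg);
}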
@@ -2353,39 +2459,12 @@ struct arm_smmu_domain *arm_smmu_domain_alloc(void)
if (!smmu_domain)
return ERR_PTR(-ENOMEM);
- mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
return smmu_domain;
}
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
-{
- struct arm_smmu_domain *smmu_domain;
-
- /*
- * Allocate the domain and initialise some of its data structures.
- * We can't really do anything meaningful until we've added a
- * master.
- */
- smmu_domain = arm_smmu_domain_alloc();
- if (IS_ERR(smmu_domain))
- return ERR_CAST(smmu_domain);
-
- if (dev) {
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- int ret;
-
- ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, 0);
- if (ret) {
- kfree(smmu_domain);
- return ERR_PTR(ret);
- }
- }
- return &smmu_domain->domain;
-}
-
static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2451,12 +2530,6 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_domain *smmu_domain);
bool enable_dirty = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- /* Restrict the stage to what we can actually support */
- if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
- smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
- if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
@@ -2745,9 +2818,14 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
* Translation Requests and Translated transactions are denied
* as though ATS is disabled for the stream (STE.EATS == 0b00),
* causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events
- * (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be
- * enabled if we have arm_smmu_domain, those always have page
- * tables.
+ * (IHI0070Ea 5.2 Stream Table Entry).
+ *
+ * However, if we have installed a CD table and are using S1DSS
+ * then ATS will work in S1DSS bypass. See "13.6.4 Full ATS
+ * skipping stage 1".
+ *
+ * Disable ATS if we are going to create a normal 0b100 bypass
+ * STE.
*/
state->ats_enabled = !state->disable_ats &&
arm_smmu_ats_supported(master);
@@ -2853,15 +2931,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
state.master = master = dev_iommu_priv_get(dev);
smmu = master->smmu;
- mutex_lock(&smmu_domain->init_mutex);
-
- if (!smmu_domain->smmu) {
- ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
- } else if (smmu_domain->smmu != smmu)
- ret = -EINVAL;
-
- mutex_unlock(&smmu_domain->init_mutex);
- if (ret)
+ if (smmu_domain->smmu != smmu)
return ret;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
@@ -2918,16 +2988,9 @@ static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_cd target_cd;
- int ret = 0;
- mutex_lock(&smmu_domain->init_mutex);
- if (!smmu_domain->smmu)
- ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
- else if (smmu_domain->smmu != smmu)
- ret = -EINVAL;
- mutex_unlock(&smmu_domain->init_mutex);
- if (ret)
- return ret;
+ if (smmu_domain->smmu != smmu)
+ return -EINVAL;
if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
return -EINVAL;
@@ -3016,13 +3079,12 @@ out_unlock:
return ret;
}
-static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain)
+static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old_domain)
{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(old_domain);
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- struct arm_smmu_domain *smmu_domain;
-
- smmu_domain = to_smmu_domain(domain);
mutex_lock(&arm_smmu_asid_lock);
arm_smmu_clear_cd(master, pasid);
@@ -3043,6 +3105,7 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
sid_domain->type == IOMMU_DOMAIN_BLOCKED)
sid_domain->ops->attach_dev(sid_domain, dev);
}
+ return 0;
}
static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
@@ -3070,8 +3133,10 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
if (arm_smmu_ssids_in_use(&master->cd_table)) {
/*
* If a CD table has to be present then we need to run with ATS
- * on even though the RID will fail ATS queries with UR. This is
- * because we have no idea what the PASID's need.
+ * on because we have to assume a PASID is using ATS. For
+ * IDENTITY this will set things up so that S1DSS=bypass, which
+ * follows the explanation in "13.6.4 Full ATS skipping stage 1"
+ * and allows ATS on the RID to work.
*/
state.cd_needs_ats = true;
arm_smmu_attach_prepare(&state, domain);
@@ -3124,6 +3189,7 @@ static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
static const struct iommu_domain_ops arm_smmu_blocked_ops = {
.attach_dev = arm_smmu_attach_dev_blocked,
+ .set_dev_pasid = arm_smmu_blocking_set_dev_pasid,
};
static struct iommu_domain arm_smmu_blocked_domain = {
@@ -3136,6 +3202,7 @@ arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = master->smmu;
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID |
IOMMU_HWPT_ALLOC_NEST_PARENT;
@@ -3147,25 +3214,43 @@ arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
if (user_data)
return ERR_PTR(-EOPNOTSUPP);
- if (flags & IOMMU_HWPT_ALLOC_PASID)
- return arm_smmu_domain_alloc_paging(dev);
-
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
- if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT) {
- if (!(master->smmu->features & ARM_SMMU_FEAT_NESTING)) {
+ switch (flags) {
+ case 0:
+ /* Prefer S1 if available */
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ break;
+ case IOMMU_HWPT_ALLOC_NEST_PARENT:
+ if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) {
ret = -EOPNOTSUPP;
goto err_free;
}
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
smmu_domain->nest_parent = true;
+ break;
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_PASID:
+ case IOMMU_HWPT_ALLOC_PASID:
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) {
+ ret = -EOPNOTSUPP;
+ goto err_free;
+ }
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err_free;
}
smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops;
- ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags);
+ ret = arm_smmu_domain_finalise(smmu_domain, smmu, flags);
if (ret)
goto err_free;
return &smmu_domain->domain;
@@ -3237,8 +3322,8 @@ static struct platform_driver arm_smmu_driver;
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
- fwnode);
+ struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -3543,7 +3628,6 @@ static struct iommu_ops arm_smmu_ops = {
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.hw_info = arm_smmu_hw_info,
- .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.domain_alloc_sva = arm_smmu_sva_domain_alloc,
.domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags,
.probe_device = arm_smmu_probe_device,
@@ -3551,7 +3635,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .remove_dev_pasid = arm_smmu_remove_dev_pasid,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
@@ -4239,7 +4322,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
*/
if (!!(reg & IDR0_COHACC) != coherent)
dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
- coherent ? "true" : "false");
+ str_true_false(coherent));
switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
case IDR0_STALL_MODEL_FORCE:
@@ -4663,7 +4746,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Initialise in-memory data structures */
ret = arm_smmu_init_structures(smmu);
if (ret)
- return ret;
+ goto err_free_iopf;
/* Record our private device structure */
platform_set_drvdata(pdev, smmu);
@@ -4674,22 +4757,29 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Reset the device */
ret = arm_smmu_device_reset(smmu);
if (ret)
- return ret;
+ goto err_disable;
/* And we're up. Go go go! */
ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
"smmu3.%pa", &ioaddr);
if (ret)
- return ret;
+ goto err_disable;
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- iommu_device_sysfs_remove(&smmu->iommu);
- return ret;
+ goto err_free_sysfs;
}
return 0;
+
+err_free_sysfs:
+ iommu_device_sysfs_remove(&smmu->iommu);
+err_disable:
+ arm_smmu_device_disable(smmu);
+err_free_iopf:
+ iopf_queue_free(smmu->evtq.iopf);
+ return ret;
}
static void arm_smmu_device_remove(struct platform_device *pdev)
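The probe path now unwinds in reverse order of setup through the goto ladder above. A generic sketch of the idiom, detached from this driver (setup_a/setup_b/undo_a are hypothetical stand-ins):

static int example_probe(struct device *dev)
{
	int ret;

	ret = setup_a(dev);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = setup_b(dev);
	if (ret)
		goto err_undo_a;	/* undo only what already succeeded */

	return 0;

err_undo_a:
	undo_a(dev);
	return ret;
}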
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 0107d3f333a1..bd9d7c85576a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -452,10 +452,18 @@ static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
+#define EVT_ID_BAD_STREAMID_CONFIG 0x02
+#define EVT_ID_STE_FETCH_FAULT 0x03
+#define EVT_ID_BAD_STE_CONFIG 0x04
+#define EVT_ID_STREAM_DISABLED_FAULT 0x06
+#define EVT_ID_BAD_SUBSTREAMID_CONFIG 0x08
+#define EVT_ID_CD_FETCH_FAULT 0x09
+#define EVT_ID_BAD_CD_CONFIG 0x0a
#define EVT_ID_TRANSLATION_FAULT 0x10
#define EVT_ID_ADDR_SIZE_FAULT 0x11
#define EVT_ID_ACCESS_FAULT 0x12
#define EVT_ID_PERMISSION_FAULT 0x13
+#define EVT_ID_VMS_FETCH_FAULT 0x25
#define EVTQ_0_SSV (1UL << 11)
#define EVTQ_0_SSID GENMASK_ULL(31, 12)
@@ -467,9 +475,11 @@ static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
#define EVTQ_1_RnW (1UL << 35)
#define EVTQ_1_S2 (1UL << 39)
#define EVTQ_1_CLASS GENMASK_ULL(41, 40)
+#define EVTQ_1_CLASS_TT 0x01
#define EVTQ_1_TT_READ (1UL << 44)
#define EVTQ_2_ADDR GENMASK_ULL(63, 0)
#define EVTQ_3_IPA GENMASK_ULL(51, 12)
+#define EVTQ_3_FETCH_ADDR GENMASK_ULL(51, 3)
/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT 4
@@ -789,6 +799,26 @@ struct arm_smmu_stream {
struct rb_node node;
};
+struct arm_smmu_event {
+ u8 stall : 1,
+ ssv : 1,
+ privileged : 1,
+ instruction : 1,
+ s2 : 1,
+ read : 1,
+ ttrnw : 1,
+ class_tt : 1;
+ u8 id;
+ u8 class;
+ u16 stag;
+ u32 sid;
+ u32 ssid;
+ u64 iova;
+ u64 ipa;
+ u64 fetch_addr;
+ struct device *dev;
+};
+
/* SMMU private data for each master */
struct arm_smmu_master {
struct arm_smmu_device *smmu;
@@ -813,7 +843,6 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
- struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
atomic_t nr_ats_masters;
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index 6e41ddaa24d6..d525ab43a4ae 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -79,7 +79,6 @@
#define TEGRA241_VCMDQ_PAGE1(q) (TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
#define VCMDQ_ADDR GENMASK(47, 5)
#define VCMDQ_LOG2SIZE GENMASK(4, 0)
-#define VCMDQ_LOG2SIZE_MAX 19
#define TEGRA241_VCMDQ_BASE 0x00000
#define TEGRA241_VCMDQ_CONS_INDX_BASE 0x00008
@@ -505,12 +504,15 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
struct arm_smmu_queue *q = &cmdq->q;
char name[16];
+ u32 regval;
int ret;
snprintf(name, 16, "vcmdq%u", vcmdq->idx);
- /* Queue size, capped to ensure natural alignment */
- q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);
+ /* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
+ regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
+ q->llq.max_n_shift =
+ min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));
/* Use the common helper to init the VCMDQ, and then... */
ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
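The cap above reads IDR1.CMDQS, the log2 of the largest command queue the SMMU supports, and clamps the VCMDQ shift to it. A short sketch of the computation using the helpers from the hunk (the example value is only for illustration):

	u32 regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	u32 cmdqs = FIELD_GET(IDR1_CMDQS, regval);	/* log2(max entries) */
	u32 shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, cmdqs);

	/* e.g. cmdqs == 19 would permit up to 1 << 19 entries; the driver
	 * still clamps to CMDQ_MAX_SZ_SHIFT to keep natural alignment. */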
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 99030e6b16e7..db9b9a8e139c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -110,7 +110,6 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
u32 reg, major;
- int i;
/*
* On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
* writes to the context bank ACTLRs will stick. And we just hope that
@@ -128,11 +127,12 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
+#ifdef CONFIG_ARM_SMMU_MMU_500_CPRE_ERRATA
/*
* Disable MMU-500's not-particularly-beneficial next-page
* prefetcher for the sake of at least 5 known errata.
*/
- for (i = 0; i < smmu->num_context_banks; ++i) {
+ for (int i = 0; i < smmu->num_context_banks; ++i) {
reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
reg &= ~ARM_MMU500_ACTLR_CPRE;
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
@@ -140,6 +140,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
if (reg & ARM_MMU500_ACTLR_CPRE)
dev_warn_once(smmu->dev, "Failed to disable prefetcher for errata workarounds, check SACR.CACHE_LOCK\n");
}
+#endif
return 0;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index 548783f3f8e8..d03b2239baad 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -73,7 +73,7 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
if (__ratelimit(&rs)) {
dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
- cfg = qsmmu->cfg;
+ cfg = qsmmu->data->cfg;
if (!cfg)
return;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 6372f3e25c4b..59d02687280e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -16,6 +16,40 @@
#define QCOM_DUMMY_VAL -1
+/*
+ * SMMU-500 TRM defines BIT(0) as CMTLB (Enable context caching in the
+ * macro TLB) and BIT(1) as CPRE (Enable context caching in the prefetch
+ * buffer). The remaining bits are implementation defined and vary across
+ * SoCs.
+ */
+
+#define CPRE (1 << 1)
+#define CMTLB (1 << 0)
+#define PREFETCH_SHIFT 8
+#define PREFETCH_DEFAULT 0
+#define PREFETCH_SHALLOW (1 << PREFETCH_SHIFT)
+#define PREFETCH_MODERATE (2 << PREFETCH_SHIFT)
+#define PREFETCH_DEEP (3 << PREFETCH_SHIFT)
+#define GFX_ACTLR_PRR (1 << 5)
+
+static const struct of_device_id qcom_smmu_actlr_client_of_match[] = {
+ { .compatible = "qcom,adreno",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,adreno-gmu",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,adreno-smmu",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,fastrpc",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,sc7280-mdss",
+ .data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
+ { .compatible = "qcom,sc7280-venus",
+ .data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
+ { .compatible = "qcom,sm8550-mdss",
+ .data = (const void *) (PREFETCH_DEFAULT | CMTLB) },
+ { }
+};
+
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
@@ -99,6 +133,47 @@ static void qcom_adreno_smmu_resume_translation(const void *cookie, bool termina
arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
}
+static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ u32 reg = 0;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(smmu->dev);
+ if (ret < 0) {
+ dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+ return;
+ }
+
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR);
+ reg &= ~GFX_ACTLR_PRR;
+ if (set)
+ reg |= FIELD_PREP(GFX_ACTLR_PRR, 1);
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+}
+
+static void qcom_adreno_smmu_set_prr_addr(const void *cookie, phys_addr_t page_addr)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(smmu->dev);
+ if (ret < 0) {
+ dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+ return;
+ }
+
+ writel_relaxed(lower_32_bits(page_addr),
+ smmu->base + ARM_SMMU_GFX_PRR_CFG_LADDR);
+ writel_relaxed(upper_32_bits(page_addr),
+ smmu->base + ARM_SMMU_GFX_PRR_CFG_UADDR);
+ pm_runtime_put_autosuspend(smmu->dev);
+}
+
#define QCOM_ADRENO_SMMU_GPU_SID 0
static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
@@ -207,13 +282,37 @@ static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
return true;
}
+static void qcom_smmu_set_actlr_dev(struct device *dev, struct arm_smmu_device *smmu, int cbndx,
+ const struct of_device_id *client_match)
+{
+ const struct of_device_id *match =
+ of_match_device(client_match, dev);
+
+ if (!match) {
+ dev_dbg(dev, "no ACTLR settings present\n");
+ return;
+ }
+
+ arm_smmu_cb_write(smmu, cbndx, ARM_SMMU_CB_ACTLR, (unsigned long)match->data);
+}
+
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ const struct device_node *np = smmu_domain->smmu->dev->of_node;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct of_device_id *client_match;
+ int cbndx = smmu_domain->cfg.cbndx;
struct adreno_smmu_priv *priv;
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+ client_match = qsmmu->data->client_match;
+
+ if (client_match)
+ qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);
+
/* Only enable split pagetables for the GPU device (SID 0) */
if (!qcom_adreno_smmu_is_gpu_device(dev))
return 0;
@@ -239,6 +338,14 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
priv->resume_translation = qcom_adreno_smmu_resume_translation;
+ priv->set_prr_bit = NULL;
+ priv->set_prr_addr = NULL;
+
+ if (of_device_is_compatible(np, "qcom,smmu-500") &&
+ of_device_is_compatible(np, "qcom,adreno-smmu")) {
+ priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
+ priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
+ }
return 0;
}
@@ -269,8 +376,18 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct of_device_id *client_match;
+ int cbndx = smmu_domain->cfg.cbndx;
+
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+ client_match = qsmmu->data->client_match;
+
+ if (client_match)
+ qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);
+
return 0;
}
@@ -507,7 +624,7 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
- qsmmu->cfg = data->cfg;
+ qsmmu->data = data;
return &qsmmu->smmu;
}
@@ -550,6 +667,7 @@ static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
.impl = &qcom_smmu_500_impl,
.adreno_impl = &qcom_adreno_smmu_500_impl,
.cfg = &qcom_smmu_impl0_cfg,
+ .client_match = qcom_smmu_actlr_client_of_match,
};
/*
@@ -567,6 +685,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sdm670-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data},
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index 3c134d1a6277..8addd453f5f1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -8,7 +8,7 @@
struct qcom_smmu {
struct arm_smmu_device smmu;
- const struct qcom_smmu_config *cfg;
+ const struct qcom_smmu_match_data *data;
bool bypass_quirk;
u8 bypass_cbndx;
u32 stall_enabled;
@@ -28,6 +28,7 @@ struct qcom_smmu_match_data {
const struct qcom_smmu_config *cfg;
const struct arm_smmu_impl *impl;
const struct arm_smmu_impl *adreno_impl;
+ const struct of_device_id * const client_match;
};
irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 650664e0f6e3..de205a34ffc6 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -34,6 +34,7 @@
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/fsl/mc.h>
@@ -1411,8 +1412,8 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
- fwnode);
+ struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1437,17 +1438,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
goto out_free;
} else {
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
-
- /*
- * Defer probe if the relevant SMMU instance hasn't finished
- * probing yet. This is a fragile hack and we'd ideally
- * avoid this race in the core code. Until that's ironed
- * out, however, this is the most pragmatic option on the
- * table.
- */
- if (!smmu)
- return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER,
- "smmu dev has not bound yet\n"));
}
ret = -EINVAL;
@@ -2117,7 +2107,7 @@ static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
}
dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
- cnt == 1 ? "" : "s");
+ str_plural(cnt));
iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}
@@ -2227,29 +2217,26 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
i, irq);
}
+ platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
+ arm_smmu_device_reset(smmu);
+ arm_smmu_test_smr_masks(smmu);
+
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
"smmu.%pa", &smmu->ioaddr);
- if (err) {
- dev_err(dev, "Failed to register iommu in sysfs\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register iommu in sysfs\n");
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
using_legacy_binding ? NULL : dev);
if (err) {
- dev_err(dev, "Failed to register iommu\n");
iommu_device_sysfs_remove(&smmu->iommu);
- return err;
+ return dev_err_probe(dev, err, "Failed to register iommu\n");
}
- platform_set_drvdata(pdev, smmu);
-
- /* Check for RMRs and install bypass SMRs if any */
- arm_smmu_rmr_install_bypass_smr(smmu);
-
- arm_smmu_device_reset(smmu);
- arm_smmu_test_smr_masks(smmu);
-
/*
* We want to avoid touching dev->power.lock in fastpaths unless
* it's really going to do something useful - pm_runtime_enabled()
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index e2aeb511ae90..2dbf3243b5ad 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -154,6 +154,8 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_SCTLR_M BIT(0)
#define ARM_SMMU_CB_ACTLR 0x4
+#define ARM_SMMU_GFX_PRR_CFG_LADDR 0x6008
+#define ARM_SMMU_GFX_PRR_CFG_UADDR 0x600C
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_RESUME_TERMINATE BIT(0)
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index d3bb0798092d..6c7528130cf9 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
+obj-$(CONFIG_DMAR_TABLE) += trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 09694cca8752..fc35cba59145 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -47,6 +47,7 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct cache_tag *tag, *temp;
+ struct list_head *prev;
unsigned long flags;
tag = kzalloc(sizeof(*tag), GFP_KERNEL);
@@ -65,6 +66,7 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
tag->dev = iommu->iommu.dev;
spin_lock_irqsave(&domain->cache_lock, flags);
+ prev = &domain->cache_tags;
list_for_each_entry(temp, &domain->cache_tags, node) {
if (cache_tage_match(temp, did, iommu, dev, pasid, type)) {
temp->users++;
@@ -73,8 +75,15 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
trace_cache_tag_assign(temp);
return 0;
}
+ if (temp->iommu == iommu)
+ prev = &temp->node;
}
- list_add_tail(&tag->node, &domain->cache_tags);
+ /*
+ * Link cache tags of the same iommu unit together so the
+ * corresponding flush ops can be batched for that iommu unit.
+ */
+ list_add(&tag->node, prev);
+
spin_unlock_irqrestore(&domain->cache_lock, flags);
trace_cache_tag_assign(tag);
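The insert above keeps cache tags that belong to one IOMMU adjacent in the list so a later flush can batch over a single contiguous run per unit. A minimal standalone sketch of the grouped insert (names follow the hunk):

	struct list_head *prev = &domain->cache_tags;
	struct cache_tag *temp;

	list_for_each_entry(temp, &domain->cache_tags, node)
		if (temp->iommu == iommu)
			prev = &temp->node;	/* last tag of this iommu */

	list_add(&tag->node, prev);		/* append right after that run */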
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
deleted file mode 100644
index 9862dc20b35e..000000000000
--- a/drivers/iommu/intel/cap_audit.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * cap_audit.c - audit iommu capabilities for boot time and hot plug
- *
- * Copyright (C) 2021 Intel Corporation
- *
- * Author: Kyung Min Park <kyung.min.park@intel.com>
- * Lu Baolu <baolu.lu@linux.intel.com>
- */
-
-#define pr_fmt(fmt) "DMAR: " fmt
-
-#include "iommu.h"
-#include "cap_audit.h"
-
-static u64 intel_iommu_cap_sanity;
-static u64 intel_iommu_ecap_sanity;
-
-static inline void check_irq_capabilities(struct intel_iommu *a,
- struct intel_iommu *b)
-{
- CHECK_FEATURE_MISMATCH(a, b, cap, pi_support, CAP_PI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, eim_support, ECAP_EIM_MASK);
-}
-
-static inline void check_dmar_capabilities(struct intel_iommu *a,
- struct intel_iommu *b)
-{
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_MAMV_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_NFR_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_SLLPS_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_FRO_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_MGAW_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_SAGAW_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_NDOMS_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_PSS_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
-
- CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, pgsel_inv, CAP_PSI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, zlr, CAP_ZLR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, caching_mode, CAP_CM_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, phmr, CAP_PHMR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, plmr, CAP_PLMR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, rwbf, CAP_RWBF_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, afl, CAP_AFL_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, rps, ECAP_RPS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, smpwc, ECAP_SMPWC_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, flts, ECAP_FLTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pasid, ECAP_PASID_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, eafs, ECAP_EAFS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, srs, ECAP_SRS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, ers, ECAP_ERS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, prs, ECAP_PRS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, nest, ECAP_NEST_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, mts, ECAP_MTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, sc_support, ECAP_SC_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pass_through, ECAP_PT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, dev_iotlb_support, ECAP_DT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, qis, ECAP_QI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, coherent, ECAP_C_MASK);
-}
-
-static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type)
-{
- bool mismatch = false;
- u64 old_cap = intel_iommu_cap_sanity;
- u64 old_ecap = intel_iommu_ecap_sanity;
-
- if (type == CAP_AUDIT_HOTPLUG_IRQR) {
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK);
- goto out;
- }
-
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, phmr, CAP_PHMR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, plmr, CAP_PLMR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, rwbf, CAP_RWBF_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, afl, CAP_AFL_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, rps, ECAP_RPS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smpwc, ECAP_SMPWC_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, flts, ECAP_FLTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pasid, ECAP_PASID_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eafs, ECAP_EAFS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, srs, ECAP_SRS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, ers, ECAP_ERS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, prs, ECAP_PRS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nest, ECAP_NEST_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, mts, ECAP_MTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, sc_support, ECAP_SC_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pass_through, ECAP_PT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dev_iotlb_support, ECAP_DT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, qis, ECAP_QI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, coherent, ECAP_C_MASK);
-
- /* Abort hot plug if the hot plug iommu feature is smaller than global */
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, max_amask_val, CAP_MAMV_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, num_fault_regs, CAP_NFR_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, super_page_val, CAP_SLLPS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, fault_reg_offset, CAP_FRO_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, mgaw, CAP_MGAW_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, sagaw, CAP_SAGAW_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, ndoms, CAP_NDOMS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, pss, ECAP_PSS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, max_handle_mask, ECAP_MHMV_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, iotlb_offset, ECAP_IRO_MASK, mismatch);
-
-out:
- if (mismatch) {
- intel_iommu_cap_sanity = old_cap;
- intel_iommu_ecap_sanity = old_ecap;
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
-{
- struct dmar_drhd_unit *d;
- struct intel_iommu *i;
- int rc = 0;
-
- rcu_read_lock();
- if (list_empty(&dmar_drhd_units))
- goto out;
-
- for_each_active_iommu(i, d) {
- if (!iommu) {
- intel_iommu_ecap_sanity = i->ecap;
- intel_iommu_cap_sanity = i->cap;
- iommu = i;
- continue;
- }
-
- if (type == CAP_AUDIT_STATIC_DMAR)
- check_dmar_capabilities(iommu, i);
- else
- check_irq_capabilities(iommu, i);
- }
-
- /*
- * If the system is sane to support scalable mode, either SL or FL
- * should be sane.
- */
- if (intel_cap_smts_sanity() &&
- !intel_cap_flts_sanity() && !intel_cap_slts_sanity())
- rc = -EOPNOTSUPP;
-
-out:
- rcu_read_unlock();
- return rc;
-}
-
-int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
-{
- switch (type) {
- case CAP_AUDIT_STATIC_DMAR:
- case CAP_AUDIT_STATIC_IRQR:
- return cap_audit_static(iommu, type);
- case CAP_AUDIT_HOTPLUG_DMAR:
- case CAP_AUDIT_HOTPLUG_IRQR:
- return cap_audit_hotplug(iommu, type);
- default:
- break;
- }
-
- return -EFAULT;
-}
-
-bool intel_cap_smts_sanity(void)
-{
- return ecap_smts(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_pasid_sanity(void)
-{
- return ecap_pasid(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_nest_sanity(void)
-{
- return ecap_nest(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_flts_sanity(void)
-{
- return ecap_flts(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_slts_sanity(void)
-{
- return ecap_slts(intel_iommu_ecap_sanity);
-}
diff --git a/drivers/iommu/intel/cap_audit.h b/drivers/iommu/intel/cap_audit.h
deleted file mode 100644
index d07b75938961..000000000000
--- a/drivers/iommu/intel/cap_audit.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * cap_audit.h - audit iommu capabilities header
- *
- * Copyright (C) 2021 Intel Corporation
- *
- * Author: Kyung Min Park <kyung.min.park@intel.com>
- */
-
-/*
- * Capability Register Mask
- */
-#define CAP_FL5LP_MASK BIT_ULL(60)
-#define CAP_PI_MASK BIT_ULL(59)
-#define CAP_FL1GP_MASK BIT_ULL(56)
-#define CAP_RD_MASK BIT_ULL(55)
-#define CAP_WD_MASK BIT_ULL(54)
-#define CAP_MAMV_MASK GENMASK_ULL(53, 48)
-#define CAP_NFR_MASK GENMASK_ULL(47, 40)
-#define CAP_PSI_MASK BIT_ULL(39)
-#define CAP_SLLPS_MASK GENMASK_ULL(37, 34)
-#define CAP_FRO_MASK GENMASK_ULL(33, 24)
-#define CAP_ZLR_MASK BIT_ULL(22)
-#define CAP_MGAW_MASK GENMASK_ULL(21, 16)
-#define CAP_SAGAW_MASK GENMASK_ULL(12, 8)
-#define CAP_CM_MASK BIT_ULL(7)
-#define CAP_PHMR_MASK BIT_ULL(6)
-#define CAP_PLMR_MASK BIT_ULL(5)
-#define CAP_RWBF_MASK BIT_ULL(4)
-#define CAP_AFL_MASK BIT_ULL(3)
-#define CAP_NDOMS_MASK GENMASK_ULL(2, 0)
-
-/*
- * Extended Capability Register Mask
- */
-#define ECAP_RPS_MASK BIT_ULL(49)
-#define ECAP_SMPWC_MASK BIT_ULL(48)
-#define ECAP_FLTS_MASK BIT_ULL(47)
-#define ECAP_SLTS_MASK BIT_ULL(46)
-#define ECAP_SLADS_MASK BIT_ULL(45)
-#define ECAP_VCS_MASK BIT_ULL(44)
-#define ECAP_SMTS_MASK BIT_ULL(43)
-#define ECAP_PDS_MASK BIT_ULL(42)
-#define ECAP_DIT_MASK BIT_ULL(41)
-#define ECAP_PASID_MASK BIT_ULL(40)
-#define ECAP_PSS_MASK GENMASK_ULL(39, 35)
-#define ECAP_EAFS_MASK BIT_ULL(34)
-#define ECAP_NWFS_MASK BIT_ULL(33)
-#define ECAP_SRS_MASK BIT_ULL(31)
-#define ECAP_ERS_MASK BIT_ULL(30)
-#define ECAP_PRS_MASK BIT_ULL(29)
-#define ECAP_NEST_MASK BIT_ULL(26)
-#define ECAP_MTS_MASK BIT_ULL(25)
-#define ECAP_MHMV_MASK GENMASK_ULL(23, 20)
-#define ECAP_IRO_MASK GENMASK_ULL(17, 8)
-#define ECAP_SC_MASK BIT_ULL(7)
-#define ECAP_PT_MASK BIT_ULL(6)
-#define ECAP_EIM_MASK BIT_ULL(4)
-#define ECAP_DT_MASK BIT_ULL(2)
-#define ECAP_QI_MASK BIT_ULL(1)
-#define ECAP_C_MASK BIT_ULL(0)
-
-/*
- * u64 intel_iommu_cap_sanity, intel_iommu_ecap_sanity will be adjusted as each
- * IOMMU gets audited.
- */
-#define DO_CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
-do { \
- if (cap##_##feature(a) != cap##_##feature(b)) { \
- intel_iommu_##cap##_sanity &= ~(MASK); \
- pr_info("IOMMU feature %s inconsistent", #feature); \
- } \
-} while (0)
-
-#define CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
- DO_CHECK_FEATURE_MISMATCH((a)->cap, (b)->cap, cap, feature, MASK)
-
-#define CHECK_FEATURE_MISMATCH_HOTPLUG(b, cap, feature, MASK) \
-do { \
- if (cap##_##feature(intel_iommu_##cap##_sanity)) \
- DO_CHECK_FEATURE_MISMATCH(intel_iommu_##cap##_sanity, \
- (b)->cap, cap, feature, MASK); \
-} while (0)
-
-#define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \
-do { \
- u64 min_feature = intel_iommu_##cap##_sanity & (MASK); \
- min_feature = min_t(u64, min_feature, (iommu)->cap & (MASK)); \
- intel_iommu_##cap##_sanity = (intel_iommu_##cap##_sanity & ~(MASK)) | \
- min_feature; \
-} while (0)
-
-#define MINIMAL_FEATURE_HOTPLUG(iommu, cap, feature, MASK, mismatch) \
-do { \
- if ((intel_iommu_##cap##_sanity & (MASK)) > \
- (cap##_##feature((iommu)->cap))) \
- mismatch = true; \
- else \
- (iommu)->cap = ((iommu)->cap & ~(MASK)) | \
- (intel_iommu_##cap##_sanity & (MASK)); \
-} while (0)
-
-enum cap_audit_type {
- CAP_AUDIT_STATIC_DMAR,
- CAP_AUDIT_STATIC_IRQR,
- CAP_AUDIT_HOTPLUG_DMAR,
- CAP_AUDIT_HOTPLUG_IRQR,
-};
-
-bool intel_cap_smts_sanity(void);
-bool intel_cap_pasid_sanity(void);
-bool intel_cap_nest_sanity(void);
-bool intel_cap_flts_sanity(void);
-bool intel_cap_slts_sanity(void);
-
-static inline bool scalable_mode_support(void)
-{
- return (intel_iommu_sm && intel_cap_smts_sanity());
-}
-
-static inline bool pasid_mode_support(void)
-{
- return scalable_mode_support() && intel_cap_pasid_sanity();
-}
-
-static inline bool nested_mode_support(void)
-{
- return scalable_mode_support() && intel_cap_nest_sanity();
-}
-
-int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 79e0da9eb626..cc46098f875b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -29,7 +29,6 @@
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "pasid.h"
-#include "cap_audit.h"
#include "perfmon.h"
#define ROOT_SIZE VTD_PAGE_SIZE
@@ -2118,10 +2117,6 @@ static int __init init_dmars(void)
struct intel_iommu *iommu;
int ret;
- ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
- if (ret)
- goto free_iommu;
-
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
@@ -2617,10 +2612,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
struct intel_iommu *iommu = dmaru->iommu;
int ret;
- ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
- if (ret)
- goto out;
-
/*
* Disable translation if already enabled prior to OS handover.
*/
@@ -3250,10 +3241,15 @@ static int blocking_domain_attach_dev(struct iommu_domain *domain,
return 0;
}
+static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old);
+
static struct iommu_domain blocking_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocking_domain_attach_dev,
+ .set_dev_pasid = blocking_domain_set_dev_pasid,
}
};
@@ -3342,8 +3338,7 @@ intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
bool first_stage;
if (flags &
- (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING
- | IOMMU_HWPT_FAULT_ID_VALID)))
+ (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
return ERR_PTR(-EOPNOTSUPP);
if (nested_parent && !nested_supported(iommu))
return ERR_PTR(-EOPNOTSUPP);
@@ -4090,22 +4085,26 @@ void domain_remove_dev_pasid(struct iommu_domain *domain,
break;
}
}
- WARN_ON_ONCE(!dev_pasid);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
cache_tag_unassign_domain(dmar_domain, dev, pasid);
domain_detach_iommu(dmar_domain, iommu);
- intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
- kfree(dev_pasid);
+ if (!WARN_ON_ONCE(!dev_pasid)) {
+ intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
+ kfree(dev_pasid);
+ }
}
-static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain)
+static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
- domain_remove_dev_pasid(domain, dev, pasid);
+ domain_remove_dev_pasid(old, dev, pasid);
+
+ return 0;
}
struct dev_pasid_info *
@@ -4445,21 +4444,6 @@ static struct iommu_domain identity_domain = {
},
};
-static struct iommu_domain *intel_iommu_domain_alloc_paging(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
- struct dmar_domain *dmar_domain;
- bool first_stage;
-
- first_stage = first_level_by_default(iommu);
- dmar_domain = paging_domain_alloc(dev, first_stage);
- if (IS_ERR(dmar_domain))
- return ERR_CAST(dmar_domain);
-
- return &dmar_domain->domain;
-}
-
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
@@ -4468,7 +4452,6 @@ const struct iommu_ops intel_iommu_ops = {
.hw_info = intel_iommu_hw_info,
.domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = intel_svm_domain_alloc,
- .domain_alloc_paging = intel_iommu_domain_alloc_paging,
.domain_alloc_nested = intel_iommu_domain_alloc_nested,
.probe_device = intel_iommu_probe_device,
.release_device = intel_iommu_release_device,
@@ -4478,7 +4461,6 @@ const struct iommu_ops intel_iommu_ops = {
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
- .remove_dev_pasid = intel_iommu_remove_dev_pasid,
.pgsize_bitmap = SZ_4K,
.page_response = intel_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 466c1412dd45..ad795c772f21 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -24,7 +24,6 @@
#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
-#include "cap_audit.h"
enum irq_mode {
IRQ_REMAPPING,
@@ -727,9 +726,6 @@ static int __init intel_prepare_irq_remapping(void)
if (dmar_table_init() < 0)
return -ENODEV;
- if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
- return -ENODEV;
-
if (!dmar_ir_support())
return -ENODEV;
@@ -1463,7 +1459,6 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
else
irq_data->chip = &intel_ir_chip;
intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
- irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
return 0;
@@ -1534,10 +1529,6 @@ static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
int ret;
int eim = x2apic_enabled();
- ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
- if (ret)
- return ret;
-
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 5b7d85f1e143..fb59a7d35958 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -244,11 +244,31 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ if (WARN_ON(!pte)) {
spin_unlock(&iommu->lock);
return;
}
+ if (!pasid_pte_is_present(pte)) {
+ if (!pasid_pte_is_fault_disabled(pte)) {
+ WARN_ON(READ_ONCE(pte->val[0]) != 0);
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ /*
+ * When a PASID is used for SVA by a device, it's possible
+ * that the pasid entry is non-present with the Fault
+ * Processing Disabled bit set. Clear the pasid entry and
+ * drain the PRQ for the PASID before returning.
+ */
+ pasid_clear_entry(pte);
+ spin_unlock(&iommu->lock);
+ intel_iommu_drain_pasid_prq(dev, pasid);
+
+ return;
+ }
+
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 082f4fe20216..668d8ece6b14 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -73,6 +73,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
+/* Get FPD (Fault Processing Disable) bit of a PASID table entry */
+static inline bool pasid_pte_is_fault_disabled(struct pasid_entry *pte)
+{
+ return READ_ONCE(pte->val[0]) & PASID_PTE_FPD;
+}
+
/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 6b9bb58a414f..7632c80edea6 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -223,6 +223,34 @@ static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
return ptes_per_table - (i & (ptes_per_table - 1));
}
+/*
+ * Check if concatenated PGDs are mandatory according to Arm DDI0487 (K.a)
+ * 1) R_DXBSH: For 16KB, and 48-bit input size, use level 1 instead of 0.
+ * 2) R_SRKBC: After de-ciphering the table for PA size and valid initial lookup
+ * a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
+ * b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
+ * c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
+ * d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
+ */
+static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+{
+ unsigned int ias = cfg->ias;
+ unsigned int oas = cfg->oas;
+
+ /* Covers 1 and 2.d */
+ if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
+ return (oas == 48) || (ias == 48);
+
+ /* Covers 2.a and 2.c */
+ if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
+ return (oas == 40) || (oas == 42);
+
+ /* Case 2.b */
+ return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
+ (data->start_level == 1) && (oas == 40);
+}
+
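The table counts quoted in the comment above follow from one piece of arithmetic: a table at the start level resolves granule_shift + levels * bits_per_level bits of input address (where levels counts the translation levels from the start level down to the leaf), and the remaining high bits of the IA select among concatenated tables. A minimal sketch of that computation (a hypothetical helper, not part of the driver):

	/*
	 * Concatenated-table count when the walk starts below level 0.
	 * 4K granule (shift 12, 9 bits/level), start level 1, ias 40:
	 *   covered = 12 + 3 * 9 = 39  ->  1UL << (40 - 39) = 2 tables
	 * 16K granule (shift 14, 11 bits/level), start level 2, ias 40:
	 *   covered = 14 + 2 * 11 = 36 ->  1UL << (40 - 36) = 16 tables
	 */
	static unsigned long concat_table_count(unsigned int ias,
						unsigned int granule_shift,
						unsigned int bits_per_level,
						unsigned int levels)
	{
		unsigned int covered = granule_shift + levels * bits_per_level;

		return 1UL << (ias - covered);
	}

These values match cases 2.a and 2.b above; 2.c and 2.d fall out the same way.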
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
@@ -676,85 +704,107 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov
data->start_level, ptep);
}
+struct io_pgtable_walk_data {
+ struct io_pgtable *iop;
+ void *data;
+ int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size);
+ unsigned long flags;
+ u64 addr;
+ const u64 end;
+};
+
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl);
+
+struct iova_to_phys_data {
+ arm_lpae_iopte pte;
+ int lvl;
+};
+
+static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iova_to_phys_data *data = walk_data->data;
+ data->pte = *ptep;
+ data->lvl = lvl;
+ return 0;
+}
+
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = data->start_level;
-
- do {
- /* Valid IOPTE pointer? */
- if (!ptep)
- return 0;
-
- /* Grab the IOPTE we're interested in */
- ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
- pte = READ_ONCE(*ptep);
-
- /* Valid entry? */
- if (!pte)
- return 0;
+ struct iova_to_phys_data d;
+ struct io_pgtable_walk_data walk_data = {
+ .data = &d,
+ .visit = visit_iova_to_phys,
+ .addr = iova,
+ .end = iova + 1,
+ };
+ int ret;
- /* Leaf entry? */
- if (iopte_leaf(pte, lvl, data->iop.fmt))
- goto found_translation;
+ ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+ if (ret)
+ return 0;
- /* Take it to the next level */
- ptep = iopte_deref(pte, data);
- } while (++lvl < ARM_LPAE_MAX_LEVELS);
+ iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
+ return iopte_to_paddr(d.pte, data) | iova;
+}
- /* Ran out of page tables to walk */
+static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;
+ data->ptes[lvl] = *ptep;
return 0;
-
-found_translation:
- iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
- return iopte_to_paddr(pte, data) | iova;
}
-struct io_pgtable_walk_data {
- struct iommu_dirty_bitmap *dirty;
- unsigned long flags;
- u64 addr;
- const u64 end;
-};
+static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
+ void *wd)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_walk_data walk_data = {
+ .data = wd,
+ .visit = visit_pgtable_walk,
+ .addr = iova,
+ .end = iova + 1,
+ };
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+}
-static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep, int lvl)
+static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep, int lvl)
{
struct io_pgtable *iop = &data->iop;
arm_lpae_iopte pte = READ_ONCE(*ptep);
- if (iopte_leaf(pte, lvl, iop->fmt)) {
- size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ int ret = walk_data->visit(walk_data, lvl, ptep, size);
+ if (ret)
+ return ret;
- if (iopte_writeable_dirty(pte)) {
- iommu_dirty_bitmap_record(walk_data->dirty,
- walk_data->addr, size);
- if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
- iopte_set_writeable_clean(ptep);
- }
+ if (iopte_leaf(pte, lvl, iop->fmt)) {
walk_data->addr += size;
return 0;
}
- if (WARN_ON(!iopte_table(pte, lvl)))
+ if (!iopte_table(pte, lvl)) {
return -EINVAL;
+ }
ptep = iopte_deref(pte, data);
- return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
+ return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
}
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl)
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl)
{
u32 idx;
int max_entries, ret;
@@ -769,7 +819,7 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
(idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
- ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
+ ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
if (ret)
return ret;
}
@@ -777,6 +827,23 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
return 0;
}
+static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iommu_dirty_bitmap *dirty = walk_data->data;
+
+ if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
+ return 0;
+
+ if (iopte_writeable_dirty(*ptep)) {
+ iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
+ if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
+ iopte_set_writeable_clean(ptep);
+ }
+
+ return 0;
+}
+
static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
@@ -785,7 +852,9 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct io_pgtable_walk_data walk_data = {
- .dirty = dirty,
+ .iop = &data->iop,
+ .data = dirty,
+ .visit = visit_dirty,
.flags = flags,
.addr = iova,
.end = iova + size,
@@ -800,7 +869,7 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
if (data->iop.fmt != ARM_64_LPAE_S1)
return -EINVAL;
- return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
@@ -882,6 +951,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
+ .pgtable_walk = arm_lpae_pgtable_walk,
};
return data;
@@ -1006,18 +1076,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
if (!data)
return NULL;
- /*
- * Concatenate PGDs at level 1 if possible in order to reduce
- * the depth of the stage-2 walk.
- */
- if (data->start_level == 0) {
- unsigned long pgd_pages;
-
- pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
- if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_bits += data->bits_per_level;
- data->start_level++;
- }
+ if (arm_lpae_concat_mandatory(cfg, data)) {
+ if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
+ ARM_LPAE_S2_MAX_CONCAT_PAGES))
+ return NULL;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
/* VTCR */
@@ -1364,15 +1428,14 @@ static int __init arm_lpae_do_selftests(void)
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] __initconst = {
+ static const unsigned int address_size[] __initconst = {
32, 36, 40, 42, 44, 48,
};
- int i, j, pass = 0, fail = 0;
+ int i, j, k, pass = 0, fail = 0;
struct device dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
- .oas = 48,
.coherent_walk = true,
.iommu_dev = &dev,
};
@@ -1381,15 +1444,19 @@ static int __init arm_lpae_do_selftests(void)
set_dev_node(&dev, NUMA_NO_NODE);
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
- for (j = 0; j < ARRAY_SIZE(ias); ++j) {
- cfg.pgsize_bitmap = pgsize[i];
- cfg.ias = ias[j];
- pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
- pgsize[i], ias[j]);
- if (arm_lpae_run_tests(&cfg))
- fail++;
- else
- pass++;
+ for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
+ /* Don't use ias > oas as it is not valid for stage-2. */
+ for (k = 0; k <= j; ++k) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = address_size[k];
+ cfg.oas = address_size[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
+ pgsize[i], cfg.ias, cfg.oas);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
}
}
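The rework above separates the traversal from the per-entry action: __arm_lpae_iopte_walk() now drives the iteration and dispatches each entry to the walk_data->visit callback, with iova_to_phys, pgtable_walk and dirty tracking as the three in-tree visitors. Purely as an illustration (this visitor does not exist in the driver), a leaf-counting walk follows the same shape as visit_dirty():

	/* Hypothetical visitor: count leaf mappings in [addr, end). */
	static int visit_count_leaves(struct io_pgtable_walk_data *walk_data,
				      int lvl, arm_lpae_iopte *ptep, size_t size)
	{
		unsigned long *count = walk_data->data;

		if (iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
			(*count)++;

		return 0;
	}

	unsigned long count = 0;
	struct io_pgtable_walk_data walk_data = {
		.iop	= &data->iop,
		.data	= &count,
		.visit	= visit_count_leaves,
		.addr	= iova,
		.end	= iova + size,
	};

	__arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);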
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 599030e1e890..870c3cdbd0f6 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2819,7 +2819,7 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (!ops)
- return -EPROBE_DEFER;
+ return driver_deferred_probe_check_state(dev);
if (fwspec)
return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
@@ -3312,6 +3312,16 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+static void iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *blocked_domain = ops->blocked_domain;
+
+ WARN_ON(blocked_domain->ops->set_dev_pasid(blocked_domain,
+ dev, pasid, domain));
+}
+
static int __iommu_set_group_pasid(struct iommu_domain *domain,
struct iommu_group *group, ioasid_t pasid)
{
@@ -3330,11 +3340,9 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain,
err_revert:
last_gdev = device;
for_each_group_device(group, device) {
- const struct iommu_ops *ops = dev_iommu_ops(device->dev);
-
if (device == last_gdev)
break;
- ops->remove_dev_pasid(device->dev, pasid, domain);
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
}
return ret;
}
@@ -3344,12 +3352,9 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
struct iommu_domain *domain)
{
struct group_device *device;
- const struct iommu_ops *ops;
- for_each_group_device(group, device) {
- ops = dev_iommu_ops(device->dev);
- ops->remove_dev_pasid(device->dev, pasid, domain);
- }
+ for_each_group_device(group, device)
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
}
/*
@@ -3368,16 +3373,20 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
+ const struct iommu_ops *ops;
int ret;
- if (!domain->ops->set_dev_pasid)
- return -EOPNOTSUPP;
-
if (!group)
return -ENODEV;
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
- pasid == IOMMU_NO_PASID)
+ ops = dev_iommu_ops(dev);
+
+ if (!domain->ops->set_dev_pasid ||
+ !ops->blocked_domain ||
+ !ops->blocked_domain->ops->set_dev_pasid)
+ return -EOPNOTSUPP;
+
+ if (ops != domain->owner || pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
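With ops->remove_dev_pasid gone, the core now detaches a PASID by asking the driver's blocked domain to take it over, and iommu_attach_device_pasid() rejects drivers whose blocked domain cannot do that. The shape a PASID-capable driver must provide now looks roughly like this (all names hypothetical; the Intel conversion above is the in-tree instance):

	static int my_blocking_attach_dev(struct iommu_domain *domain,
					  struct device *dev)
	{
		/* cut off DMA for the whole device */
		return 0;
	}

	static int my_blocking_set_dev_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid,
					     struct iommu_domain *old)
	{
		/* tear down whatever 'old' installed for this PASID */
		return 0;
	}

	static struct iommu_domain my_blocked_domain = {
		.type = IOMMU_DOMAIN_BLOCKED,
		.ops = &(const struct iommu_domain_ops) {
			.attach_dev	= my_blocking_attach_dev,
			.set_dev_pasid	= my_blocking_set_dev_pasid,
		},
	};

	/* Without blocked_domain->ops->set_dev_pasid, attaching a
	 * domain to a PASID now fails with -EOPNOTSUPP. */
	static const struct iommu_ops my_iommu_ops = {
		.blocked_domain = &my_blocked_domain,
		/* ... */
	};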
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index 1fe804e28a86..d9a937450e55 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -103,15 +103,23 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
{
struct iommufd_fault *fault = hwpt->fault;
struct iopf_group *group, *next;
+ struct list_head free_list;
unsigned long index;
if (!fault)
return;
+ INIT_LIST_HEAD(&free_list);
mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
list_for_each_entry_safe(group, next, &fault->deliver, node) {
if (group->attach_handle != &handle->handle)
continue;
+ list_move(&group->node, &free_list);
+ }
+ spin_unlock(&fault->lock);
+
+ list_for_each_entry_safe(group, next, &free_list, node) {
list_del(&group->node);
iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
iopf_free_group(group);
@@ -213,6 +221,7 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
{
struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
struct iopf_group *group, *next;
+ unsigned long index;
/*
* The iommufd object's reference count is zero at this point.
@@ -225,6 +234,13 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
iopf_free_group(group);
}
+ xa_for_each(&fault->response, index, group) {
+ xa_erase(&fault->response, index);
+ iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ iopf_free_group(group);
+ }
+ xa_destroy(&fault->response);
+ mutex_destroy(&fault->mutex);
}
static void iommufd_compose_fault_message(struct iommu_fault *fault,
@@ -247,7 +263,7 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
{
size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
struct iommufd_fault *fault = filep->private_data;
- struct iommu_hwpt_pgfault data;
+ struct iommu_hwpt_pgfault data = {};
struct iommufd_device *idev;
struct iopf_group *group;
struct iopf_fault *iopf;
@@ -258,17 +274,19 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
return -ESPIPE;
mutex_lock(&fault->mutex);
- while (!list_empty(&fault->deliver) && count > done) {
- group = list_first_entry(&fault->deliver,
- struct iopf_group, node);
-
- if (group->fault_count * fault_size > count - done)
+ while ((group = iommufd_fault_deliver_fetch(fault))) {
+ if (done >= count ||
+ group->fault_count * fault_size > count - done) {
+ iommufd_fault_deliver_restore(fault, group);
break;
+ }
rc = xa_alloc(&fault->response, &group->cookie, group,
xa_limit_32b, GFP_KERNEL);
- if (rc)
+ if (rc) {
+ iommufd_fault_deliver_restore(fault, group);
break;
+ }
idev = to_iommufd_handle(group->attach_handle)->idev;
list_for_each_entry(iopf, &group->faults, list) {
@@ -277,13 +295,12 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
group->cookie);
if (copy_to_user(buf + done, &data, fault_size)) {
xa_erase(&fault->response, group->cookie);
+ iommufd_fault_deliver_restore(fault, group);
rc = -EFAULT;
break;
}
done += fault_size;
}
-
- list_del(&group->node);
}
mutex_unlock(&fault->mutex);
@@ -341,10 +358,10 @@ static __poll_t iommufd_fault_fops_poll(struct file *filep,
__poll_t pollflags = EPOLLOUT;
poll_wait(filep, &fault->wait_queue, wait);
- mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
if (!list_empty(&fault->deliver))
pollflags |= EPOLLIN | EPOLLRDNORM;
- mutex_unlock(&fault->mutex);
+ spin_unlock(&fault->lock);
return pollflags;
}
@@ -386,6 +403,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
INIT_LIST_HEAD(&fault->deliver);
xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
mutex_init(&fault->mutex);
+ spin_lock_init(&fault->lock);
init_waitqueue_head(&fault->wait_queue);
filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
@@ -434,9 +452,9 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
hwpt = group->attach_handle->domain->fault_data;
fault = hwpt->fault;
- mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
list_add_tail(&group->node, &fault->deliver);
- mutex_unlock(&fault->mutex);
+ spin_unlock(&fault->lock);
wake_up_interruptible(&fault->wait_queue);
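The lock split above is the point of the patch: the deliver list now only needs the new spinlock, so the poll and iopf-handler paths no longer contend on (or sleep under) the mutex, which is kept solely to serialize the response flows.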
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index ce03c3804651..598be26a14e2 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -140,8 +140,8 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
if (ops->domain_alloc_paging_flags) {
- hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags,
- user_data);
+ hwpt->domain = ops->domain_alloc_paging_flags(idev->dev,
+ flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
@@ -280,6 +280,8 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
struct iommufd_hw_pagetable *hwpt;
int rc;
+ if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+ return ERR_PTR(-EOPNOTSUPP);
if (!user_data->len)
return ERR_PTR(-EOPNOTSUPP);
if (!viommu->ops || !viommu->ops->alloc_domain_nested)
@@ -296,7 +298,9 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
hwpt_nested->parent = viommu->hwpt;
hwpt->domain =
- viommu->ops->alloc_domain_nested(viommu, flags, user_data);
+ viommu->ops->alloc_domain_nested(viommu,
+ flags & ~IOMMU_HWPT_FAULT_ID_VALID,
+ user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index b6d706cf2c66..0b1bafc7fd99 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -443,14 +443,39 @@ struct iommufd_fault {
struct iommufd_ctx *ictx;
struct file *filep;
- /* The lists of outstanding faults protected by below mutex. */
- struct mutex mutex;
+ spinlock_t lock; /* protects the deliver list */
struct list_head deliver;
+ struct mutex mutex; /* serializes response flows */
struct xarray response;
struct wait_queue_head wait_queue;
};
+/* Fetch the first node out of the fault->deliver list */
+static inline struct iopf_group *
+iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
+{
+ struct list_head *list = &fault->deliver;
+ struct iopf_group *group = NULL;
+
+ spin_lock(&fault->lock);
+ if (!list_empty(list)) {
+ group = list_first_entry(list, struct iopf_group, node);
+ list_del(&group->node);
+ }
+ spin_unlock(&fault->lock);
+ return group;
+}
+
+/* Restore a node back to the head of the fault->deliver list */
+static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
+ struct iopf_group *group)
+{
+ spin_lock(&fault->lock);
+ list_add(&group->node, &fault->deliver);
+ spin_unlock(&fault->lock);
+}
+
struct iommufd_attach_handle {
struct iommu_attach_handle handle;
struct iommufd_device *idev;
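Together the two helpers give readers a transactional pop: fetch takes a group off the deliver list under the spinlock, and any later failure (short user buffer, xa_alloc() failure, copy_to_user() fault) restores it to the head so the fault is never lost. In outline, mirroring iommufd_fault_fops_read() above (consume() stands in for the real copy-out work):

	static void drain_deliver_list(struct iommufd_fault *fault)
	{
		struct iopf_group *group;

		mutex_lock(&fault->mutex);	/* serializes readers */
		while ((group = iommufd_fault_deliver_fetch(fault))) {
			if (consume(group)) {
				/* failed: put it back, still deliverable */
				iommufd_fault_deliver_restore(fault, group);
				break;
			}
		}
		mutex_unlock(&fault->mutex);
	}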
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index ab665cf38ef4..39a86a4a1d3a 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -130,7 +130,7 @@ struct iova_bitmap {
static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
unsigned long iova)
{
- unsigned long pgsize = 1 << bitmap->mapped.pgshift;
+ unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
}
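The 1UL is the whole fix: pgshift can describe very large mapping granules, but a plain 1 << pgshift is computed in 32-bit int, so shifts of 31 or more overflow before the result widens to unsigned long. A contrived illustration of the difference:

	unsigned long pgshift = 34;		/* hypothetical huge mapping granule */
	unsigned long bad  = 1   << pgshift;	/* int shift: undefined behaviour
						   (on x86 the count wraps mod 32,
						   giving 4) */
	unsigned long good = 1UL << pgshift;	/* 0x400000000, as intended */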
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 97c5e3567d33..ccf616462a1c 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -104,7 +104,7 @@ static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
if (wait_event_timeout(ictx->destroy_wait,
refcount_read(&to_destroy->shortterm_users) ==
0,
- msecs_to_jiffies(10000)))
+ msecs_to_jiffies(60000)))
return 0;
pr_crit("Time out waiting for iommufd object to become free\n");
@@ -307,9 +307,9 @@ union ucmd_buffer {
struct iommu_ioas_map map;
struct iommu_ioas_unmap unmap;
struct iommu_option option;
+ struct iommu_vdevice_alloc vdev;
struct iommu_vfio_ioas vfio_ioas;
struct iommu_viommu_alloc viommu;
- struct iommu_vdevice_alloc vdev;
#ifdef CONFIG_IOMMUFD_TEST
struct iommu_test_cmd test;
#endif
@@ -333,8 +333,8 @@ struct iommufd_ioctl_op {
}
static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
- IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc, struct iommu_fault_alloc,
- out_fault_fd),
+ IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc,
+ struct iommu_fault_alloc, out_fault_fd),
IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
__reserved),
IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
@@ -355,20 +355,18 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
src_iova),
IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges,
struct iommu_ioas_iova_ranges, out_iova_alignment),
- IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map,
- iova),
+ IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map, iova),
IOCTL_OP(IOMMU_IOAS_MAP_FILE, iommufd_ioas_map_file,
struct iommu_ioas_map_file, iova),
IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap,
length),
- IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option,
- val64),
+ IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, val64),
+ IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
+ struct iommu_vdevice_alloc, virt_id),
IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
__reserved),
IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
struct iommu_viommu_alloc, out_viommu_id),
- IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
- struct iommu_vdevice_alloc, virt_id),
#ifdef CONFIG_IOMMUFD_TEST
IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
#endif
@@ -490,8 +488,8 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_DEVICE] = {
.destroy = iommufd_device_destroy,
},
- [IOMMUFD_OBJ_IOAS] = {
- .destroy = iommufd_ioas_destroy,
+ [IOMMUFD_OBJ_FAULT] = {
+ .destroy = iommufd_fault_destroy,
},
[IOMMUFD_OBJ_HWPT_PAGING] = {
.destroy = iommufd_hwpt_paging_destroy,
@@ -501,15 +499,15 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
.destroy = iommufd_hwpt_nested_destroy,
.abort = iommufd_hwpt_nested_abort,
},
- [IOMMUFD_OBJ_FAULT] = {
- .destroy = iommufd_fault_destroy,
- },
- [IOMMUFD_OBJ_VIOMMU] = {
- .destroy = iommufd_viommu_destroy,
+ [IOMMUFD_OBJ_IOAS] = {
+ .destroy = iommufd_ioas_destroy,
},
[IOMMUFD_OBJ_VDEVICE] = {
.destroy = iommufd_vdevice_destroy,
},
+ [IOMMUFD_OBJ_VIOMMU] = {
+ .destroy = iommufd_viommu_destroy,
+ },
#ifdef CONFIG_IOMMUFD_TEST
[IOMMUFD_OBJ_SELFTEST] = {
.destroy = iommufd_selftest_destroy,
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index a0de6d6d4e68..d40deb0a4f06 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -311,25 +311,6 @@ static const struct iommu_dirty_ops dirty_ops = {
.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};
-static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
-{
- struct mock_dev *mdev = to_mock_dev(dev);
- struct mock_iommu_domain *mock;
-
- mock = kzalloc(sizeof(*mock), GFP_KERNEL);
- if (!mock)
- return NULL;
- mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
- mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
- mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
- if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
- mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
- mock->domain.ops = mock_ops.default_domain_ops;
- mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
- xa_init(&mock->pfns);
- return &mock->domain;
-}
-
static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
{
@@ -385,21 +366,30 @@ mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_NEST_PARENT;
- bool no_dirty_ops = to_mock_dev(dev)->flags &
- MOCK_FLAGS_DEVICE_NO_DIRTY;
- struct iommu_domain *domain;
+ struct mock_dev *mdev = to_mock_dev(dev);
+ bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
+ struct mock_iommu_domain *mock;
if (user_data)
return ERR_PTR(-EOPNOTSUPP);
if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(dev);
- if (!domain)
+ mock = kzalloc(sizeof(*mock), GFP_KERNEL);
+ if (!mock)
return ERR_PTR(-ENOMEM);
+ mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
+ mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
+ mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
+ mock->domain.ops = mock_ops.default_domain_ops;
+ mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
+ xa_init(&mock->pfns);
+
if (has_dirty_flag)
- domain->dirty_ops = &dirty_ops;
- return domain;
+ mock->domain.dirty_ops = &dirty_ops;
+ return &mock->domain;
}
static void mock_domain_free(struct iommu_domain *domain)
@@ -595,7 +585,7 @@ mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
struct mock_iommu_domain_nested *mock_nested;
- if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+ if (flags)
return ERR_PTR(-EOPNOTSUPP);
mock_nested = __mock_domain_alloc_nested(user_data);
@@ -713,7 +703,6 @@ static const struct iommu_ops mock_ops = {
.owner = THIS_MODULE,
.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
.hw_info = mock_domain_hw_info,
- .domain_alloc_paging = mock_domain_alloc_paging,
.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
.domain_alloc_nested = mock_domain_alloc_nested,
.capable = mock_domain_capable,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index ce40f0a419ea..2769e4544038 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -725,47 +725,32 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu->dev = &pdev->dev;
INIT_LIST_HEAD(&iommu->ctx_list);
- iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ iommu->pclk = devm_clk_get_prepared(iommu->dev, "smmu_pclk");
if (IS_ERR(iommu->pclk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
"could not get smmu_pclk\n");
- ret = clk_prepare(iommu->pclk);
- if (ret)
- return dev_err_probe(iommu->dev, ret,
- "could not prepare smmu_pclk\n");
-
- iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
- if (IS_ERR(iommu->clk)) {
- clk_unprepare(iommu->pclk);
+ iommu->clk = devm_clk_get_prepared(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
"could not get iommu_clk\n");
- }
-
- ret = clk_prepare(iommu->clk);
- if (ret) {
- clk_unprepare(iommu->pclk);
- return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iommu->base = devm_ioremap_resource(iommu->dev, r);
if (IS_ERR(iommu->base)) {
ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
- goto fail;
+ return ret;
}
ioaddr = r->start;
iommu->irq = platform_get_irq(pdev, 0);
- if (iommu->irq < 0) {
- ret = -ENODEV;
- goto fail;
- }
+ if (iommu->irq < 0)
+ return -ENODEV;
ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
if (ret) {
dev_err(iommu->dev, "could not get ncb\n");
- goto fail;
+ return ret;
}
iommu->ncb = val;
@@ -780,8 +765,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
if (!par) {
pr_err("Invalid PAR value detected\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
@@ -791,7 +775,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
- goto fail;
+ return ret;
}
list_add(&iommu->dev_node, &qcom_iommu_devices);
@@ -800,23 +784,19 @@ static int msm_iommu_probe(struct platform_device *pdev)
"msm-smmu.%pa", &ioaddr);
if (ret) {
pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
- goto fail;
+ return ret;
}
ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
if (ret) {
pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
- goto fail;
+ return ret;
}
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
return ret;
-fail:
- clk_unprepare(iommu->clk);
- clk_unprepare(iommu->pclk);
- return ret;
}
static const struct of_device_id msm_iommu_dt_match[] = {
@@ -824,20 +804,11 @@ static const struct of_device_id msm_iommu_dt_match[] = {
{}
};
-static void msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
-
- clk_unprepare(iommu->clk);
- clk_unprepare(iommu->pclk);
-}
-
static struct platform_driver msm_iommu_driver = {
.driver = {
.name = "msm_iommu",
.of_match_table = msm_iommu_dt_match,
},
.probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);
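devm_clk_get_prepared() folds clk_get() and clk_prepare() into one device-managed call, and devres issues the matching clk_unprepare() both when probe fails and when the device is unbound. That is what lets the fail: unwind label and the entire msm_iommu_remove() callback go away. The pattern in general form:

	/* devm-managed: no explicit clk_unprepare() anywhere */
	clk = devm_clk_get_prepared(dev, "iommu_clk");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "could not get iommu_clk\n");

	/* later probe errors may simply 'return ret;' -- devres unwinds */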
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index ab60901f8f92..034b0e670384 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
+#include <linux/string_choices.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -510,7 +511,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
bank->parent_dev,
"fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
- layer, write ? "write" : "read");
+ layer, str_write_read(write));
}
/* Interrupt clear */
@@ -602,7 +603,7 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
- enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+ str_enable_disable(enable), dev_name(larb_mmu->dev),
portid_msk, regionid, upper_32_bits(region->iova_base));
if (enable)
@@ -630,8 +631,8 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
}
if (ret)
dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n",
- enable ? "enable" : "disable",
- dev_name(data->dev), portid_msk, ret);
+ str_enable_disable(enable), dev_name(data->dev),
+ portid_msk, ret);
}
return ret;
}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b6de1ca00cef..a565b9e40f4a 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
@@ -243,7 +244,7 @@ static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
larb_mmu = &data->larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
- enable ? "enable" : "disable", portid);
+ str_enable_disable(enable), portid);
if (enable)
larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e7a6a1611d19..97987cd78da9 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -29,8 +29,6 @@ static int of_iommu_xlate(struct device *dev,
return -ENODEV;
ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_spec->np));
- if (ret == -EPROBE_DEFER)
- return driver_deferred_probe_check_state(dev);
if (ret)
return ret;
diff --git a/drivers/iommu/riscv/iommu-pci.c b/drivers/iommu/riscv/iommu-pci.c
index c7a89143014c..d82d2b00904c 100644
--- a/drivers/iommu/riscv/iommu-pci.c
+++ b/drivers/iommu/riscv/iommu-pci.c
@@ -101,6 +101,13 @@ static void riscv_iommu_pci_remove(struct pci_dev *pdev)
riscv_iommu_remove(iommu);
}
+static void riscv_iommu_pci_shutdown(struct pci_dev *pdev)
+{
+ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev);
+
+ riscv_iommu_disable(iommu);
+}
+
static const struct pci_device_id riscv_iommu_pci_tbl[] = {
{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), 0},
{PCI_VDEVICE(RIVOS, PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA), 0},
@@ -112,6 +119,7 @@ static struct pci_driver riscv_iommu_pci_driver = {
.id_table = riscv_iommu_pci_tbl,
.probe = riscv_iommu_pci_probe,
.remove = riscv_iommu_pci_remove,
+ .shutdown = riscv_iommu_pci_shutdown,
.driver = {
.suppress_bind_attrs = true,
},
diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c
index 382ba2841849..725e919b97ef 100644
--- a/drivers/iommu/riscv/iommu-platform.c
+++ b/drivers/iommu/riscv/iommu-platform.c
@@ -11,18 +11,43 @@
*/
#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "iommu-bits.h"
#include "iommu.h"
+static void riscv_iommu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct riscv_iommu_device *iommu = dev_get_drvdata(dev);
+ u16 idx = desc->msi_index;
+ u64 addr;
+
+ addr = ((u64)msg->address_hi << 32) | msg->address_lo;
+
+ if (addr != (addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR)) {
+ dev_err_once(dev,
+ "uh oh, the IOMMU can't send MSIs to 0x%llx, sending to 0x%llx instead\n",
+ addr, addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR);
+ }
+
+ addr &= RISCV_IOMMU_MSI_CFG_TBL_ADDR;
+
+ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_ADDR(idx), addr);
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_DATA(idx), msg->data);
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_CTRL(idx), 0);
+}
+
static int riscv_iommu_platform_probe(struct platform_device *pdev)
{
+ enum riscv_iommu_igs_settings igs;
struct device *dev = &pdev->dev;
struct riscv_iommu_device *iommu = NULL;
struct resource *res = NULL;
- int vec;
+ int vec, ret;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -40,16 +65,6 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES);
iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
- /* For now we only support WSI */
- switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) {
- case RISCV_IOMMU_CAPABILITIES_IGS_WSI:
- case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
- break;
- default:
- return dev_err_probe(dev, -ENODEV,
- "unable to use wire-signaled interrupts\n");
- }
-
iommu->irqs_count = platform_irq_count(pdev);
if (iommu->irqs_count <= 0)
return dev_err_probe(dev, -ENODEV,
@@ -57,13 +72,58 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
if (iommu->irqs_count > RISCV_IOMMU_INTR_COUNT)
iommu->irqs_count = RISCV_IOMMU_INTR_COUNT;
- for (vec = 0; vec < iommu->irqs_count; vec++)
- iommu->irqs[vec] = platform_get_irq(pdev, vec);
+ igs = FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps);
+ switch (igs) {
+ case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
+ case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
+ if (is_of_node(dev->fwnode))
+ of_msi_configure(dev, to_of_node(dev->fwnode));
+
+ if (!dev_get_msi_domain(dev)) {
+ dev_warn(dev, "failed to find an MSI domain\n");
+ goto msi_fail;
+ }
+
+ ret = platform_device_msi_init_and_alloc_irqs(dev, iommu->irqs_count,
+ riscv_iommu_write_msi_msg);
+ if (ret) {
+ dev_warn(dev, "failed to allocate MSIs\n");
+ goto msi_fail;
+ }
+
+ for (vec = 0; vec < iommu->irqs_count; vec++)
+ iommu->irqs[vec] = msi_get_virq(dev, vec);
+
+ /* Enable message-signaled interrupts, fctl.WSI */
+ if (iommu->fctl & RISCV_IOMMU_FCTL_WSI) {
+ iommu->fctl ^= RISCV_IOMMU_FCTL_WSI;
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ }
+
+ dev_info(dev, "using MSIs\n");
+ break;
+
+msi_fail:
+ if (igs != RISCV_IOMMU_CAPABILITIES_IGS_BOTH) {
+ return dev_err_probe(dev, -ENODEV,
+ "unable to use wire-signaled interrupts\n");
+ }
- /* Enable wire-signaled interrupts, fctl.WSI */
- if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) {
- iommu->fctl |= RISCV_IOMMU_FCTL_WSI;
- riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ fallthrough;
+
+ case RISCV_IOMMU_CAPABILITIES_IGS_WSI:
+ for (vec = 0; vec < iommu->irqs_count; vec++)
+ iommu->irqs[vec] = platform_get_irq(pdev, vec);
+
+ /* Enable wire-signaled interrupts, fctl.WSI */
+ if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) {
+ iommu->fctl |= RISCV_IOMMU_FCTL_WSI;
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ }
+ dev_info(dev, "using wire-signaled interrupts\n");
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "invalid IGS\n");
}
return riscv_iommu_init(iommu);
@@ -71,7 +131,18 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
static void riscv_iommu_platform_remove(struct platform_device *pdev)
{
- riscv_iommu_remove(dev_get_drvdata(&pdev->dev));
+ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev);
+ bool msi = !(iommu->fctl & RISCV_IOMMU_FCTL_WSI);
+
+ riscv_iommu_remove(iommu);
+
+ if (msi)
+ platform_device_msi_free_irqs_all(&pdev->dev);
+};
+
+static void riscv_iommu_platform_shutdown(struct platform_device *pdev)
+{
+ riscv_iommu_disable(dev_get_drvdata(&pdev->dev));
};
static const struct of_device_id riscv_iommu_of_match[] = {
@@ -82,6 +153,7 @@ static const struct of_device_id riscv_iommu_of_match[] = {
static struct platform_driver riscv_iommu_platform_driver = {
.probe = riscv_iommu_platform_probe,
.remove = riscv_iommu_platform_remove,
+ .shutdown = riscv_iommu_platform_shutdown,
.driver = {
.name = "riscv,iommu",
.of_match_table = riscv_iommu_of_match,
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8a05def774bd..8f049d4a0e2c 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -240,6 +240,12 @@ static int riscv_iommu_queue_enable(struct riscv_iommu_device *iommu,
return rc;
}
+ /* Empty queue before enabling it */
+ if (queue->qid == RISCV_IOMMU_INTR_CQ)
+ riscv_iommu_writel(queue->iommu, Q_TAIL(queue), 0);
+ else
+ riscv_iommu_writel(queue->iommu, Q_HEAD(queue), 0);
+
/*
* Enable queue with interrupts, clear any memory fault if any.
* Wait for the hardware to acknowledge request and activate queue
@@ -645,9 +651,11 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
* This is best effort IOMMU translation shutdown flow.
* Disable IOMMU without waiting for hardware response.
*/
-static void riscv_iommu_disable(struct riscv_iommu_device *iommu)
+void riscv_iommu_disable(struct riscv_iommu_device *iommu)
{
- riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, 0);
+ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP,
+ FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE,
+ RISCV_IOMMU_DDTP_IOMMU_MODE_BARE));
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0);
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0);
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0);
@@ -1270,7 +1278,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
dma_addr_t iova)
{
struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain);
- unsigned long pte_size;
+ size_t pte_size;
unsigned long *ptr;
ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size);
diff --git a/drivers/iommu/riscv/iommu.h b/drivers/iommu/riscv/iommu.h
index b1c4664542b4..46df79dd5495 100644
--- a/drivers/iommu/riscv/iommu.h
+++ b/drivers/iommu/riscv/iommu.h
@@ -64,6 +64,7 @@ struct riscv_iommu_device {
int riscv_iommu_init(struct riscv_iommu_device *iommu);
void riscv_iommu_remove(struct riscv_iommu_device *iommu);
+void riscv_iommu_disable(struct riscv_iommu_device *iommu);
#define riscv_iommu_readl(iommu, addr) \
readl_relaxed((iommu)->reg + (addr))
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4b369419b32c..323cc665c357 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -25,6 +25,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include "iommu-pages.h"
@@ -611,7 +612,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
dev_err(iommu->dev, "Page fault at %pad of type %s\n",
&iova,
- (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+ str_write_read(flags == IOMMU_FAULT_WRITE));
log_iova(iommu, i, iova);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9bee02db1643..be063bfb50c4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -534,8 +534,9 @@ config LS1X_IRQ
Support for the Loongson-1 platform Interrupt Controller.
config TI_SCI_INTR_IRQCHIP
- bool
+ tristate "TI SCI INTR Interrupt Controller"
depends on TI_SCI_PROTOCOL
+ depends on ARCH_K3 || COMPILE_TEST
select IRQ_DOMAIN_HIERARCHY
help
This enables the irqchip driver support for K3 Interrupt router
@@ -544,8 +545,9 @@ config TI_SCI_INTR_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_SCI_INTA_IRQCHIP
- bool
+ tristate "TI SCI INTA Interrupt Controller"
depends on TI_SCI_PROTOCOL
+ depends on ARCH_K3 || (COMPILE_TEST && ARM64)
select IRQ_DOMAIN_HIERARCHY
select TI_SCI_INTA_MSI_DOMAIN
help
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index c988886917f7..db4c9721fcf2 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -61,32 +61,6 @@ struct brcmstb_l2_intc_data {
u32 saved_mask; /* for suspend/resume */
};
-/**
- * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
- * @d: irq_data
- *
- * Chip has separate enable/disable registers instead of a single mask
- * register and pending interrupt is acknowledged by setting a bit.
- *
- * Note: This function is generic and could easily be added to the
- * generic irqchip implementation if there ever becomes a will to do so.
- * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
- *
- * e.g.: https://patchwork.kernel.org/patch/9831047/
- */
-static void brcmstb_l2_mask_and_ack(struct irq_data *d)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = d->mask;
-
- irq_gc_lock(gc);
- irq_reg_writel(gc, mask, ct->regs.disable);
- *ct->mask_cache &= ~mask;
- irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
-}
-
static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
@@ -248,7 +222,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
if (init_params->cpu_clear >= 0) {
ct->regs.ack = init_params->cpu_clear;
ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
} else {
/* No Ack - but still slightly more efficient to define this */
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 92244cfa0464..8c3ec5734f1e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2045,7 +2045,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
- guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
+ guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
/* Unmap request? */
if (!info)
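guard() from <linux/cleanup.h> scopes the lock to the enclosing block, so the lock class is a one-token choice. guard(raw_spinlock_irq) disables interrupts on entry and re-enables them when the guard goes out of scope, which is wrong when, as presumably here, the path is reached with interrupts already disabled; the plain guard(raw_spinlock) variant leaves the IRQ state untouched. The construct expands roughly to:

	{
		guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
		/* ... critical section ... */
	}	/* raw_spin_unlock() runs automatically here */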
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 79d8cc80693c..76dce0aac246 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1522,7 +1522,7 @@ static int gic_retrigger(struct irq_data *data)
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
- if (cmd == CPU_PM_EXIT) {
+ if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
if (gic_dist_security_disabled())
gic_enable_redist(true);
gic_cpu_sys_reg_enable();
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 808c781e2548..37e1a03fcbb4 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -141,18 +141,11 @@ static int keystone_irq_probe(struct platform_device *pdev)
if (!kirq)
return -ENOMEM;
- kirq->devctrl_regs =
- syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ kirq->devctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &kirq->devctrl_offset);
if (IS_ERR(kirq->devctrl_regs))
return PTR_ERR(kirq->devctrl_regs);
- ret = of_property_read_u32_index(np, "ti,syscon-dev", 1,
- &kirq->devctrl_offset);
- if (ret) {
- dev_err(dev, "couldn't read the devctrl_offset offset!\n");
- return ret;
- }
-
kirq->irq = platform_get_irq(pdev, 0);
if (kirq->irq < 0)
return kirq->irq;
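syscon_regmap_lookup_by_phandle_args() resolves the phandle and its argument cells in a single call, replacing the former syscon_regmap_lookup_by_phandle() plus of_property_read_u32_index() pair along with its hand-rolled error message. For a binding of the form `ti,syscon-dev = <&devctrl OFFSET>;` the offset lands directly in the output array:

	unsigned int offset;
	struct regmap *regs;

	/* one argument cell after the phandle: the register offset */
	regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
						    1, &offset);
	if (IS_ERR(regs))
		return PTR_ERR(regs);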
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
index 0f6e465dd309..80e55955a29f 100644
--- a/drivers/irqchip/irq-loongarch-avec.c
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -56,6 +56,15 @@ struct avecintc_data {
unsigned int moving;
};
+static inline void avecintc_enable(void)
+{
+ u64 value;
+
+ value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ value |= IOCSR_MISC_FUNC_AVEC_EN;
+ iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
static inline void avecintc_ack_irq(struct irq_data *d)
{
}
@@ -127,6 +136,8 @@ static int avecintc_cpu_online(unsigned int cpu)
guard(raw_spinlock)(&loongarch_avec.lock);
+ avecintc_enable();
+
irq_matrix_online(loongarch_avec.vector_matrix);
pending_list_init(cpu);
@@ -339,7 +350,6 @@ static int __init irq_matrix_init(void)
static int __init avecintc_init(struct irq_domain *parent)
{
int ret, parent_irq;
- unsigned long value;
raw_spin_lock_init(&loongarch_avec.lock);
@@ -378,9 +388,7 @@ static int __init avecintc_init(struct irq_domain *parent)
"irqchip/loongarch/avecintc:starting",
avecintc_cpu_online, avecintc_cpu_offline);
#endif
- value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
- value |= IOCSR_MISC_FUNC_AVEC_EN;
- iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
+ avecintc_enable();
return ret;
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index bb92fd85e975..0b4312152024 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -186,7 +186,8 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ IRQCHIP_SKIP_SET_WAKE;
gc->chip_types[0].regs.ack = reg_offs->pend;
gc->chip_types[0].regs.mask = reg_offs->enable;
gc->chip_types[0].regs.type = reg_offs->ctrl;
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index b83f5cbab123..a887efba262c 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -743,3 +743,4 @@ module_platform_driver(ti_sci_inta_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index c027cd9e4a69..b49a73106c69 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -303,3 +303,4 @@ module_platform_driver(ti_sci_intr_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>");
MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index cc219f28d317..960c343d5781 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -52,7 +52,7 @@ static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
- seq_printf(p, "%s", dev_name(&data->pdev->dev));
+ seq_puts(p, dev_name(&data->pdev->dev));
}
static const struct irq_chip ts4800_chip = {
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 1eeb0d0156ce..0ee7b6b71f5f 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -35,11 +35,10 @@ void __init irqchip_init(void)
int platform_irqchip_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *par_np = of_irq_find_parent(np);
+ struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
if (!irq_init_cb) {
- of_node_put(par_np);
return -EINVAL;
}
@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller can check for specific domains as necessary.
*/
if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
- of_node_put(par_np);
return -EPROBE_DEFER;
}
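__free(device_node) attaches of_node_put() as a scope-exit action to the pointer, so every return path drops the reference automatically; that is why both explicit of_node_put() calls above could be deleted. The idiom in its minimal form (of_node_put() tolerates NULL, so even the failure paths are safe):

	struct device_node *par_np __free(device_node) = of_irq_find_parent(np);

	if (!par_np)
		return -ENODEV;	/* cleanup still runs; put(NULL) is a no-op */

	/* any return below now implies of_node_put(par_np) */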
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index e34a7a46754e..8ec2d4d4f135 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -294,20 +294,6 @@ get_Bprotocol4mask(u_int m)
return NULL;
}
-struct Bprotocol *
-get_Bprotocol4id(u_int id)
-{
- u_int m;
-
- if (id < ISDN_P_B_START || id > 63) {
- printk(KERN_WARNING "%s id not in range %d\n",
- __func__, id);
- return NULL;
- }
- m = 1 << (id & ISDN_P_B_MASK);
- return get_Bprotocol4mask(m);
-}
-
int
mISDN_register_Bprotocol(struct Bprotocol *bp)
{
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
index 42599f49c189..5617c06de8e4 100644
--- a/drivers/isdn/mISDN/core.h
+++ b/drivers/isdn/mISDN/core.h
@@ -55,7 +55,6 @@ extern void __add_layer2(struct mISDNchannel *, struct mISDNstack *);
extern u_int get_all_Bprotocols(void);
struct Bprotocol *get_Bprotocol4mask(u_int);
-struct Bprotocol *get_Bprotocol4id(u_int);
extern int mISDN_inittimer(u_int *);
extern void mISDN_timer_cleanup(void);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b784bb74a837..2b27d043921c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -217,6 +217,8 @@ config LEDS_TURRIS_OMNIA
depends on I2C
depends on MACH_ARMADA_38X || COMPILE_TEST
depends on OF
+ depends on TURRIS_OMNIA_MCU
+ depends on TURRIS_OMNIA_MCU_GPIO
select LEDS_TRIGGERS
help
This option enables basic support for the LEDs found on the front
@@ -511,6 +513,18 @@ config LEDS_LP8860
on the LP8860 4 channel LED driver using the I2C communication
bus.
+config LEDS_LP8864
+ tristate "LED support for the TI LP8864/LP8866 4/6 channel LED drivers"
+ depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TI LP8864-Q1,
+ LP8864S-Q1, LP8866-Q1, LP8866S-Q1 4/6 channel LED backlight
+ drivers with I2C interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp8864.
+
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
depends on LEDS_CLASS && BROKEN
@@ -580,6 +594,17 @@ config LEDS_PCA995X
LED driver chips accessed via the I2C bus. Supported
devices include PCA9955BTW, PCA9952TW and PCA9955TW.
+config LEDS_QNAP_MCU
+ tristate "LED Support for QNAP MCU controllers"
+ depends on LEDS_CLASS
+ depends on MFD_QNAP_MCU
+ help
+ This option enables support for LEDs available on embedded
+ controllers used in QNAP NAS devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called qnap-mcu-leds.
+
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
depends on LEDS_CLASS
@@ -815,6 +840,15 @@ config LEDS_SC27XX_BLTC
This driver can also be built as a module. If so the module will be
called leds-sc27xx-bltc.
+config LEDS_UPBOARD
+ tristate "LED support for the UP board"
+ depends on LEDS_CLASS && MFD_UPBOARD_FPGA
+ help
+ This option enables support for the UP board LEDs.
+
+ This driver can also be built as a module. If so the module will be
+ called leds-upboard.
+
comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
config LEDS_BLINKM
@@ -931,6 +965,16 @@ config LEDS_LM36274
Say Y to enable the LM36274 LED driver for TI LMU devices.
This supports the LED device LM36274.
+config LEDS_ST1202
+ tristate "LED Support for STMicroelectronics LED1202 I2C chips"
+ depends on LEDS_CLASS
+ depends on I2C
+ depends on OF
+ select LEDS_TRIGGERS
+ help
+ Say Y to enable support for LEDs connected to LED1202
+ LED driver chips accessed via the I2C bus.
+
config LEDS_TPS6105X
tristate "LED support for TI TPS6105X"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 18afbb5a23ee..6ad52e219ec6 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o
+obj-$(CONFIG_LEDS_LP8864) += leds-lp8864.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
obj-$(CONFIG_LEDS_MAX5970) += leds-max5970.o
obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
@@ -79,8 +80,10 @@ obj-$(CONFIG_LEDS_PCA995X) += leds-pca995x.o
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
+obj-$(CONFIG_LEDS_QNAP_MCU) += leds-qnap-mcu.o
obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
+obj-$(CONFIG_LEDS_ST1202) += leds-st1202.o
obj-$(CONFIG_LEDS_SUN50I_A100) += leds-sun50i-a100.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
@@ -89,6 +92,7 @@ obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o
obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o
obj-$(CONFIG_LEDS_TPS6105X) += leds-tps6105x.o
obj-$(CONFIG_LEDS_TURRIS_OMNIA) += leds-turris-omnia.o
+obj-$(CONFIG_LEDS_UPBOARD) += leds-upboard.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 2a04ac61574d..c20ac8ccf52b 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -85,13 +85,13 @@ static ssize_t max_brightness_show(struct device *dev,
static DEVICE_ATTR_RO(max_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
-static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
-static struct bin_attribute *led_trigger_bin_attrs[] = {
+static const BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
+static const struct bin_attribute *const led_trigger_bin_attrs[] = {
&bin_attr_trigger,
NULL,
};
static const struct attribute_group led_trigger_group = {
- .bin_attrs = led_trigger_bin_attrs,
+ .bin_attrs_new = led_trigger_bin_attrs,
};
#endif
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 78eb20093b2c..b2d40f87a5ff 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -34,7 +34,7 @@ trigger_relevant(struct led_classdev *led_cdev, struct led_trigger *trig)
}
ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -123,7 +123,7 @@ static int led_trigger_format(char *buf, size_t size,
* copy it.
*/
ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
diff --git a/drivers/leds/leds-cht-wcove.c b/drivers/leds/leds-cht-wcove.c
index 8246f048edcb..9a609dd5acdc 100644
--- a/drivers/leds/leds-cht-wcove.c
+++ b/drivers/leds/leds-cht-wcove.c
@@ -394,7 +394,7 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
led->cdev.pattern_clear = cht_wc_leds_pattern_clear;
led->cdev.max_brightness = 255;
- ret = led_classdev_register(&pdev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
}
@@ -406,10 +406,6 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
static void cht_wc_leds_remove(struct platform_device *pdev)
{
struct cht_wc_leds *leds = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < CHT_WC_LED_COUNT; i++)
- led_classdev_unregister(&leds->leds[i].cdev);
/* Restore LED1 regs if hw-control was active else leave LED1 off */
if (!(leds->led1_initial_regs.ctrl & CHT_WC_LED1_SWCTL))
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 7a136fd81720..06196d851ade 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -265,7 +265,7 @@ static int lp8860_init(struct lp8860_led *led)
goto out;
}
- reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs) / sizeof(lp8860_eeprom_disp_regs[0]);
+ reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
for (i = 0; i < reg_count; i++) {
ret = regmap_write(led->eeprom_regmap,
lp8860_eeprom_disp_regs[i].reg,
diff --git a/drivers/leds/leds-lp8864.c b/drivers/leds/leds-lp8864.c
new file mode 100644
index 000000000000..3afd729d2f8a
--- /dev/null
+++ b/drivers/leds/leds-lp8864.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI LP8864/LP8866 4/6 Channel LED Driver
+ *
+ * Copyright (C) 2024 Siemens AG
+ *
+ * Based on LP8860 driver by Dan Murphy <dmurphy@ti.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define LP8864_BRT_CONTROL 0x00
+#define LP8864_USER_CONFIG1 0x04
+#define LP8864_BRT_MODE_MASK GENMASK(9, 8)
+#define LP8864_BRT_MODE_REG BIT(9) /* Brightness control by DISPLAY_BRT reg */
+#define LP8864_SUPPLY_STATUS 0x0e
+#define LP8864_BOOST_STATUS 0x10
+#define LP8864_LED_STATUS 0x12
+#define LP8864_LED_STATUS_WR_MASK GENMASK(14, 9) /* Writeable bits in the LED_STATUS reg */
+
+/* Textual meaning for status bits, starting from bit 1 */
+static const char *const lp8864_supply_status_msg[] = {
+ "Vin under-voltage fault",
+ "Vin over-voltage fault",
+ "Vdd under-voltage fault",
+ "Vin over-current fault",
+ "Missing charge pump fault",
+ "Charge pump fault",
+ "Missing boost sync fault",
+ "CRC error fault ",
+};
+
+/* Textual meaning for status bits, starting from bit 1 */
+static const char *const lp8864_boost_status_msg[] = {
+ "Boost OVP low fault",
+ "Boost OVP high fault",
+ "Boost over-current fault",
+ "Missing boost FSET resistor fault",
+ "Missing MODE SEL resistor fault",
+ "Missing LED resistor fault",
+ "ISET resistor short to ground fault",
+ "Thermal shutdown fault",
+};
+
+/* Textual meaning for every register bit */
+static const char *const lp8864_led_status_msg[] = {
+ "LED 1 fault",
+ "LED 2 fault",
+ "LED 3 fault",
+ "LED 4 fault",
+ "LED 5 fault",
+ "LED 6 fault",
+ "LED open fault",
+ "LED internal short fault",
+ "LED short to GND fault",
+ NULL, NULL, NULL,
+ "Invalid string configuration fault",
+ NULL,
+ "I2C time out fault",
+};
+
+/**
+ * struct lp8864_led - LED driver private data
+ * @client: Pointer to the I2C client
+ * @led_dev: led class device pointer
+ * @regmap: Device's register map
+ * @led_status_mask: Helps to report LED fault only once
+ */
+struct lp8864_led {
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ u16 led_status_mask;
+};
+
+static int lp8864_fault_check(struct lp8864_led *led)
+{
+ int ret, i;
+ unsigned int val;
+
+ ret = regmap_read(led->regmap, LP8864_SUPPLY_STATUS, &val);
+ if (ret)
+ goto err;
+
+ /* Odd bits are status bits, even bits are clear bits */
+ for (i = 0; i < ARRAY_SIZE(lp8864_supply_status_msg); i++)
+ if (val & BIT(i * 2 + 1))
+ dev_warn(&led->client->dev, "%s\n", lp8864_supply_status_msg[i]);
+
+ /*
+ * Clear bits have an index preceding the corresponding Status bits;
+ * both have to be written "1" simultaneously to clear the corresponding
+ * Status bit.
+ */
+ if (val)
+ ret = regmap_write(led->regmap, LP8864_SUPPLY_STATUS, val >> 1 | val);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(led->regmap, LP8864_BOOST_STATUS, &val);
+ if (ret)
+ goto err;
+
+ /* Odd bits are status bits, even bits are clear bits */
+ for (i = 0; i < ARRAY_SIZE(lp8864_boost_status_msg); i++)
+ if (val & BIT(i * 2 + 1))
+ dev_warn(&led->client->dev, "%s\n", lp8864_boost_status_msg[i]);
+
+ if (val)
+ ret = regmap_write(led->regmap, LP8864_BOOST_STATUS, val >> 1 | val);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(led->regmap, LP8864_LED_STATUS, &val);
+ if (ret)
+ goto err;
+
+ /*
+ * Clear already reported faults that maintain their value until device
+ * power-down
+ */
+ val &= ~led->led_status_mask;
+
+ for (i = 0; i < ARRAY_SIZE(lp8864_led_status_msg); i++)
+ if (lp8864_led_status_msg[i] && val & BIT(i))
+ dev_warn(&led->client->dev, "%s\n", lp8864_led_status_msg[i]);
+
+ /*
+ * Mark those which maintain their value until device power-down as
+ * "already reported"
+ */
+ led->led_status_mask |= val & ~LP8864_LED_STATUS_WR_MASK;
+
+ /*
+ * Only bits 14, 12, 10 have to be cleared here, but others are RO,
+ * we don't care what we write to them.
+ */
+ if (val & LP8864_LED_STATUS_WR_MASK)
+ ret = regmap_write(led->regmap, LP8864_LED_STATUS, val >> 1 | val);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&led->client->dev, "Failed to read/clear faults (%pe)\n", ERR_PTR(ret));
+
+ return ret;
+}
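+
+/*
+ * A worked example of the clear scheme above (inferred from the loop logic,
+ * not quoted from the datasheet): status bits sit at odd positions with
+ * their clear bits one position below. If only the "Vin over-voltage fault"
+ * status bit is set, val == BIT(3), and (val >> 1 | val) == BIT(2) | BIT(3),
+ * which writes the status bit and its clear bit as "1" together and thus
+ * acknowledges the fault.
+ */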
+
+static int lp8864_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lp8864_led *led = container_of(led_cdev, struct lp8864_led, led_dev);
+ /* Scale 0..LED_FULL into 16-bit HW brightness */
+ unsigned int val = brt_val * 0xffff / LED_FULL;
+ int ret;
+
+ ret = lp8864_fault_check(led);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(led->regmap, LP8864_BRT_CONTROL, val);
+ if (ret)
+ dev_err(&led->client->dev, "Failed to write brightness value\n");
+
+ return ret;
+}
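+
+/*
+ * The scaling above maps the classdev range 0..LED_FULL (255) linearly onto
+ * the 16-bit hardware register: e.g. brt_val = 1 gives 1 * 0xffff / 255 =
+ * 0x0101, and brt_val = LED_FULL gives 0xffff.
+ */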
+
+static enum led_brightness lp8864_brightness_get(struct led_classdev *led_cdev)
+{
+ struct lp8864_led *led = container_of(led_cdev, struct lp8864_led, led_dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(led->regmap, LP8864_BRT_CONTROL, &val);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to read brightness value\n");
+ return ret;
+ }
+
+ /* Scale 16-bit HW brightness into 0..LED_FULL */
+ return val * LED_FULL / 0xffff;
+}
+
+static const struct regmap_config lp8864_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static void lp8864_disable_gpio(void *data)
+{
+ struct gpio_desc *gpio = data;
+
+ gpiod_set_value(gpio, 0);
+}
+
+static int lp8864_probe(struct i2c_client *client)
+{
+ int ret;
+ struct lp8864_led *led;
+ struct device_node *np = dev_of_node(&client->dev);
+ struct device_node *child_node;
+ struct led_init_data init_data = {};
+ struct gpio_desc *enable_gpio;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ child_node = of_get_next_available_child(np, NULL);
+ if (!child_node) {
+ dev_err(&client->dev, "No LED function defined\n");
+ return -EINVAL;
+ }
+
+ ret = devm_regulator_get_enable_optional(&client->dev, "vled");
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&client->dev, ret, "Failed to enable vled regulator\n");
+
+ enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(enable_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(enable_gpio),
+ "Failed to get enable GPIO\n");
+
+ ret = devm_add_action_or_reset(&client->dev, lp8864_disable_gpio, enable_gpio);
+ if (ret)
+ return ret;
+
+ led->client = client;
+ led->led_dev.brightness_set_blocking = lp8864_brightness_set;
+ led->led_dev.brightness_get = lp8864_brightness_get;
+
+ led->regmap = devm_regmap_init_i2c(client, &lp8864_regmap_config);
+ if (IS_ERR(led->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(led->regmap),
+ "Failed to allocate regmap\n");
+
+ /* Control brightness by DISPLAY_BRT register */
+ ret = regmap_update_bits(led->regmap, LP8864_USER_CONFIG1, LP8864_BRT_MODE_MASK,
+ LP8864_BRT_MODE_REG);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to set brightness control mode\n");
+ return ret;
+ }
+
+ ret = lp8864_fault_check(led);
+ if (ret)
+ return ret;
+
+ init_data.fwnode = of_fwnode_handle(child_node);
+ init_data.devicename = "lp8864";
+ init_data.default_label = ":display_cluster";
+
+ ret = devm_led_classdev_register_ext(&client->dev, &led->led_dev, &init_data);
+ if (ret)
+ dev_err(&client->dev, "Failed to register LED device (%pe)\n", ERR_PTR(ret));
+
+ return ret;
+}
+
+static const struct i2c_device_id lp8864_id[] = {
+ { "lp8864" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lp8864_id);
+
+static const struct of_device_id of_lp8864_leds_match[] = {
+ { .compatible = "ti,lp8864" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_lp8864_leds_match);
+
+static struct i2c_driver lp8864_driver = {
+ .driver = {
+ .name = "lp8864",
+ .of_match_table = of_lp8864_leds_match,
+ },
+ .probe = lp8864_probe,
+ .id_table = lp8864_id,
+};
+module_i2c_driver(lp8864_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8864/LP8866 LED driver");
+MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@siemens.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index af5a908b8d9e..e95287416ef8 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -439,6 +439,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
}
gpio_ext_pdev = of_find_device_by_node(gpio_ext_np);
if (!gpio_ext_pdev) {
+ of_node_put(gpio_ext_np);
dev_err(dev, "Failed to find platform device for gpio-ext\n");
return -ENODEV;
}
diff --git a/drivers/leds/leds-qnap-mcu.c b/drivers/leds/leds-qnap-mcu.c
new file mode 100644
index 000000000000..4e4709456261
--- /dev/null
+++ b/drivers/leds/leds-qnap-mcu.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for LEDs found on QNAP MCU devices
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/leds.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+enum qnap_mcu_err_led_mode {
+ QNAP_MCU_ERR_LED_ON = 0,
+ QNAP_MCU_ERR_LED_OFF = 1,
+ QNAP_MCU_ERR_LED_BLINK_FAST = 2,
+ QNAP_MCU_ERR_LED_BLINK_SLOW = 3,
+};
+
+struct qnap_mcu_err_led {
+ struct qnap_mcu *mcu;
+ struct led_classdev cdev;
+ char name[LED_MAX_NAME_SIZE];
+ u8 num;
+ u8 mode;
+};
+
+static inline struct qnap_mcu_err_led *
+ cdev_to_qnap_mcu_err_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct qnap_mcu_err_led, cdev);
+}
+
+static int qnap_mcu_err_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qnap_mcu_err_led *err_led = cdev_to_qnap_mcu_err_led(led_cdev);
+ u8 cmd[] = { '@', 'R', '0' + err_led->num, '0' };
+
+ /* Don't disturb a possible set blink-mode if LED stays on */
+ if (brightness != 0 && err_led->mode >= QNAP_MCU_ERR_LED_BLINK_FAST)
+ return 0;
+
+ err_led->mode = brightness ? QNAP_MCU_ERR_LED_ON : QNAP_MCU_ERR_LED_OFF;
+ cmd[3] = '0' + err_led->mode;
+
+ return qnap_mcu_exec_with_ack(err_led->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_err_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qnap_mcu_err_led *err_led = cdev_to_qnap_mcu_err_led(led_cdev);
+ u8 cmd[] = { '@', 'R', '0' + err_led->num, '0' };
+
+ /* LED is off, nothing to do */
+ if (err_led->mode == QNAP_MCU_ERR_LED_OFF)
+ return 0;
+
+ if (*delay_on < 500) {
+ *delay_on = 100;
+ *delay_off = 100;
+ err_led->mode = QNAP_MCU_ERR_LED_BLINK_FAST;
+ } else {
+ *delay_on = 500;
+ *delay_off = 500;
+ err_led->mode = QNAP_MCU_ERR_LED_BLINK_SLOW;
+ }
+
+ cmd[3] = '0' + err_led->mode;
+
+ return qnap_mcu_exec_with_ack(err_led->mcu, cmd, sizeof(cmd));
+}
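+
+/*
+ * Example of the resulting wire format (an inference from the command
+ * layout above, not from MCU documentation): switching error LED 0 to
+ * QNAP_MCU_ERR_LED_BLINK_FAST sends { '@', 'R', '0', '2' }, i.e. the
+ * ASCII sequence "@R02".
+ */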
+
+static int qnap_mcu_register_err_led(struct device *dev, struct qnap_mcu *mcu, int num_err_led)
+{
+ struct qnap_mcu_err_led *err_led;
+ int ret;
+
+ err_led = devm_kzalloc(dev, sizeof(*err_led), GFP_KERNEL);
+ if (!err_led)
+ return -ENOMEM;
+
+ err_led->mcu = mcu;
+ err_led->num = num_err_led;
+ err_led->mode = QNAP_MCU_ERR_LED_OFF;
+
+ scnprintf(err_led->name, LED_MAX_NAME_SIZE, "hdd%d:red:status", num_err_led + 1);
+ err_led->cdev.name = err_led->name;
+
+ err_led->cdev.brightness_set_blocking = qnap_mcu_err_led_set;
+ err_led->cdev.blink_set = qnap_mcu_err_led_blink_set;
+ err_led->cdev.brightness = 0;
+ err_led->cdev.max_brightness = 1;
+
+ ret = devm_led_classdev_register(dev, &err_led->cdev);
+ if (ret)
+ return ret;
+
+ return qnap_mcu_err_led_set(&err_led->cdev, 0);
+}
+
+enum qnap_mcu_usb_led_mode {
+ QNAP_MCU_USB_LED_ON = 1,
+ QNAP_MCU_USB_LED_OFF = 3,
+ QNAP_MCU_USB_LED_BLINK = 2,
+};
+
+struct qnap_mcu_usb_led {
+ struct qnap_mcu *mcu;
+ struct led_classdev cdev;
+ u8 mode;
+};
+
+static inline struct qnap_mcu_usb_led *
+ cdev_to_qnap_mcu_usb_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct qnap_mcu_usb_led, cdev);
+}
+
+static int qnap_mcu_usb_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qnap_mcu_usb_led *usb_led = cdev_to_qnap_mcu_usb_led(led_cdev);
+ u8 cmd[] = { '@', 'C', 0 };
+
+ /* Don't disturb a possible set blink-mode if LED stays on */
+ if (brightness != 0 && usb_led->mode == QNAP_MCU_USB_LED_BLINK)
+ return 0;
+
+ usb_led->mode = brightness ? QNAP_MCU_USB_LED_ON : QNAP_MCU_USB_LED_OFF;
+
+ /*
+ * Byte 3 is shared between the USB LED target on/off/blink
+ * and also the buzzer control (in the input driver)
+ */
+ cmd[2] = 'D' + usb_led->mode;
+
+ return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
+}
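+
+/*
+ * Mode-to-byte mapping implied by cmd[2] = 'D' + mode (an inference from
+ * the code above, not from MCU documentation): ON (1) -> 'E', BLINK (2)
+ * -> 'F', OFF (3) -> 'G'; turning the LED on therefore sends
+ * { '@', 'C', 'E' }.
+ */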
+
+static int qnap_mcu_usb_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qnap_mcu_usb_led *usb_led = cdev_to_qnap_mcu_usb_led(led_cdev);
+ u8 cmd[] = { '@', 'C', 0 };
+
+ /* LED is off, nothing to do */
+ if (usb_led->mode == QNAP_MCU_USB_LED_OFF)
+ return 0;
+
+ *delay_on = 250;
+ *delay_off = 250;
+ usb_led->mode = QNAP_MCU_USB_LED_BLINK;
+
+ /*
+ * Byte 3 is shared between the USB LED target on/off/blink
+ * and also the buzzer control (in the input driver)
+ */
+ cmd[2] = 'D' + usb_led->mode;
+
+ return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_register_usb_led(struct device *dev, struct qnap_mcu *mcu)
+{
+ struct qnap_mcu_usb_led *usb_led;
+ int ret;
+
+ usb_led = devm_kzalloc(dev, sizeof(*usb_led), GFP_KERNEL);
+ if (!usb_led)
+ return -ENOMEM;
+
+ usb_led->mcu = mcu;
+ usb_led->mode = QNAP_MCU_USB_LED_OFF;
+ usb_led->cdev.name = "usb:blue:disk";
+ usb_led->cdev.brightness_set_blocking = qnap_mcu_usb_led_set;
+ usb_led->cdev.blink_set = qnap_mcu_usb_led_blink_set;
+ usb_led->cdev.brightness = 0;
+ usb_led->cdev.max_brightness = 1;
+
+ ret = devm_led_classdev_register(dev, &usb_led->cdev);
+ if (ret)
+ return ret;
+
+ return qnap_mcu_usb_led_set(&usb_led->cdev, 0);
+}
+
+static int qnap_mcu_leds_probe(struct platform_device *pdev)
+{
+ struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ const struct qnap_mcu_variant *variant = pdev->dev.platform_data;
+ int ret;
+
+ for (int i = 0; i < variant->num_drives; i++) {
+ ret = qnap_mcu_register_err_led(&pdev->dev, mcu, i);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register error LED %d\n", i);
+ }
+
+ if (variant->usb_led) {
+ ret = qnap_mcu_register_usb_led(&pdev->dev, mcu);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register USB LED\n");
+ }
+
+ return 0;
+}
+
+static struct platform_driver qnap_mcu_leds_driver = {
+ .probe = qnap_mcu_leds_probe,
+ .driver = {
+ .name = "qnap-mcu-leds",
+ },
+};
+module_platform_driver(qnap_mcu_leds_driver);
+
+MODULE_ALIAS("platform:qnap-mcu-leds");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU LEDs driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c
new file mode 100644
index 000000000000..b691c4886993
--- /dev/null
+++ b/drivers/leds/leds-st1202.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LED driver for STMicroelectronics LED1202 chip
+ *
+ * Copyright (C) 2024 Remote-Tech Ltd. UK
+ */
+
+#include <linux/cleanup.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define ST1202_CHAN_DISABLE_ALL 0x00
+#define ST1202_CHAN_ENABLE_HIGH 0x03
+#define ST1202_CHAN_ENABLE_LOW 0x02
+#define ST1202_CONFIG_REG 0x04
+/* PATS: Pattern sequence feature enable */
+#define ST1202_CONFIG_REG_PATS BIT(7)
+/* PATSR: Pattern sequence runs (self-clear when sequence is finished) */
+#define ST1202_CONFIG_REG_PATSR BIT(6)
+#define ST1202_CONFIG_REG_SHFT BIT(3)
+#define ST1202_DEV_ENABLE 0x01
+#define ST1202_DEV_ENABLE_ON BIT(0)
+#define ST1202_DEV_ENABLE_RESET BIT(7)
+#define ST1202_DEVICE_ID 0x00
+#define ST1202_ILED_REG0 0x09
+#define ST1202_MAX_LEDS 12
+#define ST1202_MAX_PATTERNS 8
+#define ST1202_MILLIS_PATTERN_DUR_MAX 5660
+#define ST1202_MILLIS_PATTERN_DUR_MIN 22
+#define ST1202_PATTERN_DUR 0x16
+#define ST1202_PATTERN_PWM 0x1E
+#define ST1202_PATTERN_REP 0x15
+
+struct st1202_led {
+ struct fwnode_handle *fwnode;
+ struct led_classdev led_cdev;
+ struct st1202_chip *chip;
+ bool is_active;
+ int led_num;
+};
+
+struct st1202_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct st1202_led leds[ST1202_MAX_LEDS];
+};
+
+static struct st1202_led *cdev_to_st1202_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct st1202_led, led_cdev);
+}
+
+static int st1202_read_reg(struct st1202_chip *chip, int reg, uint8_t *val)
+{
+ struct device *dev = &chip->client->dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read register [0x%x]: %d\n", reg, ret);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+ return 0;
+}
+
+static int st1202_write_reg(struct st1202_chip *chip, int reg, uint8_t val)
+{
+ struct device *dev = &chip->client->dev;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ if (ret != 0)
+ dev_err(dev, "Failed to write %d to register [0x%x]: %d\n", val, reg, ret);
+
+ return ret;
+}
+
+static uint8_t st1202_prescalar_to_miliseconds(unsigned int value)
+{
+ return value / ST1202_MILLIS_PATTERN_DUR_MIN - 1;
+}
+
+static int st1202_pwm_pattern_write(struct st1202_chip *chip, int led_num,
+ int pattern, unsigned int value)
+{
+ u8 value_l, value_h;
+ int ret;
+
+ value_l = (u8)value;
+ value_h = (u8)(value >> 8);
+
+ /*
+ * Datasheet: Register address low = 1Eh + 2*(xh) + 18h*(yh),
+ * where x is the channel number (led number) in hexadecimal (x = 00h .. 0Bh)
+ * and y is the pattern number in hexadecimal (y = 00h .. 07h)
+ */
+ ret = st1202_write_reg(chip, (ST1202_PATTERN_PWM + (led_num * 2) + 0x18 * pattern),
+ value_l);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Datasheet: Register address high = 1Eh + 01h + 2*(xh) + 18h*(yh),
+ * where x is the channel number in hexadecimal (x = 00h .. 0Bh)
+ * and y is the pattern number in hexadecimal (y = 00h .. 07h)
+ */
+ ret = st1202_write_reg(chip, (ST1202_PATTERN_PWM + 0x1 + (led_num * 2) + 0x18 * pattern),
+ value_h);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
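+
+/*
+ * Worked example of the datasheet formulas above: for channel x = 01h and
+ * pattern y = 02h, the low register is 0x1E + 2 * 0x01 + 0x18 * 0x02 = 0x50
+ * and the high register is 0x51.
+ */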
+
+static int st1202_duration_pattern_write(struct st1202_chip *chip, int pattern,
+ unsigned int value)
+{
+ return st1202_write_reg(chip, (ST1202_PATTERN_DUR + pattern),
+ st1202_prescalar_to_miliseconds(value));
+}
+
+static void st1202_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct st1202_led *led = cdev_to_st1202_led(led_cdev);
+ struct st1202_chip *chip = led->chip;
+
+ guard(mutex)(&chip->lock);
+
+ st1202_write_reg(chip, ST1202_ILED_REG0 + led->led_num, value);
+}
+
+static enum led_brightness st1202_brightness_get(struct led_classdev *led_cdev)
+{
+ struct st1202_led *led = cdev_to_st1202_led(led_cdev);
+ struct st1202_chip *chip = led->chip;
+ u8 value = 0;
+
+ guard(mutex)(&chip->lock);
+
+ st1202_read_reg(chip, ST1202_ILED_REG0 + led->led_num, &value);
+
+ return value;
+}
+
+static int st1202_channel_set(struct st1202_chip *chip, int led_num, bool active)
+{
+ u8 chan_low, chan_high;
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ if (led_num <= 7) {
+ ret = st1202_read_reg(chip, ST1202_CHAN_ENABLE_LOW, &chan_low);
+ if (ret < 0)
+ return ret;
+
+ chan_low = active ? chan_low | BIT(led_num) : chan_low & ~BIT(led_num);
+
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_LOW, chan_low);
+ if (ret < 0)
+ return ret;
+
+ } else {
+ ret = st1202_read_reg(chip, ST1202_CHAN_ENABLE_HIGH, &chan_high);
+ if (ret < 0)
+ return ret;
+
+ chan_high = active ? chan_high | (BIT(led_num) >> 8) :
+ chan_high & ~(BIT(led_num) >> 8);
+
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_HIGH, chan_high);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
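+
+/*
+ * Illustration of the shift above: for channels 8..11 the enable bit lands
+ * in the HIGH register, e.g. for led_num = 9, BIT(9) >> 8 == BIT(1), so
+ * bit 1 of ST1202_CHAN_ENABLE_HIGH is set or cleared.
+ */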
+
+static int st1202_led_set(struct led_classdev *ldev, enum led_brightness value)
+{
+ struct st1202_led *led = cdev_to_st1202_led(ldev);
+ struct st1202_chip *chip = led->chip;
+
+ return st1202_channel_set(chip, led->led_num, value == LED_OFF ? false : true);
+}
+
+static int st1202_led_pattern_clear(struct led_classdev *ldev)
+{
+ struct st1202_led *led = cdev_to_st1202_led(ldev);
+ struct st1202_chip *chip = led->chip;
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ for (int patt = 0; patt < ST1202_MAX_PATTERNS; patt++) {
+ ret = st1202_pwm_pattern_write(chip, led->led_num, patt, LED_OFF);
+ if (ret != 0)
+ return ret;
+
+ ret = st1202_duration_pattern_write(chip, patt, ST1202_MILLIS_PATTERN_DUR_MIN);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int st1202_led_pattern_set(struct led_classdev *ldev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ struct st1202_led *led = cdev_to_st1202_led(ldev);
+ struct st1202_chip *chip = led->chip;
+ int ret;
+
+ if (len > ST1202_MAX_PATTERNS)
+ return -EINVAL;
+
+ guard(mutex)(&chip->lock);
+
+ for (int patt = 0; patt < len; patt++) {
+ if (pattern[patt].delta_t < ST1202_MILLIS_PATTERN_DUR_MIN ||
+ pattern[patt].delta_t > ST1202_MILLIS_PATTERN_DUR_MAX)
+ return -EINVAL;
+
+ ret = st1202_pwm_pattern_write(chip, led->led_num, patt, pattern[patt].brightness);
+ if (ret != 0)
+ return ret;
+
+ ret = st1202_duration_pattern_write(chip, patt, pattern[patt].delta_t);
+ if (ret != 0)
+ return ret;
+ }
+
+ ret = st1202_write_reg(chip, ST1202_PATTERN_REP, repeat);
+ if (ret != 0)
+ return ret;
+
+ ret = st1202_write_reg(chip, ST1202_CONFIG_REG, (ST1202_CONFIG_REG_PATSR |
+ ST1202_CONFIG_REG_PATS | ST1202_CONFIG_REG_SHFT));
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static int st1202_dt_init(struct st1202_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+ struct st1202_led *led;
+ int err, reg;
+
+ for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
+ struct led_init_data init_data = {};
+
+ err = of_property_read_u32(child, "reg", &reg);
+ if (err)
+ return dev_err_probe(dev, err, "Invalid register\n");
+
+ led = &chip->leds[reg];
+ led->is_active = true;
+ led->fwnode = of_fwnode_handle(child);
+
+ led->led_cdev.max_brightness = U8_MAX;
+ led->led_cdev.brightness_set_blocking = st1202_led_set;
+ led->led_cdev.pattern_set = st1202_led_pattern_set;
+ led->led_cdev.pattern_clear = st1202_led_pattern_clear;
+ led->led_cdev.default_trigger = "pattern";
+
+ init_data.fwnode = led->fwnode;
+ init_data.devicename = "st1202";
+ init_data.default_label = ":";
+
+ err = devm_led_classdev_register_ext(dev, &led->led_cdev, &init_data);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to register LED class device\n");
+
+ led->led_cdev.brightness_set = st1202_brightness_set;
+ led->led_cdev.brightness_get = st1202_brightness_get;
+ }
+
+ return 0;
+}
+
+static int st1202_setup(struct st1202_chip *chip)
+{
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ /*
+ * Once the supply voltage is applied, the LED1202 executes some internal checks,
+ * afterwards it stops the oscillator and puts the internal LDO in quiescent mode.
+ * To start the device, the EN bit must be set inside the "Device Enable" register at
+ * address 01h. As soon as EN is set, the LED1202 loads the adjustment parameters
+ * from the internal non-volatile memory and performs an auto-calibration procedure
+ * in order to increase the output current precision.
+ * Such initialization lasts about 6.5 ms.
+ */
+
+ /* Reset the chip during setup */
+ ret = st1202_write_reg(chip, ST1202_DEV_ENABLE, ST1202_DEV_ENABLE_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Enable phase-shift delay feature */
+ ret = st1202_write_reg(chip, ST1202_CONFIG_REG, ST1202_CONFIG_REG_SHFT);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the device */
+ ret = st1202_write_reg(chip, ST1202_DEV_ENABLE, ST1202_DEV_ENABLE_ON);
+ if (ret < 0)
+ return ret;
+
+ /* Duration of initialization */
+ usleep_range(6500, 10000);
+
+ /* Deactivate all LEDs (channels) and activate only the ones found in Device Tree */
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_LOW, ST1202_CHAN_DISABLE_ALL);
+ if (ret < 0)
+ return ret;
+
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_HIGH, ST1202_CHAN_DISABLE_ALL);
+ if (ret < 0)
+ return ret;
+
+ ret = st1202_write_reg(chip, ST1202_CONFIG_REG,
+ ST1202_CONFIG_REG_PATS | ST1202_CONFIG_REG_PATSR);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int st1202_probe(struct i2c_client *client)
+{
+ struct st1202_chip *chip;
+ struct st1202_led *led;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return dev_err_probe(&client->dev, -EIO, "SMBus byte data not supported\n");
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ devm_mutex_init(&client->dev, &chip->lock);
+ chip->client = client;
+
+ ret = st1202_dt_init(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = st1202_setup(chip);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < ST1202_MAX_LEDS; i++) {
+ led = &chip->leds[i];
+ led->chip = chip;
+ led->led_num = i;
+
+ if (!led->is_active)
+ continue;
+
+ ret = st1202_channel_set(led->chip, led->led_num, true);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to activate LED channel\n");
+
+ ret = st1202_led_pattern_clear(&led->led_cdev);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to clear LED pattern\n");
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id st1202_id[] = {
+ { "st1202-i2c" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, st1202_id);
+
+static const struct of_device_id st1202_dt_ids[] = {
+ { .compatible = "st,led1202" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st1202_dt_ids);
+
+static struct i2c_driver st1202_driver = {
+ .driver = {
+ .name = "leds-st1202",
+ .of_match_table = of_match_ptr(st1202_dt_ids),
+ },
+ .probe = st1202_probe,
+ .id_table = st1202_id,
+};
+module_i2c_driver(st1202_driver);
+
+MODULE_AUTHOR("Remote Tech LTD");
+MODULE_DESCRIPTION("STMicroelectronics LED1202 : 12-channel constant current LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 2de825ac08b3..7d3b24c8ecae 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -2,7 +2,7 @@
/*
* CZ.NIC's Turris Omnia LEDs driver
*
- * 2020, 2023 by Marek Behún <kabel@kernel.org>
+ * 2020, 2023, 2024 by Marek Behún <kabel@kernel.org>
*/
#include <linux/i2c.h>
@@ -10,35 +10,23 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/turris-omnia-mcu-interface.h>
#define OMNIA_BOARD_LEDS 12
#define OMNIA_LED_NUM_CHANNELS 3
-/* MCU controller commands at I2C address 0x2a */
-#define OMNIA_MCU_I2C_ADDR 0x2a
-
-#define CMD_GET_STATUS_WORD 0x01
-#define STS_FEATURES_SUPPORTED BIT(2)
-
-#define CMD_GET_FEATURES 0x10
-#define FEAT_LED_GAMMA_CORRECTION BIT(5)
-
-/* LED controller commands at I2C address 0x2b */
-#define CMD_LED_MODE 0x03
-#define CMD_LED_MODE_LED(l) ((l) & 0x0f)
-#define CMD_LED_MODE_USER 0x10
-
-#define CMD_LED_STATE 0x04
-#define CMD_LED_STATE_LED(l) ((l) & 0x0f)
-#define CMD_LED_STATE_ON 0x10
-
-#define CMD_LED_COLOR 0x05
-#define CMD_LED_SET_BRIGHTNESS 0x07
-#define CMD_LED_GET_BRIGHTNESS 0x08
-
-#define CMD_SET_GAMMA_CORRECTION 0x30
-#define CMD_GET_GAMMA_CORRECTION 0x31
-
+/* MCU controller I2C address 0x2a, needed for detecting MCU features */
+#define OMNIA_MCU_I2C_ADDR 0x2a
+
+/**
+ * struct omnia_led - per-LED part of driver private data structure
+ * @mc_cdev: multi-color LED class device
+ * @subled_info: per-channel information
+ * @cached_channels: cached values of per-channel brightness that were sent to the MCU
+ * @on: whether the LED was set on
+ * @hwtrig: whether the LED blinking was offloaded to the MCU
+ * @reg: LED identifier to the MCU
+ */
struct omnia_led {
struct led_classdev_mc mc_cdev;
struct mc_subled subled_info[OMNIA_LED_NUM_CHANNELS];
@@ -49,73 +37,38 @@ struct omnia_led {
#define to_omnia_led(l) container_of(l, struct omnia_led, mc_cdev)
+/**
+ * struct omnia_leds - driver private data structure
+ * @client: I2C client device
+ * @lock: mutex to protect cached state
+ * @has_gamma_correction: whether the MCU firmware supports gamma correction
+ * @brightness_knode: kernel node of the "brightness" device sysfs attribute (this is the
+ * driver specific global brightness, not the LED classdev brightness)
+ * @leds: flexible array of per-LED data
+ */
struct omnia_leds {
struct i2c_client *client;
struct mutex lock;
bool has_gamma_correction;
+ struct kernfs_node *brightness_knode;
struct omnia_led leds[];
};
-static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
-{
- u8 buf[2] = { cmd, val };
- int ret;
-
- ret = i2c_master_send(client, buf, sizeof(buf));
-
- return ret < 0 ? ret : 0;
-}
-
-static int omnia_cmd_read_raw(struct i2c_adapter *adapter, u8 addr, u8 cmd,
- void *reply, size_t len)
-{
- struct i2c_msg msgs[2];
- int ret;
-
- msgs[0].addr = addr;
- msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &cmd;
- msgs[1].addr = addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = reply;
-
- ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
- if (likely(ret == ARRAY_SIZE(msgs)))
- return 0;
- else if (ret < 0)
- return ret;
- else
- return -EIO;
-}
-
-static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
+static int omnia_cmd_set_color(const struct i2c_client *client, u8 led, u8 r, u8 g, u8 b)
{
- u8 reply;
- int err;
+ u8 buf[5] = { OMNIA_CMD_LED_COLOR, led, r, g, b };
- err = omnia_cmd_read_raw(client->adapter, client->addr, cmd, &reply, 1);
- if (err)
- return err;
-
- return reply;
+ return omnia_cmd_write(client, buf, sizeof(buf));
}
static int omnia_led_send_color_cmd(const struct i2c_client *client,
struct omnia_led *led)
{
- char cmd[5];
int ret;
- cmd[0] = CMD_LED_COLOR;
- cmd[1] = led->reg;
- cmd[2] = led->subled_info[0].brightness;
- cmd[3] = led->subled_info[1].brightness;
- cmd[4] = led->subled_info[2].brightness;
-
/* Send the color change command */
- ret = i2c_master_send(client, cmd, 5);
+ ret = omnia_cmd_set_color(client, led->reg, led->subled_info[0].brightness,
+ led->subled_info[1].brightness, led->subled_info[2].brightness);
if (ret < 0)
return ret;
@@ -170,12 +123,12 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
* is not being blinked by HW.
*/
if (!err && !led->hwtrig && !brightness != !led->on) {
- u8 state = CMD_LED_STATE_LED(led->reg);
+ u8 state = OMNIA_CMD_LED_STATE_LED(led->reg);
if (brightness)
- state |= CMD_LED_STATE_ON;
+ state |= OMNIA_CMD_LED_STATE_ON;
- err = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
+ err = omnia_cmd_write_u8(leds->client, OMNIA_CMD_LED_STATE, state);
if (!err)
led->on = !!brightness;
}
@@ -210,8 +163,8 @@ static int omnia_hwtrig_activate(struct led_classdev *cdev)
if (!err) {
/* Put the LED into MCU controlled mode */
- err = omnia_cmd_write_u8(leds->client, CMD_LED_MODE,
- CMD_LED_MODE_LED(led->reg));
+ err = omnia_cmd_write_u8(leds->client, OMNIA_CMD_LED_MODE,
+ OMNIA_CMD_LED_MODE_LED(led->reg));
if (!err)
led->hwtrig = true;
}
@@ -232,9 +185,8 @@ static void omnia_hwtrig_deactivate(struct led_classdev *cdev)
led->hwtrig = false;
/* Put the LED into software mode */
- err = omnia_cmd_write_u8(leds->client, CMD_LED_MODE,
- CMD_LED_MODE_LED(led->reg) |
- CMD_LED_MODE_USER);
+ err = omnia_cmd_write_u8(leds->client, OMNIA_CMD_LED_MODE,
+ OMNIA_CMD_LED_MODE_LED(led->reg) | OMNIA_CMD_LED_MODE_USER);
mutex_unlock(&leds->lock);
@@ -300,38 +252,26 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
*/
cdev->default_trigger = omnia_hw_trigger.name;
- /* put the LED into software mode */
- ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
- CMD_LED_MODE_LED(led->reg) |
- CMD_LED_MODE_USER);
- if (ret) {
- dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
- ret);
- return ret;
- }
+ /* Put the LED into software mode */
+ ret = omnia_cmd_write_u8(client, OMNIA_CMD_LED_MODE, OMNIA_CMD_LED_MODE_LED(led->reg) |
+ OMNIA_CMD_LED_MODE_USER);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set LED %pOF to software mode\n", np);
- /* disable the LED */
- ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
- CMD_LED_STATE_LED(led->reg));
- if (ret) {
- dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
- return ret;
- }
+ /* Disable the LED */
+ ret = omnia_cmd_write_u8(client, OMNIA_CMD_LED_STATE, OMNIA_CMD_LED_STATE_LED(led->reg));
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set LED %pOF brightness\n", np);
/* Set initial color and cache it */
ret = omnia_led_send_color_cmd(client, led);
- if (ret < 0) {
- dev_err(dev, "Cannot set LED %pOF initial color: %i\n", np,
- ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot set LED %pOF initial color\n", np);
ret = devm_led_classdev_multicolor_register_ext(dev, &led->mc_cdev,
&init_data);
- if (ret < 0) {
- dev_err(dev, "Cannot register LED %pOF: %i\n", np, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot register LED %pOF\n", np);
return 1;
}
@@ -351,14 +291,14 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
- int ret;
-
- ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
+ u8 reply;
+ int err;
- if (ret < 0)
- return ret;
+ err = omnia_cmd_read_u8(client, OMNIA_CMD_GET_BRIGHTNESS, &reply);
+ if (err < 0)
+ return err;
- return sysfs_emit(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", reply);
}
static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
@@ -374,7 +314,7 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
if (brightness > 100)
return -EINVAL;
- err = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
+ err = omnia_cmd_write_u8(client, OMNIA_CMD_SET_BRIGHTNESS, brightness);
return err ?: count;
}
@@ -385,17 +325,16 @@ static ssize_t gamma_correction_show(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct omnia_leds *leds = i2c_get_clientdata(client);
- int ret;
+ u8 reply = 0;
+ int err;
if (leds->has_gamma_correction) {
- ret = omnia_cmd_read_u8(client, CMD_GET_GAMMA_CORRECTION);
- if (ret < 0)
- return ret;
- } else {
- ret = 0;
+ err = omnia_cmd_read_u8(client, OMNIA_CMD_GET_GAMMA_CORRECTION, &reply);
+ if (err < 0)
+ return err;
}
- return sysfs_emit(buf, "%d\n", !!ret);
+ return sysfs_emit(buf, "%d\n", !!reply);
}
static ssize_t gamma_correction_store(struct device *dev,
@@ -413,7 +352,7 @@ static ssize_t gamma_correction_store(struct device *dev,
if (kstrtobool(buf, &val) < 0)
return -EINVAL;
- err = omnia_cmd_write_u8(client, CMD_SET_GAMMA_CORRECTION, val);
+ err = omnia_cmd_write_u8(client, OMNIA_CMD_SET_GAMMA_CORRECTION, val);
return err ?: count;
}
@@ -426,26 +365,104 @@ static struct attribute *omnia_led_controller_attrs[] = {
};
ATTRIBUTE_GROUPS(omnia_led_controller);
-static int omnia_mcu_get_features(const struct i2c_client *client)
+static irqreturn_t omnia_brightness_changed_threaded_fn(int irq, void *data)
+{
+ struct omnia_leds *leds = data;
+
+ if (unlikely(!leds->brightness_knode)) {
+ /*
+ * Note that sysfs_get_dirent() may sleep. This is okay, because we are in threaded
+ * context.
+ */
+ leds->brightness_knode = sysfs_get_dirent(leds->client->dev.kobj.sd, "brightness");
+ if (!leds->brightness_knode)
+ return IRQ_NONE;
+ }
+
+ sysfs_notify_dirent(leds->brightness_knode);
+
+ return IRQ_HANDLED;
+}
+
+static void omnia_brightness_knode_put(void *data)
+{
+ struct omnia_leds *leds = data;
+
+ if (leds->brightness_knode)
+ sysfs_put(leds->brightness_knode);
+}
+
+static int omnia_request_brightness_irq(struct omnia_leds *leds)
+{
+ struct device *dev = &leds->client->dev;
+ int ret;
+
+ if (!leds->client->irq) {
+ dev_info(dev,
+ "Brightness change interrupt supported by MCU firmware but not described in device-tree\n");
+
+ return 0;
+ }
+
+ /*
+ * Registering the brightness_knode destructor before requesting the IRQ ensures that on
+ * removal the brightness_knode sysfs node is put only after the IRQ is freed.
+ * This is needed because the interrupt handler uses the knode.
+ */
+ ret = devm_add_action(dev, omnia_brightness_knode_put, leds);
+ if (ret < 0)
+ return ret;
+
+ return devm_request_threaded_irq(dev, leds->client->irq, NULL,
+ omnia_brightness_changed_threaded_fn, IRQF_ONESHOT,
+ "leds-turris-omnia", leds);
+}
+
+static int omnia_mcu_get_features(const struct i2c_client *mcu_client)
{
u16 reply;
int err;
- err = omnia_cmd_read_raw(client->adapter, OMNIA_MCU_I2C_ADDR,
- CMD_GET_STATUS_WORD, &reply, sizeof(reply));
+ err = omnia_cmd_read_u16(mcu_client, OMNIA_CMD_GET_STATUS_WORD, &reply);
if (err)
return err;
- /* Check whether MCU firmware supports the CMD_GET_FEAUTRES command */
- if (!(le16_to_cpu(reply) & STS_FEATURES_SUPPORTED))
+ /* Check whether MCU firmware supports the OMNIA_CMD_GET_FEATURES command */
+ if (!(reply & OMNIA_STS_FEATURES_SUPPORTED))
return 0;
- err = omnia_cmd_read_raw(client->adapter, OMNIA_MCU_I2C_ADDR,
- CMD_GET_FEATURES, &reply, sizeof(reply));
+ err = omnia_cmd_read_u16(mcu_client, OMNIA_CMD_GET_FEATURES, &reply);
if (err)
return err;
- return le16_to_cpu(reply);
+ return reply;
+}
+
+static int omnia_match_mcu_client(struct device *dev, void *data)
+{
+ struct i2c_client *client;
+
+ client = i2c_verify_client(dev);
+ if (!client)
+ return 0;
+
+ return client->addr == OMNIA_MCU_I2C_ADDR;
+}
+
+static int omnia_find_mcu_and_get_features(struct device *dev)
+{
+ struct device *mcu_dev;
+ int ret;
+
+ mcu_dev = device_find_child(dev->parent, NULL, omnia_match_mcu_client);
+ if (!mcu_dev)
+ return -ENODEV;
+
+ ret = omnia_mcu_get_features(i2c_verify_client(mcu_dev));
+
+ put_device(mcu_dev);
+
+ return ret;
}
static int omnia_leds_probe(struct i2c_client *client)
@@ -457,13 +474,10 @@ static int omnia_leds_probe(struct i2c_client *client)
int ret, count;
count = of_get_available_child_count(np);
- if (!count) {
- dev_err(dev, "LEDs are not defined in device tree!\n");
- return -ENODEV;
- } else if (count > OMNIA_BOARD_LEDS) {
- dev_err(dev, "Too many LEDs defined in device tree!\n");
- return -EINVAL;
- }
+ if (count == 0)
+ return dev_err_probe(dev, -ENODEV, "LEDs are not defined in device tree!\n");
+ if (count > OMNIA_BOARD_LEDS)
+ return dev_err_probe(dev, -EINVAL, "Too many LEDs defined in device tree!\n");
leds = devm_kzalloc(dev, struct_size(leds, leds, count), GFP_KERNEL);
if (!leds)
@@ -472,28 +486,23 @@ static int omnia_leds_probe(struct i2c_client *client)
leds->client = client;
i2c_set_clientdata(client, leds);
- ret = omnia_mcu_get_features(client);
- if (ret < 0) {
- dev_err(dev, "Cannot determine MCU supported features: %d\n",
- ret);
- return ret;
- }
+ ret = omnia_find_mcu_and_get_features(dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot determine MCU supported features\n");
- leds->has_gamma_correction = ret & FEAT_LED_GAMMA_CORRECTION;
- if (!leds->has_gamma_correction) {
- dev_info(dev,
- "Your board's MCU firmware does not support the LED gamma correction feature.\n");
- dev_info(dev,
- "Consider upgrading MCU firmware with the omnia-mcutool utility.\n");
+ leds->has_gamma_correction = ret & OMNIA_FEAT_LED_GAMMA_CORRECTION;
+
+ if (ret & OMNIA_FEAT_BRIGHTNESS_INT) {
+ ret = omnia_request_brightness_irq(leds);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot request brightness IRQ\n");
}
mutex_init(&leds->lock);
ret = devm_led_trigger_register(dev, &omnia_hw_trigger);
- if (ret < 0) {
- dev_err(dev, "Cannot register private LED trigger: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot register private LED trigger\n");
led = &leds->leds[0];
for_each_available_child_of_node_scoped(np, child) {
@@ -509,20 +518,11 @@ static int omnia_leds_probe(struct i2c_client *client)
static void omnia_leds_remove(struct i2c_client *client)
{
- u8 buf[5];
-
- /* put all LEDs into default (HW triggered) mode */
- omnia_cmd_write_u8(client, CMD_LED_MODE,
- CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
-
- /* set all LEDs color to [255, 255, 255] */
- buf[0] = CMD_LED_COLOR;
- buf[1] = OMNIA_BOARD_LEDS;
- buf[2] = 255;
- buf[3] = 255;
- buf[4] = 255;
+ /* Put all LEDs into default (HW triggered) mode */
+ omnia_cmd_write_u8(client, OMNIA_CMD_LED_MODE, OMNIA_CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
- i2c_master_send(client, buf, 5);
+ /* Set all LEDs color to [255, 255, 255] */
+ omnia_cmd_set_color(client, OMNIA_BOARD_LEDS, 255, 255, 255);
}
static const struct of_device_id of_omnia_leds_match[] = {
diff --git a/drivers/leds/leds-upboard.c b/drivers/leds/leds-upboard.c
new file mode 100644
index 000000000000..b350eb294280
--- /dev/null
+++ b/drivers/leds/leds-upboard.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UP board LED driver.
+ *
+ * Copyright (c) AAEON. All rights reserved.
+ * Copyright (C) 2024 Bootlin
+ *
+ * Author: Gary Wang <garywang@aaeon.com.tw>
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/container_of.h>
+#include <linux/leds.h>
+#include <linux/mfd/upboard-fpga.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define led_cdev_to_led_upboard(c) container_of(c, struct upboard_led, cdev)
+
+struct upboard_led {
+ struct regmap_field *field;
+ struct led_classdev cdev;
+};
+
+struct upboard_led_profile {
+ const char *name;
+ unsigned int bit;
+};
+
+static struct upboard_led_profile upboard_up_led_profile[] = {
+ { "upboard:yellow:" LED_FUNCTION_STATUS, 0 },
+ { "upboard:green:" LED_FUNCTION_STATUS, 1 },
+ { "upboard:red:" LED_FUNCTION_STATUS, 2 },
+};
+
+static struct upboard_led_profile upboard_up2_led_profile[] = {
+ { "upboard:blue:" LED_FUNCTION_STATUS, 0 },
+ { "upboard:yellow:" LED_FUNCTION_STATUS, 1 },
+ { "upboard:green:" LED_FUNCTION_STATUS, 2 },
+ { "upboard:red:" LED_FUNCTION_STATUS, 3 },
+};
+
+static enum led_brightness upboard_led_brightness_get(struct led_classdev *cdev)
+{
+ struct upboard_led *led = led_cdev_to_led_upboard(cdev);
+ int brightness, ret;
+
+ ret = regmap_field_read(led->field, &brightness);
+
+ return ret ? LED_OFF : brightness;
+};
+
+static int upboard_led_brightness_set(struct led_classdev *cdev, enum led_brightness brightness)
+{
+ struct upboard_led *led = led_cdev_to_led_upboard(cdev);
+
+ return regmap_field_write(led->field, brightness != LED_OFF);
+};
+
+static int upboard_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct upboard_fpga *fpga = dev_get_drvdata(dev->parent);
+ struct upboard_led_profile *led_profile;
+ struct upboard_led *led;
+ int led_instances, ret, i;
+
+ switch (fpga->fpga_data->type) {
+ case UPBOARD_UP_FPGA:
+ led_profile = upboard_up_led_profile;
+ led_instances = ARRAY_SIZE(upboard_up_led_profile);
+ break;
+ case UPBOARD_UP2_FPGA:
+ led_profile = upboard_up2_led_profile;
+ led_instances = ARRAY_SIZE(upboard_up2_led_profile);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unknown device type %d\n",
+ fpga->fpga_data->type);
+ }
+
+ for (i = 0; i < led_instances; i++) {
+ const struct reg_field fldconf = {
+ .reg = UPBOARD_REG_FUNC_EN0,
+ .lsb = led_profile[i].bit,
+ .msb = led_profile[i].bit,
+ };
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->field = devm_regmap_field_alloc(&pdev->dev, fpga->regmap, fldconf);
+ if (IS_ERR(led->field))
+ return PTR_ERR(led->field);
+
+ led->cdev.brightness_get = upboard_led_brightness_get;
+ led->cdev.brightness_set_blocking = upboard_led_brightness_set;
+ led->cdev.max_brightness = LED_ON;
+
+ led->cdev.name = led_profile[i].name;
+
+ ret = devm_led_classdev_register(dev, &led->cdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver upboard_led_driver = {
+ .driver = {
+ .name = "upboard-leds",
+ },
+ .probe = upboard_led_probe,
+};
+
+module_platform_driver(upboard_led_driver);
+
+MODULE_AUTHOR("Gary Wang <garywang@aaeon.com.tw>");
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_DESCRIPTION("UP Board LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:upboard-led");
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index d7999e7372a4..bee46651e068 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -22,10 +22,10 @@ void led_stop_software_blink(struct led_classdev *led_cdev);
void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value);
void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value);
ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count);
ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count);
extern struct rw_semaphore leds_list_lock;
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index e1a81e0109e8..f80a06cc31f8 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -50,7 +50,13 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
duty = priv->leds[i].state.period - duty;
priv->leds[i].state.duty_cycle = duty;
- priv->leds[i].state.enabled = duty > 0;
+ /*
+ * Disabling a PWM doesn't guarantee that it emits the inactive level.
+ * So keep it on. The PWM should be disabled only for suspend, because
+ * otherwise it refuses to suspend. The possible downside is that the
+ * LED might stay (or even go) on.
+ */
+ priv->leds[i].state.enabled = !(cdev->flags & LED_SUSPENDED);
ret = pwm_apply_might_sleep(priv->leds[i].pwm,
&priv->leds[i].state);
if (ret)
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 33cbf8413658..b3ee33aed36e 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -156,7 +156,7 @@ static ssize_t led_invert_show(struct device *dev,
{
struct activity_data *activity_data = led_trigger_get_drvdata(dev);
- return sprintf(buf, "%u\n", activity_data->invert);
+ return sprintf(buf, "%d\n", activity_data->invert);
}
static ssize_t led_invert_store(struct device *dev,
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 4b0863db901a..c15efe3e5078 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -605,6 +605,8 @@ static int netdev_trig_notify(struct notifier_block *nb,
trigger_data->net_dev = NULL;
break;
case NETDEV_UP:
+ trigger_data->hw_control = can_hw_control(trigger_data);
+ fallthrough;
case NETDEV_CHANGE:
get_device_state(trigger_data);
/* Refresh link_speed visibility */
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index b461b1bed25b..369d72f59b3c 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -215,7 +215,7 @@ static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write,
}
/* file(s) in /proc/sys/dev/mac_hid */
-static struct ctl_table mac_hid_files[] = {
+static const struct ctl_table mac_hid_files[] = {
{
.procname = "mouse_button_emulation",
.data = &mouse_emulate_buttons,
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 1e9db8e4acdf..0b1870a09e1f 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -61,6 +61,19 @@ config MD_BITMAP_FILE
various kernel APIs and can only work with files on a file system not
actually sitting on the MD device.
+config MD_LINEAR
+ tristate "Linear (append) mode"
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, then your multiple devices driver will be able to
+ use the so-called linear mode, i.e. it will combine the hard disk
+ partitions by simply appending one to the other.
+
+ To compile this as a module, choose M here: the module
+ will be called linear.
+
+ If unsure, say Y.
+
config MD_RAID0
tristate "RAID-0 (striping) mode"
depends on BLK_DEV_MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 476a214e4bdc..87bdfc9fe14c 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,12 +29,14 @@ dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
+linear-y += md-linear.o
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
# themselves, and md.o may use the personalities when it
# auto-initialised.
+obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index ef6abf33f926..45ca134cbf02 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
- bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+ bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_private = &io->cl;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c1d28e365910..453efbbdc8ee 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -334,7 +334,7 @@ static void dirty_init(struct keybuf_key *w)
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
- bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+ bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_private = w;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 499f8cc8a39f..e23076f7ece2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -547,7 +547,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
md->tag_set->ops = &dm_mq_ops;
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+ md->tag_set->flags = BLK_MQ_F_STACKING;
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md->tag_set->driver_data = md;
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index e61855da6461..0c41949db784 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -122,7 +122,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
par = fec_read_parity(v, rsb, block_offset, &offset,
- par_buf_offset, &buf, bio_prio(bio));
+ par_buf_offset, &buf, bio->bi_ioprio);
if (IS_ERR(par))
return PTR_ERR(par);
@@ -164,7 +164,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset,
- par_buf_offset, &buf, bio_prio(bio));
+ par_buf_offset, &buf, bio->bi_ioprio);
if (IS_ERR(par))
return PTR_ERR(par);
}
@@ -254,7 +254,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
bufio = v->bufio;
}
- bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio_prio(bio));
+ bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio->bi_ioprio);
if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 47d595f6a76e..e86c1431b108 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -321,7 +321,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
} else {
data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
- &buf, bio_prio(bio));
+ &buf, bio->bi_ioprio);
}
if (IS_ERR(data))
@@ -789,7 +789,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
verity_fec_init_io(io);
- verity_submit_prefetch(v, io, bio_prio(bio));
+ verity_submit_prefetch(v, io, bio->bi_ioprio);
submit_bio_noacct(bio);
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index b2a00f213c2c..4b80165afd23 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -49,6 +49,7 @@ static int md_setup_ents __initdata;
* instead of just one. -- KTK
* 18May2000: Added support for persistent-superblock arrays:
* md=n,0,factor,fault,device-list uses RAID0 for device n
+ * md=n,-1,factor,fault,device-list uses LINEAR for device n
* md=n,device-list reads a RAID superblock from the devices
* elements in device-list are read by name_to_kdev_t so can be
* a hex number or something like /dev/hda1 /dev/sdb
@@ -87,7 +88,7 @@ static int __init md_setup(char *str)
md_setup_ents++;
switch (get_option(&str, &level)) { /* RAID level */
case 2: /* could be 0 or -1.. */
- if (level == 0) {
+ if (level == 0 || level == LEVEL_LINEAR) {
if (get_option(&str, &factor) != 2 || /* Chunk Size */
get_option(&str, &fault) != 2) {
printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
@@ -95,7 +96,10 @@ static int __init md_setup(char *str)
}
md_setup_args[ent].level = level;
md_setup_args[ent].chunk = 1 << (factor+12);
- pername = "raid0";
+ if (level == LEVEL_LINEAR)
+ pername = "linear";
+ else
+ pername = "raid0";
break;
}
fallthrough;
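A worked illustration of the extended md= boot syntax (the device names
are hypothetical):

        md=0,-1,0,0,/dev/sda1,/dev/sdb1

This builds md0 as a LINEAR concatenation of the two partitions: level -1
selects pername "linear", and factor 0 gives chunk = 1 << (0 + 12) = 4096
bytes, which linear_status() later reports as "4k rounding".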
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index c3a42dd66ce5..ec4ecd96e6b1 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -682,7 +682,7 @@ static void bitmap_update_sb(void *data)
return;
if (!bitmap->storage.sb_page) /* no superblock */
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared)
/* rocking back to read-only */
@@ -702,7 +702,7 @@ static void bitmap_update_sb(void *data)
sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
bitmap_info.space);
- kunmap_atomic(sb);
+ kunmap_local(sb);
if (bitmap->storage.file)
write_file_page(bitmap, bitmap->storage.sb_page, 1);
@@ -717,7 +717,7 @@ static void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %u\n", le32_to_cpu(sb->version));
@@ -736,7 +736,7 @@ static void bitmap_print_sb(struct bitmap *bitmap)
pr_debug(" sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2);
pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
- kunmap_atomic(sb);
+ kunmap_local(sb);
}
/*
@@ -760,7 +760,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
return -ENOMEM;
bitmap->storage.sb_index = 0;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->magic = cpu_to_le32(BITMAP_MAGIC);
sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -768,7 +768,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
chunksize = bitmap->mddev->bitmap_info.chunksize;
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
- kunmap_atomic(sb);
+ kunmap_local(sb);
pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
@@ -803,7 +803,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
bitmap->mddev->bitmap_info.nodes = 0;
- kunmap_atomic(sb);
+ kunmap_local(sb);
return 0;
}
@@ -865,7 +865,7 @@ re_read:
return err;
err = -EINVAL;
- sb = kmap_atomic(sb_page);
+ sb = kmap_local_page(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -932,7 +932,7 @@ re_read:
err = 0;
out:
- kunmap_atomic(sb);
+ kunmap_local(sb);
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
/* Assigning chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
@@ -1161,12 +1161,12 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
bit = file_page_offset(&bitmap->storage, chunk);
/* set the bit */
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set_bit(bit, kaddr);
else
set_bit_le(bit, kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
pr_debug("set file bit %lu page %lu\n", bit, index);
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY);
@@ -1190,12 +1190,12 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
clear_bit(bit, paddr);
else
clear_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
@@ -1214,12 +1214,12 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return -EINVAL;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set = test_bit(bit, paddr);
else
set = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
return set;
}
@@ -1388,9 +1388,9 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
* If the bitmap is out of date, dirty the whole page
* and write it out
*/
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
memset(paddr + offset, 0xff, PAGE_SIZE - offset);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
filemap_write_page(bitmap, i, true);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) {
@@ -1406,12 +1406,12 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
void *paddr;
bool was_set;
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
was_set = test_bit(bit, paddr);
else
was_set = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (was_set) {
/* if the disk bit is set, set the memory bit */
@@ -1546,10 +1546,10 @@ static void bitmap_daemon_work(struct mddev *mddev)
bitmap_super_t *sb;
bitmap->need_sync = 0;
if (bitmap->storage.filemap) {
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events_cleared =
cpu_to_le64(bitmap->events_cleared);
- kunmap_atomic(sb);
+ kunmap_local(sb);
set_page_attr(bitmap, 0,
BITMAP_PAGE_NEEDWRITE);
}
@@ -1671,24 +1671,13 @@ __acquires(bitmap->lock)
}
static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool behind)
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return 0;
- if (behind) {
- int bw;
- atomic_inc(&bitmap->behind_writes);
- bw = atomic_read(&bitmap->behind_writes);
- if (bw > bitmap->behind_writes_used)
- bitmap->behind_writes_used = bw;
-
- pr_debug("inc write-behind count %d/%lu\n",
- bw, bitmap->mddev->bitmap_info.max_write_behind);
- }
-
while (sectors) {
sector_t blocks;
bitmap_counter_t *bmc;
@@ -1737,21 +1726,13 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
}
static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool success, bool behind)
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return;
- if (behind) {
- if (atomic_dec_and_test(&bitmap->behind_writes))
- wake_up(&bitmap->behind_wait);
- pr_debug("dec write-behind count %d/%lu\n",
- atomic_read(&bitmap->behind_writes),
- bitmap->mddev->bitmap_info.max_write_behind);
- }
-
while (sectors) {
sector_t blocks;
unsigned long flags;
@@ -1764,15 +1745,16 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
return;
}
- if (success && !bitmap->mddev->degraded &&
- bitmap->events_cleared < bitmap->mddev->events) {
- bitmap->events_cleared = bitmap->mddev->events;
- bitmap->need_sync = 1;
- sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
- }
-
- if (!success && !NEEDED(*bmc))
+ if (!bitmap->mddev->degraded) {
+ if (bitmap->events_cleared < bitmap->mddev->events) {
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->need_sync = 1;
+ sysfs_notify_dirent_safe(
+ bitmap->sysfs_can_clear);
+ }
+ } else if (!NEEDED(*bmc)) {
*bmc |= NEEDED_MASK;
+ }
if (COUNTER(*bmc) == COUNTER_MAX)
wake_up(&bitmap->overflow_wait);
@@ -2062,6 +2044,37 @@ static void md_bitmap_free(void *data)
kfree(bitmap);
}
+static void bitmap_start_behind_write(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+ int bw;
+
+ if (!bitmap)
+ return;
+
+ atomic_inc(&bitmap->behind_writes);
+ bw = atomic_read(&bitmap->behind_writes);
+ if (bw > bitmap->behind_writes_used)
+ bitmap->behind_writes_used = bw;
+
+ pr_debug("inc write-behind count %d/%lu\n",
+ bw, bitmap->mddev->bitmap_info.max_write_behind);
+}
+
+static void bitmap_end_behind_write(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return;
+
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
+ pr_debug("dec write-behind count %d/%lu\n",
+ atomic_read(&bitmap->behind_writes),
+ bitmap->mddev->bitmap_info.max_write_behind);
+}
+
static void bitmap_wait_behind_writes(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -2981,6 +2994,9 @@ static struct bitmap_operations bitmap_ops = {
.dirty_bits = bitmap_dirty_bits,
.unplug = bitmap_unplug,
.daemon_work = bitmap_daemon_work,
+
+ .start_behind_write = bitmap_start_behind_write,
+ .end_behind_write = bitmap_end_behind_write,
.wait_behind_writes = bitmap_wait_behind_writes,
.startwrite = bitmap_startwrite,
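The kmap_atomic() to kmap_local_page() conversions above follow the usual
migration pattern: kmap_local_page() creates a thread-local mapping that,
unlike kmap_atomic(), leaves preemption and page faults enabled, and none
of these bitmap sections rely on the implicit atomic context. The pattern
in isolation (a sketch; 'page' is a hypothetical struct page *):

        void *addr;

        addr = kmap_local_page(page);
        memset(addr, 0, PAGE_SIZE);     /* ... access the page data ... */
        kunmap_local(addr);             /* unmaps must nest in LIFO order */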
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 662e6fc141a7..31c93019c76b 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -84,12 +84,15 @@ struct bitmap_operations {
unsigned long e);
void (*unplug)(struct mddev *mddev, bool sync);
void (*daemon_work)(struct mddev *mddev);
+
+ void (*start_behind_write)(struct mddev *mddev);
+ void (*end_behind_write)(struct mddev *mddev);
void (*wait_behind_writes)(struct mddev *mddev);
int (*startwrite)(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool behind);
+ unsigned long sectors);
void (*endwrite)(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool success, bool behind);
+ unsigned long sectors);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
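Taken together, the md-bitmap changes split behind-write accounting out of
startwrite()/endwrite() into dedicated start_behind_write() and
end_behind_write() hooks, and drop the old 'success' and 'behind'
parameters. A sketch of how a write path drives the reworked ops
(illustrative only; locking and error handling omitted):

static void example_write(struct mddev *mddev, sector_t offset,
                          unsigned long sectors, bool behind)
{
        if (behind)
                mddev->bitmap_ops->start_behind_write(mddev);
        mddev->bitmap_ops->startwrite(mddev, offset, sectors);
        /* ... submit and complete the member-device I/O ... */
        mddev->bitmap_ops->endwrite(mddev, offset, sectors);
        if (behind)
                mddev->bitmap_ops->end_behind_write(mddev);
}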
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
new file mode 100644
index 000000000000..a382929ce7ba
--- /dev/null
+++ b/drivers/md/md-linear.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linear.c : Multiple Devices driver for Linux Copyright (C) 1994-96 Marc
+ * ZYNGIER <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr>
+ */
+
+#include <linux/blkdev.h>
+#include <linux/raid/md_u.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <trace/events/block.h>
+#include "md.h"
+
+struct dev_info {
+ struct md_rdev *rdev;
+ sector_t end_sector;
+};
+
+struct linear_conf {
+ struct rcu_head rcu;
+ sector_t array_sectors;
+ /* a copy of mddev->raid_disks */
+ int raid_disks;
+ struct dev_info disks[] __counted_by(raid_disks);
+};
+
+/*
+ * find which device holds a particular offset
+ */
+static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
+{
+ int lo, mid, hi;
+ struct linear_conf *conf;
+
+ lo = 0;
+ hi = mddev->raid_disks - 1;
+ conf = mddev->private;
+
+ /*
+ * Binary Search
+ */
+
+ while (hi > lo) {
+
+ mid = (hi + lo) / 2;
+ if (sector < conf->disks[mid].end_sector)
+ hi = mid;
+ else
+ lo = mid + 1;
+ }
+
+ return conf->disks + lo;
+}
+
+static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
+{
+ struct linear_conf *conf;
+ sector_t array_sectors;
+
+ conf = mddev->private;
+ WARN_ONCE(sectors || raid_disks,
+ "%s does not support generic reshape\n", __func__);
+ array_sectors = conf->array_sectors;
+
+ return array_sectors;
+}
+
+static int linear_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+ int err;
+
+ md_init_stacking_limits(&lim);
+ lim.max_hw_sectors = mddev->chunk_sectors;
+ lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.io_min = mddev->chunk_sectors << 9;
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ if (err) {
+ queue_limits_cancel_update(mddev->gendisk->queue);
+ return err;
+ }
+
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
+static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
+{
+ struct linear_conf *conf;
+ struct md_rdev *rdev;
+ int ret = -EINVAL;
+ int cnt;
+ int i;
+
+ conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * conf->raid_disks is a copy of mddev->raid_disks. The reason to
+ * keep a copy of mddev->raid_disks in struct linear_conf is that
+ * mddev->raid_disks may not be consistent with the number of
+ * pointers in conf->disks[] when it is updated in linear_add() and
+ * used to iterate the old conf->disks[] array in linear_congested().
+ * Since conf->raid_disks is always consistent with the number of
+ * pointers in the conf->disks[] array, and mddev->private is updated
+ * with rcu_assign_pointer() in linear_add(), such races are avoided.
+ */
+ conf->raid_disks = raid_disks;
+
+ cnt = 0;
+ conf->array_sectors = 0;
+
+ rdev_for_each(rdev, mddev) {
+ int j = rdev->raid_disk;
+ struct dev_info *disk = conf->disks + j;
+ sector_t sectors;
+
+ if (j < 0 || j >= raid_disks || disk->rdev) {
+ pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
+ goto out;
+ }
+
+ disk->rdev = rdev;
+ if (mddev->chunk_sectors) {
+ sectors = rdev->sectors;
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev->sectors = sectors * mddev->chunk_sectors;
+ }
+
+ conf->array_sectors += rdev->sectors;
+ cnt++;
+ }
+ if (cnt != raid_disks) {
+ pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
+ goto out;
+ }
+
+ /*
+ * Here we calculate the device offsets.
+ */
+ conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
+
+ for (i = 1; i < raid_disks; i++)
+ conf->disks[i].end_sector =
+ conf->disks[i-1].end_sector +
+ conf->disks[i].rdev->sectors;
+
+ if (!mddev_is_dm(mddev)) {
+ ret = linear_set_limits(mddev);
+ if (ret)
+ goto out;
+ }
+
+ return conf;
+
+out:
+ kfree(conf);
+ return ERR_PTR(ret);
+}
+
+static int linear_run(struct mddev *mddev)
+{
+ struct linear_conf *conf;
+ int ret;
+
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+
+ conf = linear_conf(mddev, mddev->raid_disks);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+
+ mddev->private = conf;
+ md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
+
+ ret = md_integrity_register(mddev);
+ if (ret) {
+ kfree(conf);
+ mddev->private = NULL;
+ }
+ return ret;
+}
+
+static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
+{
+ /* Adding a drive to a linear array allows the array to grow.
+ * It is permitted if the new drive has a matching superblock
+ * already on it, with raid_disk equal to raid_disks.
+ * It is achieved by creating a new linear_conf structure
+ * and swapping it in place of the current one.
+ * The current one is never freed until the array is stopped.
+ * This avoids races.
+ */
+ struct linear_conf *newconf, *oldconf;
+
+ if (rdev->saved_raid_disk != mddev->raid_disks)
+ return -EINVAL;
+
+ rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
+
+ newconf = linear_conf(mddev, mddev->raid_disks + 1);
+ if (IS_ERR(newconf))
+ return PTR_ERR(newconf);
+
+ /* newconf->raid_disks already keeps a copy of the increased
+ * value of mddev->raid_disks; WARN_ONCE() is just used to make
+ * sure of this. It is possible that oldconf is still referenced
+ * in linear_congested(), therefore kfree_rcu() is used to free
+ * oldconf once no one is using it anymore.
+ */
+ oldconf = rcu_dereference_protected(mddev->private,
+ lockdep_is_held(&mddev->reconfig_mutex));
+ mddev->raid_disks++;
+ WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
+ "copied raid_disks doesn't match mddev->raid_disks");
+ rcu_assign_pointer(mddev->private, newconf);
+ md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
+ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
+ kfree_rcu(oldconf, rcu);
+ return 0;
+}
+
+static void linear_free(struct mddev *mddev, void *priv)
+{
+ struct linear_conf *conf = priv;
+
+ kfree(conf);
+}
+
+static bool linear_make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct dev_info *tmp_dev;
+ sector_t start_sector, end_sector, data_offset;
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
+ return true;
+
+ tmp_dev = which_dev(mddev, bio_sector);
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ end_sector = tmp_dev->end_sector;
+ data_offset = tmp_dev->rdev->data_offset;
+
+ if (unlikely(bio_sector >= end_sector ||
+ bio_sector < start_sector))
+ goto out_of_bounds;
+
+ if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
+ md_error(mddev, tmp_dev->rdev);
+ bio_io_error(bio);
+ return true;
+ }
+
+ if (unlikely(bio_end_sector(bio) > end_sector)) {
+ /* This bio crosses a device boundary, so we have to split it */
+ struct bio *split = bio_split(bio, end_sector - bio_sector,
+ GFP_NOIO, &mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return true;
+ }
+
+ bio_chain(split, bio);
+ submit_bio_noacct(bio);
+ bio = split;
+ }
+
+ md_account_bio(mddev, &bio);
+ bio_set_dev(bio, tmp_dev->rdev->bdev);
+ bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
+ start_sector + data_offset;
+
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !bdev_max_discard_sectors(bio->bi_bdev))) {
+ /* Just ignore it */
+ bio_endio(bio);
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
+ bio_sector);
+ mddev_check_write_zeroes(mddev, bio);
+ submit_bio_noacct(bio);
+ }
+ return true;
+
+out_of_bounds:
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_iter.bi_sector,
+ tmp_dev->rdev->bdev,
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ bio_io_error(bio);
+ return true;
+}
+
+static void linear_status(struct seq_file *seq, struct mddev *mddev)
+{
+ seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
+}
+
+static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
+{
+ if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
+ char *md_name = mdname(mddev);
+
+ pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
+ md_name, rdev->bdev);
+ }
+}
+
+static void linear_quiesce(struct mddev *mddev, int state)
+{
+}
+
+static struct md_personality linear_personality = {
+ .name = "linear",
+ .level = LEVEL_LINEAR,
+ .owner = THIS_MODULE,
+ .make_request = linear_make_request,
+ .run = linear_run,
+ .free = linear_free,
+ .status = linear_status,
+ .hot_add_disk = linear_add,
+ .size = linear_size,
+ .quiesce = linear_quiesce,
+ .error_handler = linear_error,
+};
+
+static int __init linear_init(void)
+{
+ return register_md_personality(&linear_personality);
+}
+
+static void linear_exit(void)
+{
+ unregister_md_personality(&linear_personality);
+}
+
+module_init(linear_init);
+module_exit(linear_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
+MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
+MODULE_ALIAS("md-linear");
+MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aebe12b0ee27..866015b681af 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8124,7 +8124,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
return;
mddev->pers->error_handler(mddev, rdev);
- if (mddev->pers->level == 0)
+ if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
return;
if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
@@ -8745,12 +8745,32 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+static void md_bitmap_start(struct mddev *mddev,
+ struct md_io_clone *md_io_clone)
+{
+ if (mddev->pers->bitmap_sector)
+ mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
+ &md_io_clone->sectors);
+
+ mddev->bitmap_ops->startwrite(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
+}
+
+static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
+{
+ mddev->bitmap_ops->endwrite(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
+}
+
static void md_end_clone_io(struct bio *bio)
{
struct md_io_clone *md_io_clone = bio->bi_private;
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
+ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ md_bitmap_end(mddev, md_io_clone);
+
if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
@@ -8775,6 +8795,12 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
if (blk_queue_io_stat(bdev->bd_disk->queue))
md_io_clone->start_time = bio_start_io_acct(*bio);
+ if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
+ md_io_clone->offset = (*bio)->bi_iter.bi_sector;
+ md_io_clone->sectors = bio_sectors(*bio);
+ md_bitmap_start(mddev, md_io_clone);
+ }
+
clone->bi_end_io = md_end_clone_io;
clone->bi_private = md_io_clone;
*bio = clone;
@@ -8793,6 +8819,9 @@ void md_free_cloned_bio(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
+ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ md_bitmap_end(mddev, md_io_clone);
+
if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
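The net effect in md.c: the bitmap bits for a write are set once when the
bio is cloned and cleared exactly once on completion, whichever path
retires the clone. A lifecycle sketch (illustrative):

        md_clone_bio()
            -> md_bitmap_start()        /* WRITE && mddev->bitmap */
        ... member-device I/O runs ...
        md_end_clone_io() or md_free_cloned_bio()
            -> md_bitmap_end()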
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4ba93af36126..def808064ad8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -746,6 +746,9 @@ struct md_personality
void *(*takeover) (struct mddev *mddev);
/* Changes the consistency policy of an active array. */
int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
+ /* convert I/O ranges from the array layout to the bitmap layout */
+ void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
+ unsigned long *sectors);
};
struct md_sysfs_entry {
@@ -828,6 +831,8 @@ struct md_io_clone {
struct mddev *mddev;
struct bio *orig_bio;
unsigned long start_time;
+ sector_t offset;
+ unsigned long sectors;
struct bio bio_clone;
};
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 7049ec7fb8eb..8fc9339b00c7 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -384,7 +384,7 @@ static int raid0_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 519c56f0ee3d..9d57a88dbd26 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -420,10 +420,8 @@ static void close_write(struct r1bio *r1_bio)
r1_bio->behind_master_bio = NULL;
}
- /* clear the bitmap if all writes complete successfully */
- mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->end_behind_write(mddev);
md_write_end(mddev);
}
@@ -480,8 +478,6 @@ static void raid1_end_write_request(struct bio *bio)
if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
else {
- /* Fail the request */
- set_bit(R1BIO_Degraded, &r1_bio->state);
/* Finished with this branch */
r1_bio->bios[mirror] = NULL;
to_put = bio;
@@ -1535,11 +1531,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
write_behind = true;
r1_bio->bios[i] = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags)) {
- if (i < conf->raid_disks)
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
- }
atomic_inc(&rdev->nr_pending);
if (test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -1558,16 +1551,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
*/
max_sectors = bad_sectors;
rdev_dec_pending(rdev, mddev);
- /* We don't set R1BIO_Degraded as that
- * only applies if the disk is
- * missing, so it might be re-added,
- * and we want to know to recover this
- * chunk.
- * In this case the device is here,
- * and the fact that this chunk is not
- * in-sync is recorded in the bad
- * block log
- */
continue;
}
if (is_bad) {
@@ -1645,9 +1628,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
stats.behind_writes < max_write_behind)
alloc_behind_master_bio(r1_bio, bio);
- mddev->bitmap_ops->startwrite(
- mddev, r1_bio->sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
first_clone = 0;
}
@@ -2614,12 +2596,10 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
* errors.
*/
fail = true;
- if (!narrow_write_error(r1_bio, m)) {
+ if (!narrow_write_error(r1_bio, m))
md_error(conf->mddev,
conf->mirrors[m].rdev);
/* an I/O failed, we can't clear the bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
- }
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
@@ -2710,8 +2690,6 @@ static void raid1d(struct md_thread *thread)
list_del(&r1_bio->retry_list);
idx = sector_to_idx(r1_bio->sector);
atomic_dec(&conf->nr_queued[idx]);
- if (mddev->degraded)
- set_bit(R1BIO_Degraded, &r1_bio->state);
if (test_bit(R1BIO_WriteError, &r1_bio->state))
close_write(r1_bio);
raid_end_bio_io(r1_bio);
@@ -3239,7 +3217,7 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5300cbaa58a4..33f318fcc268 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -188,7 +188,6 @@ struct r1bio {
enum r1bio_state {
R1BIO_Uptodate,
R1BIO_IsSync,
- R1BIO_Degraded,
R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7d7a8a2524dc..efe93b979167 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -428,10 +428,6 @@ static void close_write(struct r10bio *r10_bio)
{
struct mddev *mddev = r10_bio->mddev;
- /* clear the bitmap if all writes complete successfully */
- mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- false);
md_write_end(mddev);
}
@@ -501,7 +497,6 @@ static void raid10_end_write_request(struct bio *bio)
set_bit(R10BIO_WriteError, &r10_bio->state);
else {
/* Fail the request */
- set_bit(R10BIO_Degraded, &r10_bio->state);
r10_bio->devs[slot].bio = NULL;
to_put = bio;
dec_rdev = 1;
@@ -1438,10 +1433,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
- if (!rdev && !rrdev) {
- set_bit(R10BIO_Degraded, &r10_bio->state);
+ if (!rdev && !rrdev)
continue;
- }
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
@@ -1458,14 +1451,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* to other devices yet
*/
max_sectors = bad_sectors;
- /* We don't set R10BIO_Degraded as that
- * only applies if the disk is missing,
- * so it might be re-added, and we want to
- * know to recover this chunk.
- * In this case the device is here, and the
- * fact that this chunk is not in-sync is
- * recorded in the bad block log.
- */
continue;
}
if (is_bad) {
@@ -1519,8 +1504,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
md_account_bio(mddev, &bio);
r10_bio->master_bio = bio;
atomic_set(&r10_bio->remaining, 1);
- mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors,
- false);
for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio)
@@ -2966,11 +2949,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL && bio->bi_status) {
fail = true;
- if (!narrow_write_error(r10_bio, m)) {
+ if (!narrow_write_error(r10_bio, m))
md_error(conf->mddev, rdev);
- set_bit(R10BIO_Degraded,
- &r10_bio->state);
- }
rdev_dec_pending(rdev, conf->mddev);
}
bio = r10_bio->devs[m].repl_bio;
@@ -3029,8 +3009,6 @@ static void raid10d(struct md_thread *thread)
r10_bio = list_first_entry(&tmp, struct r10bio,
retry_list);
list_del(&r10_bio->retry_list);
- if (mddev->degraded)
- set_bit(R10BIO_Degraded, &r10_bio->state);
if (test_bit(R10BIO_WriteError,
&r10_bio->state))
@@ -4040,7 +4018,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 2e75e88d0802..3f16ad6904a9 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -161,7 +161,6 @@ enum r10bio_state {
R10BIO_IsSync,
R10BIO_IsRecover,
R10BIO_IsReshape,
- R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
*/
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index b4f7b79fd187..e530271cb86b 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -313,10 +313,6 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- false);
}
}
}
@@ -1023,10 +1019,10 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
@@ -1979,9 +1975,9 @@ r5l_recovery_verify_data_checksum(struct r5l_log *log,
u32 checksum;
r5l_recovery_read_page(log, ctx, page, log_offset);
- addr = kmap_atomic(page);
+ addr = kmap_local_page(page);
checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}
@@ -2381,11 +2377,11 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
payload->size = cpu_to_le32(BLOCK_SECTORS);
payload->location = cpu_to_le64(
raid5_compute_blocknr(sh, i, 0));
- addr = kmap_atomic(dev->page);
+ addr = kmap_local_page(dev->page);
payload->checksum[0] = cpu_to_le32(
crc32c_le(log->uuid_checksum, addr,
PAGE_SIZE));
- kunmap_atomic(addr);
+ kunmap_local(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
dev->page, REQ_OP_WRITE, false);
write_pos = r5l_ring_add(log, write_pos,
@@ -2888,10 +2884,10 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
pages++;
}
WARN_ON(pages == 0);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f09e7677ee9f..5c79429acc64 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -906,8 +906,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return false;
return test_bit(STRIPE_BATCH_READY, &sh->state) &&
- !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
- is_full_stripe_write(sh);
+ is_full_stripe_write(sh);
}
/* we only do back search */
@@ -1345,8 +1344,6 @@ again:
submit_bio_noacct(rbi);
}
if (!rdev && !rrdev) {
- if (op_is_write(op))
- set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %d on disc %d for sector %llu\n",
bi->bi_opf, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -2884,7 +2881,6 @@ static void raid5_end_write_request(struct bio *bi)
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (bi->bi_status) {
- set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
@@ -3548,29 +3544,9 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
(*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
sh->dev[dd_idx].sector);
- if (conf->mddev->bitmap && firstwrite) {
- /* Cannot hold spinlock over bitmap_startwrite,
- * but must ensure this isn't added to a batch until
- * we have added to the bitmap and set bm_seq.
- * So set STRIPE_BITMAP_PENDING to prevent
- * batching.
- * If multiple __add_stripe_bio() calls race here they
- * much all set STRIPE_BITMAP_PENDING. So only the first one
- * to complete "bitmap_startwrite" gets to set
- * STRIPE_BIT_DELAY. This is important as once a stripe
- * is added to a batch, STRIPE_BIT_DELAY cannot be changed
- * any more.
- */
- set_bit(STRIPE_BITMAP_PENDING, &sh->state);
- spin_unlock_irq(&sh->stripe_lock);
- conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector,
- RAID5_STRIPE_SECTORS(conf), false);
- spin_lock_irq(&sh->stripe_lock);
- clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
- if (!sh->batch_head) {
- sh->bm_seq = conf->seq_flush+1;
- set_bit(STRIPE_BIT_DELAY, &sh->state);
- }
+ if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
+ sh->bm_seq = conf->seq_flush+1;
+ set_bit(STRIPE_BIT_DELAY, &sh->state);
}
}
@@ -3621,7 +3597,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
BUG_ON(sh->batch_head);
for (i = disks; i--; ) {
struct bio *bi;
- int bitmap_end = 0;
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
struct md_rdev *rdev = conf->disks[i].rdev;
@@ -3646,8 +3621,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].towrite = NULL;
sh->overwrite_disks = 0;
spin_unlock_irq(&sh->stripe_lock);
- if (bi)
- bitmap_end = 1;
log_stripe_write_finished(sh);
@@ -3662,11 +3635,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bio_io_error(bi);
bi = nextbi;
}
- if (bitmap_end)
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- false, false);
- bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
@@ -3675,7 +3643,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].page = sh->dev[i].orig_page;
}
- if (bi) bitmap_end = 1;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
@@ -3709,10 +3676,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi;
}
}
- if (bitmap_end)
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- false, false);
/* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/
@@ -4061,10 +4024,7 @@ returnbi:
bio_endio(wbi);
wbi = wbi2;
}
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- false);
+
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head,
@@ -4341,7 +4301,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
s->locked++;
set_bit(R5_Wantwrite, &dev->flags);
- clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
case check_state_run:
@@ -4498,7 +4457,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
clear_bit(R5_Wantwrite, &dev->flags);
s->locked--;
}
- clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
@@ -4891,8 +4849,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_COMPUTE_RUN) |
(1 << STRIPE_DISCARD) |
(1 << STRIPE_BATCH_READY) |
- (1 << STRIPE_BATCH_ERR) |
- (1 << STRIPE_BITMAP_PENDING)),
+ (1 << STRIPE_BATCH_ERR)),
"stripe state: %lx\n", sh->state);
WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
(1 << STRIPE_REPLACED)),
@@ -4900,7 +4857,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED) |
(1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -5784,10 +5740,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
- for (d = 0; d < conf->raid_disks - conf->max_degraded;
- d++)
- mddev->bitmap_ops->startwrite(mddev, sh->sector,
- RAID5_STRIPE_SECTORS(conf), false);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
@@ -5928,6 +5880,54 @@ static enum reshape_loc get_reshape_loc(struct mddev *mddev,
return LOC_BEHIND_RESHAPE;
}
+static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
+ unsigned long *sectors)
+{
+ struct r5conf *conf = mddev->private;
+ sector_t start = *offset;
+ sector_t end = start + *sectors;
+ sector_t prev_start = start;
+ sector_t prev_end = end;
+ int sectors_per_chunk;
+ enum reshape_loc loc;
+ int dd_idx;
+
+ sectors_per_chunk = conf->chunk_sectors *
+ (conf->raid_disks - conf->max_degraded);
+ start = round_down(start, sectors_per_chunk);
+ end = round_up(end, sectors_per_chunk);
+
+ start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
+ end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
+
+ /*
+ * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
+ * progress, hence it's the same as LOC_BEHIND_RESHAPE.
+ */
+ loc = get_reshape_loc(mddev, conf, prev_start);
+ if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
+ *offset = start;
+ *sectors = end - start;
+ return;
+ }
+
+ sectors_per_chunk = conf->prev_chunk_sectors *
+ (conf->previous_raid_disks - conf->max_degraded);
+ prev_start = round_down(prev_start, sectors_per_chunk);
+ prev_end = round_up(prev_end, sectors_per_chunk);
+
+ prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
+ prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
+
+ /*
+ * For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
+ * is handled in make_stripe_request(); we can't know that here, hence
+ * we set bits for both the old and the new geometry.
+ */
+ *offset = min(start, prev_start);
+ *sectors = max(end, prev_end) - *offset;
+}
+
static enum stripe_result make_stripe_request(struct mddev *mddev,
struct r5conf *conf, struct stripe_request_ctx *ctx,
sector_t logical_sector, struct bio *bi)
@@ -8976,6 +8976,7 @@ static struct md_personality raid6_personality =
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid5_personality =
{
@@ -9001,6 +9002,7 @@ static struct md_personality raid5_personality =
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid4_personality =
@@ -9027,6 +9029,7 @@ static struct md_personality raid4_personality =
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static int __init raid5_init(void)
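A worked example for raid5_bitmap_sector() on the no-reshape path, with
hypothetical geometry: raid5 on 5 disks (4 data + 1 parity) and
chunk_sectors = 8, so sectors_per_chunk = 8 * 4 = 32 array sectors per
stripe row. For an I/O of 10 sectors at offset 40:

        start = round_down(40, 32)      = 32
        end   = round_up(40 + 10, 32)   = 64

The range is widened to whole stripe rows, and both ends are then
translated to physical sectors with raid5_compute_sector() before the
bitmap bits are set.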
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d174e586698f..eafc6e9ed6ee 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -358,7 +358,6 @@ enum {
STRIPE_REPLACED,
STRIPE_PREREAD_ACTIVE,
STRIPE_DELAYED,
- STRIPE_DEGRADED,
STRIPE_BIT_DELAY,
STRIPE_EXPANDING,
STRIPE_EXPAND_SOURCE,
@@ -372,9 +371,6 @@ enum {
STRIPE_ON_RELEASE_LIST,
STRIPE_BATCH_READY,
STRIPE_BATCH_ERR,
- STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
- * to batch yet.
- */
STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
* this bit is used in two scenarios:
*
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index 1bf249f446a9..1cb745855600 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
@@ -107,7 +108,6 @@ static const char * const ipu_vcm_types[] = {
"lc898212axb",
};
-#if IS_ENABLED(CONFIG_ACPI)
/*
* Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev()
* instead of device and driver match to probe IVSC device.
@@ -127,11 +127,11 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
struct acpi_device *consumer, *ivsc_adev;
- acpi_handle handle = acpi_device_handle(adev);
+ acpi_handle handle = acpi_device_handle(ACPI_PTR(adev));
for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
/* camera sensor depends on IVSC in DSDT if it exists */
for_each_acpi_consumer_dev(ivsc_adev, consumer)
- if (consumer->handle == handle) {
+ if (ACPI_PTR(consumer->handle) == handle) {
acpi_dev_put(consumer);
return ivsc_adev;
}
@@ -139,12 +139,6 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
return NULL;
}
-#else
-static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
-{
- return NULL;
-}
-#endif
static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
{
@@ -259,12 +253,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev
{
enum v4l2_fwnode_orientation orientation;
struct acpi_pld_info *pld = NULL;
- acpi_status status = AE_ERROR;
-#if IS_ENABLED(CONFIG_ACPI)
- status = acpi_get_physical_device_location(adev->handle, &pld);
-#endif
- if (ACPI_FAILURE(status)) {
+ if (!acpi_get_physical_device_location(ACPI_PTR(adev->handle), &pld)) {
dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n");
return V4L2_FWNODE_ORIENTATION_EXTERNAL;
}
@@ -498,9 +488,7 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
if (sensor->csi_dev) {
const char *device_hid = "";
-#if IS_ENABLED(CONFIG_ACPI)
device_hid = acpi_device_hid(sensor->ivsc_adev);
-#endif
snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
device_hid, sensor->link);
@@ -671,11 +659,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
struct acpi_device *adev = NULL;
int ret;
-#if IS_ENABLED(CONFIG_ACPI)
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
-#else
- while (true) {
-#endif
if (!ACPI_PTR(adev->status.enabled))
continue;
@@ -768,15 +752,10 @@ static int ipu_bridge_ivsc_is_ready(void)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
-#if IS_ENABLED(CONFIG_ACPI)
const struct ipu_sensor_config *cfg =
&ipu_supported_sensors[i];
for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
-#else
- while (true) {
- sensor_adev = NULL;
-#endif
if (!ACPI_PTR(sensor_adev->status.enabled))
continue;
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index 6b294a2d6717..d1320298a0f7 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -271,7 +271,7 @@ static int wave5_vpu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
dev->hrtimer.function = &wave5_vpu_timer_callback;
- dev->worker = kthread_create_worker(0, "vpu_irq_thread");
+ dev->worker = kthread_run_worker(0, "vpu_irq_thread");
if (IS_ERR(dev->worker)) {
dev_err(&pdev->dev, "failed to create vpu irq worker\n");
ret = PTR_ERR(dev->worker);
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 50eb9f49512b..e2a75a52563f 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -358,17 +358,6 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
return (time_ps + tick_ps - 1) / tick_ps;
}
-static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
- enum gpmc_clk_domain cd)
-{
- return ticks * gpmc_get_clk_period(cs, cd) / 1000;
-}
-
-unsigned int gpmc_ticks_to_ns(unsigned int ticks)
-{
- return gpmc_clk_ticks_to_ns(ticks, /* any CS */ 0, GPMC_CD_FCLK);
-}
-
static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
{
return ticks * gpmc_get_fclk_period();
@@ -415,6 +404,13 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
}
#ifdef CONFIG_OMAP_GPMC_DEBUG
+
+static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
+ enum gpmc_clk_domain cd)
+{
+ return ticks * gpmc_get_clk_period(cs, cd) / 1000;
+}
+
/**
* get_gpmc_timing_reg - read a timing parameter and print DTS settings for it.
* @cs: Chip Select Region
@@ -1295,21 +1291,6 @@ int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
}
EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
-int gpmc_get_client_irq(unsigned int irq_config)
-{
- if (!gpmc_irq_domain) {
- pr_warn("%s called before GPMC IRQ domain available\n",
- __func__);
- return 0;
- }
-
- /* we restrict this to NAND IRQs only */
- if (irq_config >= GPMC_NR_NAND_IRQS)
- return 0;
-
- return irq_create_mapping(gpmc_irq_domain, irq_config);
-}
-
static int gpmc_irq_endis(unsigned long hwirq, bool endis)
{
u32 regval;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 7193f848d17e..9b7d30a21a5b 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -474,14 +474,15 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
ram_code = tegra_read_ram_code();
- for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
- np = of_find_node_by_name(np, "emc-tables")) {
+ for_each_child_of_node(dev->of_node, np) {
+ if (!of_node_name_eq(np, "emc-tables"))
+ continue;
err = of_property_read_u32(np, "nvidia,ram-code", &value);
if (err || value != ram_code) {
struct device_node *lpddr2_np;
bool cfg_mismatches = false;
- lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ lpddr2_np = of_get_child_by_name(np, "lpddr2");
if (lpddr2_np) {
const struct lpddr2_info *info;
@@ -518,7 +519,6 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
}
if (cfg_mismatches) {
- of_node_put(np);
continue;
}
}
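For context on the tegra20-emc loop conversion: for_each_child_of_node()
iterates only the direct children of dev->of_node, and its loop header
drops the reference on the previous child while taking one on the next,
so the explicit of_node_put() before 'continue' becomes redundant. The
refcount contract in isolation (a sketch; 'parent' is hypothetical):

        struct device_node *child;

        for_each_child_of_node(parent, child) {
                if (!of_node_name_eq(child, "emc-tables"))
                        continue;       /* ref dropped by the loop header */
                /* ... inspect the matching emc-tables node ... */
        }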
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index d54dc3cfff73..c8b83c9edbd5 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -13,7 +13,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/memory/ti-aemif.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -69,39 +71,27 @@
#define ACR_SSTROBE_MASK BIT(31)
#define ASIZE_16BIT 1
-#define CONFIG_MASK (TA(TA_MAX) | \
- RHOLD(RHOLD_MAX) | \
- RSTROBE(RSTROBE_MAX) | \
- RSETUP(RSETUP_MAX) | \
- WHOLD(WHOLD_MAX) | \
- WSTROBE(WSTROBE_MAX) | \
- WSETUP(WSETUP_MAX) | \
- EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | \
- ASIZE_MAX)
+#define TIMINGS_MASK (TA(TA_MAX) | \
+ RHOLD(RHOLD_MAX) | \
+ RSTROBE(RSTROBE_MAX) | \
+ RSETUP(RSETUP_MAX) | \
+ WHOLD(WHOLD_MAX) | \
+ WSTROBE(WSTROBE_MAX) | \
+ WSETUP(WSETUP_MAX))
+
+#define CONFIG_MASK (EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | ASIZE_MAX)
/**
- * struct aemif_cs_data: structure to hold cs parameters
+ * struct aemif_cs_data: structure to hold CS parameters
+ * @timings: timings configuration
* @cs: chip-select number
- * @wstrobe: write strobe width, ns
- * @rstrobe: read strobe width, ns
- * @wsetup: write setup width, ns
- * @whold: write hold width, ns
- * @rsetup: read setup width, ns
- * @rhold: read hold width, ns
- * @ta: minimum turn around time, ns
* @enable_ss: enable/disable select strobe mode
* @enable_ew: enable/disable extended wait mode
* @asize: width of the asynchronous device's data bus
*/
struct aemif_cs_data {
+ struct aemif_cs_timings timings;
u8 cs;
- u16 wstrobe;
- u16 rstrobe;
- u8 wsetup;
- u8 whold;
- u8 rsetup;
- u8 rhold;
- u8 ta;
u8 enable_ss;
u8 enable_ew;
u8 asize;
@@ -115,6 +105,7 @@ struct aemif_cs_data {
* @num_cs: number of assigned chip-selects
* @cs_offset: start number of cs nodes
* @cs_data: array of chip-select settings
+ * @config_cs_lock: lock used to access CS configuration
*/
struct aemif_device {
void __iomem *base;
@@ -123,20 +114,94 @@ struct aemif_device {
u8 num_cs;
int cs_offset;
struct aemif_cs_data cs_data[NUM_CS];
+ struct mutex config_cs_lock;
};
/**
+ * aemif_check_cs_timings() - Check the validity of a CS timing configuration.
+ * @timings: timings configuration
+ *
+ * @return: 0 if the timing configuration is valid, negative error number otherwise.
+ */
+int aemif_check_cs_timings(struct aemif_cs_timings *timings)
+{
+ if (timings->ta > TA_MAX)
+ return -EINVAL;
+
+ if (timings->rhold > RHOLD_MAX)
+ return -EINVAL;
+
+ if (timings->rstrobe > RSTROBE_MAX)
+ return -EINVAL;
+
+ if (timings->rsetup > RSETUP_MAX)
+ return -EINVAL;
+
+ if (timings->whold > WHOLD_MAX)
+ return -EINVAL;
+
+ if (timings->wstrobe > WSTROBE_MAX)
+ return -EINVAL;
+
+ if (timings->wsetup > WSETUP_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aemif_check_cs_timings);
+
+/**
+ * aemif_set_cs_timings() - Set the timing configuration of a given chip select.
+ * @aemif: aemif device to configure
+ * @cs: index of the chip select to configure
+ * @timings: timings configuration to set
+ *
+ * @return: 0 on success, else negative errno.
+ */
+int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs,
+ struct aemif_cs_timings *timings)
+{
+ unsigned int offset;
+ u32 val, set;
+ int ret;
+
+ if (!timings || !aemif)
+ return -EINVAL;
+
+ if (cs > aemif->num_cs)
+ return -EINVAL;
+
+ ret = aemif_check_cs_timings(timings);
+ if (ret)
+ return ret;
+
+ set = TA(timings->ta) | RHOLD(timings->rhold) | RSTROBE(timings->rstrobe) |
+ RSETUP(timings->rsetup) | WHOLD(timings->whold) |
+ WSTROBE(timings->wstrobe) | WSETUP(timings->wsetup);
+
+ offset = A1CR_OFFSET + cs * 4;
+
+ mutex_lock(&aemif->config_cs_lock);
+ val = readl(aemif->base + offset);
+ val &= ~TIMINGS_MASK;
+ val |= set;
+ writel(val, aemif->base + offset);
+ mutex_unlock(&aemif->config_cs_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aemif_set_cs_timings);
+
+/**
* aemif_calc_rate - calculate timing data.
* @pdev: platform device to calculate for
* @wanted: The cycle time needed in nanoseconds.
* @clk: The input clock rate in kHz.
- * @max: The maximum divider value that can be programmed.
*
- * On success, returns the calculated timing value minus 1 for easy
- * programming into AEMIF timing registers, else negative errno.
+ * @return: the calculated timing value minus 1 for easy
+ * programming into AEMIF timing registers.
*/
-static int aemif_calc_rate(struct platform_device *pdev, int wanted,
- unsigned long clk, int max)
+static u32 aemif_calc_rate(struct platform_device *pdev, int wanted, unsigned long clk)
{
int result;
@@ -149,10 +214,6 @@ static int aemif_calc_rate(struct platform_device *pdev, int wanted,
if (result < 0)
result = 0;
- /* ... But configuring tighter timings is not an option. */
- else if (result > max)
- result = -EINVAL;
-
return result;
}
@@ -174,48 +235,25 @@ static int aemif_config_abus(struct platform_device *pdev, int csnum)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
struct aemif_cs_data *data = &aemif->cs_data[csnum];
- int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
- unsigned long clk_rate = aemif->clk_rate;
unsigned offset;
u32 set, val;
offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
- ta = aemif_calc_rate(pdev, data->ta, clk_rate, TA_MAX);
- rhold = aemif_calc_rate(pdev, data->rhold, clk_rate, RHOLD_MAX);
- rstrobe = aemif_calc_rate(pdev, data->rstrobe, clk_rate, RSTROBE_MAX);
- rsetup = aemif_calc_rate(pdev, data->rsetup, clk_rate, RSETUP_MAX);
- whold = aemif_calc_rate(pdev, data->whold, clk_rate, WHOLD_MAX);
- wstrobe = aemif_calc_rate(pdev, data->wstrobe, clk_rate, WSTROBE_MAX);
- wsetup = aemif_calc_rate(pdev, data->wsetup, clk_rate, WSETUP_MAX);
-
- if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
- whold < 0 || wstrobe < 0 || wsetup < 0) {
- dev_err(&pdev->dev, "%s: cannot get suitable timings\n",
- __func__);
- return -EINVAL;
- }
-
- set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
- WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
-
- set |= (data->asize & ACR_ASIZE_MASK);
+ set = (data->asize & ACR_ASIZE_MASK);
if (data->enable_ew)
set |= ACR_EW_MASK;
if (data->enable_ss)
set |= ACR_SSTROBE_MASK;
+ mutex_lock(&aemif->config_cs_lock);
val = readl(aemif->base + offset);
val &= ~CONFIG_MASK;
val |= set;
writel(val, aemif->base + offset);
+ mutex_unlock(&aemif->config_cs_lock);
- return 0;
-}
-
-static inline int aemif_cycles_to_nsec(int val, unsigned long clk_rate)
-{
- return ((val + 1) * NSEC_PER_MSEC) / clk_rate;
+ return aemif_set_cs_timings(aemif, data->cs - aemif->cs_offset, &data->timings);
}
/**
@@ -231,19 +269,18 @@ static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
struct aemif_cs_data *data = &aemif->cs_data[csnum];
- unsigned long clk_rate = aemif->clk_rate;
u32 val, offset;
offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
val = readl(aemif->base + offset);
- data->ta = aemif_cycles_to_nsec(TA_VAL(val), clk_rate);
- data->rhold = aemif_cycles_to_nsec(RHOLD_VAL(val), clk_rate);
- data->rstrobe = aemif_cycles_to_nsec(RSTROBE_VAL(val), clk_rate);
- data->rsetup = aemif_cycles_to_nsec(RSETUP_VAL(val), clk_rate);
- data->whold = aemif_cycles_to_nsec(WHOLD_VAL(val), clk_rate);
- data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
- data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
+ data->timings.ta = TA_VAL(val);
+ data->timings.rhold = RHOLD_VAL(val);
+ data->timings.rstrobe = RSTROBE_VAL(val);
+ data->timings.rsetup = RSETUP_VAL(val);
+ data->timings.whold = WHOLD_VAL(val);
+ data->timings.wstrobe = WSTROBE_VAL(val);
+ data->timings.wsetup = WSETUP_VAL(val);
data->enable_ew = EW_VAL(val);
data->enable_ss = SSTROBE_VAL(val);
data->asize = val & ASIZE_MAX;
@@ -261,6 +298,7 @@ static int of_aemif_parse_abus_config(struct platform_device *pdev,
struct device_node *np)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
+ unsigned long clk_rate = aemif->clk_rate;
struct aemif_cs_data *data;
u32 cs;
u32 val;
@@ -288,32 +326,33 @@ static int of_aemif_parse_abus_config(struct platform_device *pdev,
/* override the values from device node */
if (!of_property_read_u32(np, "ti,cs-min-turnaround-ns", &val))
- data->ta = val;
+ data->timings.ta = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-hold-ns", &val))
- data->rhold = val;
+ data->timings.rhold = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-strobe-ns", &val))
- data->rstrobe = val;
+ data->timings.rstrobe = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-setup-ns", &val))
- data->rsetup = val;
+ data->timings.rsetup = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-hold-ns", &val))
- data->whold = val;
+ data->timings.whold = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-strobe-ns", &val))
- data->wstrobe = val;
+ data->timings.wstrobe = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-setup-ns", &val))
- data->wsetup = val;
+ data->timings.wsetup = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-bus-width", &val))
if (val == 16)
data->asize = 1;
data->enable_ew = of_property_read_bool(np, "ti,cs-extended-wait-mode");
data->enable_ss = of_property_read_bool(np, "ti,cs-select-strobe-mode");
- return 0;
+
+ return aemif_check_cs_timings(&data->timings);
}
static const struct of_device_id aemif_of_match[] = {
@@ -351,6 +390,7 @@ static int aemif_probe(struct platform_device *pdev)
if (IS_ERR(aemif->base))
return PTR_ERR(aemif->base);
+ mutex_init(&aemif->config_cs_lock);
if (np) {
/*
* For every controller device node, there is a cs device node
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 20a2466bec23..5b617c1f6789 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2094,8 +2094,7 @@ static int msb_init_disk(struct memstick_dev *card)
if (msb->disk_id < 0)
return msb->disk_id;
- rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 0);
if (rc)
goto out_release_id;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 13b317c56069..634d343b6bdb 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1139,8 +1139,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
if (disk_id < 0)
return disk_id;
- rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 0);
if (rc)
goto out_release_id;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ae23b317a64e..6b0682af6e32 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2386,6 +2386,19 @@ config MFD_INTEL_M10_BMC_PMCI
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_QNAP_MCU
+ tristate "QNAP microcontroller unit core driver"
+ depends on SERIAL_DEV_BUS
+ select MFD_CORE
+ help
+ Select this to get support for the QNAP MCU found in several
+ QNAP network attached storage devices. It implements additional
+ functionality for the device, like fan and LED control.
+
+ This driver implements the base serial protocol to talk to the
+ device and provides functions for the other parts to hook into.
+
config MFD_RSMU_I2C
tristate "Renesas Synchronization Management Unit with I2C"
depends on I2C && OF
@@ -2414,5 +2427,17 @@ config MFD_RSMU_SPI
Additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_UPBOARD_FPGA
+ tristate "Support for the AAeon UP board FPGA"
+ depends on (X86 && ACPI)
+ select MFD_CORE
+ help
+ Select this option to enable support for the onboard FPGA of the
+ AAEON UP and UP^2 boards. This is the core driver of this FPGA,
+ which provides a pin controller and an LED controller.
+
+ To compile this driver as a module, choose M here: the module will be
+ called upboard-fpga.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index e057d6d6faef..9220eaf7cf12 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -288,5 +288,9 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
+obj-$(CONFIG_MFD_QNAP_MCU) += qnap-mcu.o
+
obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o
obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o
+
+obj-$(CONFIG_MFD_UPBOARD_FPGA) += upboard-fpga.o
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 251465a656d0..cff56deba24f 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -1445,7 +1445,7 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
}
}
- ret = mfd_add_devices(axp20x->dev, PLATFORM_DEVID_AUTO, axp20x->cells,
+ ret = mfd_add_devices(axp20x->dev, PLATFORM_DEVID_NONE, axp20x->cells,
axp20x->nr_cells, NULL, 0, NULL);
if (ret) {
@@ -1455,10 +1455,7 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
}
if (axp20x->variant != AXP288_ID)
- devm_register_sys_off_handler(axp20x->dev,
- SYS_OFF_MODE_POWER_OFF,
- SYS_OFF_PRIO_DEFAULT,
- axp20x_power_off, axp20x);
+ devm_register_power_off_handler(axp20x->dev, axp20x_power_off, axp20x);
dev_info(axp20x->dev, "AXP20X driver loaded\n");
diff --git a/drivers/mfd/cs42l43-i2c.c b/drivers/mfd/cs42l43-i2c.c
index f0ad4002652d..a2ab001a600a 100644
--- a/drivers/mfd/cs42l43-i2c.c
+++ b/drivers/mfd/cs42l43-i2c.c
@@ -56,13 +56,6 @@ static int cs42l43_i2c_probe(struct i2c_client *i2c)
return cs42l43_dev_probe(cs42l43);
}
-static void cs42l43_i2c_remove(struct i2c_client *i2c)
-{
- struct cs42l43 *cs42l43 = dev_get_drvdata(&i2c->dev);
-
- cs42l43_dev_remove(cs42l43);
-}
-
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id cs42l43_of_match[] = {
{ .compatible = "cirrus,cs42l43", },
@@ -88,7 +81,6 @@ static struct i2c_driver cs42l43_i2c_driver = {
},
.probe = cs42l43_i2c_probe,
- .remove = cs42l43_i2c_remove,
};
module_i2c_driver(cs42l43_i2c_driver);
diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
index 3938d48039c4..023f7e1a30f8 100644
--- a/drivers/mfd/cs42l43-sdw.c
+++ b/drivers/mfd/cs42l43-sdw.c
@@ -187,15 +187,6 @@ static int cs42l43_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *
return cs42l43_dev_probe(cs42l43);
}
-static int cs42l43_sdw_remove(struct sdw_slave *sdw)
-{
- struct cs42l43 *cs42l43 = dev_get_drvdata(&sdw->dev);
-
- cs42l43_dev_remove(cs42l43);
-
- return 0;
-}
-
static const struct sdw_device_id cs42l43_sdw_id[] = {
SDW_SLAVE_ENTRY(0x01FA, 0x4243, 0),
{}
@@ -209,7 +200,6 @@ static struct sdw_driver cs42l43_sdw_driver = {
},
.probe = cs42l43_sdw_probe,
- .remove = cs42l43_sdw_remove,
.id_table = cs42l43_sdw_id,
.ops = &cs42l43_sdw_ops,
};
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index b5ab5e613db7..103787f37443 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -29,7 +29,7 @@
#define CS42L43_RESET_DELAY_MS 20
-#define CS42L43_SDW_ATTACH_TIMEOUT_MS 500
+#define CS42L43_SDW_ATTACH_TIMEOUT_MS 5000
#define CS42L43_SDW_DETACH_TIMEOUT_MS 100
#define CS42L43_MCU_BOOT_STAGE1 1
@@ -48,6 +48,7 @@
#define CS42L43_MCU_SUPPORTED_REV 0x2105
#define CS42L43_MCU_SHADOW_REGS_REQUIRED_REV 0x2200
+#define CS42L43_BIOS_SHADOW_REGS_REQUIRED_REV 0x1002
#define CS42L43_MCU_SUPPORTED_BIOS_REV 0x0001
#define CS42L43_VDDP_DELAY_US 50
@@ -773,7 +774,8 @@ static int cs42l43_mcu_update_step(struct cs42l43 *cs42l43)
* Later versions of the firmware require the driver to access some
* features through a set of shadow registers.
*/
- shadow = mcu_rev >= CS42L43_MCU_SHADOW_REGS_REQUIRED_REV;
+ shadow = (mcu_rev >= CS42L43_MCU_SHADOW_REGS_REQUIRED_REV) ||
+ (bios_rev >= CS42L43_BIOS_SHADOW_REGS_REQUIRED_REV);
ret = regmap_read(cs42l43->regmap, CS42L43_BOOT_CONTROL, &secure_cfg);
if (ret) {
@@ -982,7 +984,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
/* vdd-p must be on for 50uS before any other supply */
usleep_range(CS42L43_VDDP_DELAY_US, 2 * CS42L43_VDDP_DELAY_US);
- gpiod_set_value_cansleep(cs42l43->reset, 1);
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 1);
ret = regulator_bulk_enable(CS42L43_N_SUPPLIES, cs42l43->core_supplies);
if (ret) {
@@ -1003,7 +1005,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
err_core_supplies:
regulator_bulk_disable(CS42L43_N_SUPPLIES, cs42l43->core_supplies);
err_reset:
- gpiod_set_value_cansleep(cs42l43->reset, 0);
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 0);
regulator_disable(cs42l43->vdd_p);
return ret;
@@ -1025,7 +1027,7 @@ static int cs42l43_power_down(struct cs42l43 *cs42l43)
return ret;
}
- gpiod_set_value_cansleep(cs42l43->reset, 0);
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 0);
ret = regulator_disable(cs42l43->vdd_p);
if (ret) {
@@ -1036,6 +1038,15 @@ static int cs42l43_power_down(struct cs42l43 *cs42l43)
return 0;
}
+static void cs42l43_dev_remove(void *data)
+{
+ struct cs42l43 *cs42l43 = data;
+
+ cancel_work_sync(&cs42l43->boot_work);
+
+ cs42l43_power_down(cs42l43);
+}
+
int cs42l43_dev_probe(struct cs42l43 *cs42l43)
{
int i, ret;
@@ -1050,11 +1061,13 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
regcache_cache_only(cs42l43->regmap, true);
- cs42l43->reset = devm_gpiod_get_optional(cs42l43->dev, "reset", GPIOD_OUT_LOW);
+ cs42l43->reset = devm_gpiod_get_optional(cs42l43->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(cs42l43->reset))
return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->reset),
"Failed to get reset\n");
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 0);
+
cs42l43->vdd_p = devm_regulator_get(cs42l43->dev, "vdd-p");
if (IS_ERR(cs42l43->vdd_p))
return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->vdd_p),
@@ -1080,6 +1093,10 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(cs42l43->dev, cs42l43_dev_remove, cs42l43);
+ if (ret)
+ return ret;
+
pm_runtime_set_autosuspend_delay(cs42l43->dev, CS42L43_AUTOSUSPEND_TIME_MS);
pm_runtime_use_autosuspend(cs42l43->dev);
pm_runtime_set_active(cs42l43->dev);
@@ -1098,14 +1115,6 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
}
EXPORT_SYMBOL_NS_GPL(cs42l43_dev_probe, "MFD_CS42L43");
-void cs42l43_dev_remove(struct cs42l43 *cs42l43)
-{
- cancel_work_sync(&cs42l43->boot_work);
-
- cs42l43_power_down(cs42l43);
-}
-EXPORT_SYMBOL_NS_GPL(cs42l43_dev_remove, "MFD_CS42L43");
-
static int cs42l43_suspend(struct device *dev)
{
struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
diff --git a/drivers/mfd/cs42l43.h b/drivers/mfd/cs42l43.h
index 8d1b1b0f5a47..f3da783930f5 100644
--- a/drivers/mfd/cs42l43.h
+++ b/drivers/mfd/cs42l43.h
@@ -25,6 +25,5 @@ bool cs42l43_precious_register(struct device *dev, unsigned int reg);
bool cs42l43_volatile_register(struct device *dev, unsigned int reg);
int cs42l43_dev_probe(struct cs42l43 *cs42l43);
-void cs42l43_dev_remove(struct cs42l43 *cs42l43);
#endif /* CS42L43_CORE_INT_H */
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index dc85801b9fa0..b06cd518413b 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -585,6 +585,7 @@ static int da9052_clear_fault_log(struct da9052 *da9052)
"Cannot reset FAULT_LOG values %d\n", ret);
}
+ da9052->fault_log = fault_log;
return ret;
}
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 992855bfda3e..8582ae65a802 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -81,7 +81,7 @@ static struct mfd_cell chtdc_ti_dev[] = {
static const struct regmap_config chtdc_ti_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 128,
+ .max_register = 0xff,
.cache_type = REGCACHE_NONE,
};
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index f14901660147..4b7d0cb9340f 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -834,8 +834,9 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
{ PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
{ PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
- { PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
{ PCI_VDEVICE(INTEL, 0x2b9c), LPC_COUGARMOUNTAIN},
+ { PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
+ { PCI_VDEVICE(INTEL, 0x31e8), LPC_GLK},
{ PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
{ PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
{ PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
diff --git a/drivers/mfd/qnap-mcu.c b/drivers/mfd/qnap-mcu.c
new file mode 100644
index 000000000000..4be39d8b2905
--- /dev/null
+++ b/drivers/mfd/qnap-mcu.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Core driver for the microcontroller unit in QNAP NAS devices that is
+ * connected via a dedicated UART port.
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/export.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+
+/* The longest command found so far is 5 bytes long */
+#define QNAP_MCU_MAX_CMD_SIZE 5
+#define QNAP_MCU_MAX_DATA_SIZE 36
+#define QNAP_MCU_CHECKSUM_SIZE 1
+
+#define QNAP_MCU_RX_BUFFER_SIZE \
+ (QNAP_MCU_MAX_DATA_SIZE + QNAP_MCU_CHECKSUM_SIZE)
+
+#define QNAP_MCU_TX_BUFFER_SIZE \
+ (QNAP_MCU_MAX_CMD_SIZE + QNAP_MCU_CHECKSUM_SIZE)
+
+#define QNAP_MCU_ACK_LEN 2
+#define QNAP_MCU_VERSION_LEN 4
+
+#define QNAP_MCU_TIMEOUT_MS 500
+
+/**
+ * struct qnap_mcu_reply - Reply to a command
+ *
+ * @data: Buffer to store reply payload in
+ * @length: Expected reply length, including the checksum
+ * @received: Received number of bytes, so far
+ * @done: Triggered when the entire reply has been received
+ */
+struct qnap_mcu_reply {
+ u8 *data;
+ size_t length;
+ size_t received;
+ struct completion done;
+};
+
+/**
+ * struct qnap_mcu - QNAP NAS embedded controller
+ *
+ * @serdev: Pointer to underlying serdev
+ * @bus_lock: Lock to serialize access to the device
+ * @reply: Reply data structure
+ * @variant: Device variant specific information
+ * @version: MCU firmware version
+ */
+struct qnap_mcu {
+ struct serdev_device *serdev;
+ struct mutex bus_lock;
+ struct qnap_mcu_reply reply;
+ const struct qnap_mcu_variant *variant;
+ u8 version[QNAP_MCU_VERSION_LEN];
+};
+
+/*
+ * The QNAP-MCU uses a basic XOR checksum.
+ * It is always the last byte and is the XOR of the whole preceding message.
+ */
+static u8 qnap_mcu_csum(const u8 *buf, size_t size)
+{
+ u8 csum = 0;
+
+ while (size--)
+ csum ^= *buf++;
+
+ return csum;
+}
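A small receive-side corollary (illustrative, not in the patch): because the checksum is the XOR of everything before it, XORing a complete frame - payload plus trailing checksum byte - yields zero exactly when the frame is intact.

/* Illustrative only: a whole frame XORs to 0 iff its checksum is valid. */
static bool example_frame_valid(const u8 *frame, size_t len)
{
        return len > 1 && qnap_mcu_csum(frame, len) == 0;
}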
+
+static int qnap_mcu_write(struct qnap_mcu *mcu, const u8 *data, u8 data_size)
+{
+ unsigned char tx[QNAP_MCU_TX_BUFFER_SIZE];
+ size_t length = data_size + QNAP_MCU_CHECKSUM_SIZE;
+
+ if (length > sizeof(tx)) {
+ dev_err(&mcu->serdev->dev, "data too big for transmit buffer");
+ return -EINVAL;
+ }
+
+ memcpy(tx, data, data_size);
+ tx[data_size] = qnap_mcu_csum(data, data_size);
+
+ serdev_device_write_flush(mcu->serdev);
+
+ return serdev_device_write(mcu->serdev, tx, length, HZ);
+}
+
+static size_t qnap_mcu_receive_buf(struct serdev_device *serdev, const u8 *buf, size_t size)
+{
+ struct device *dev = &serdev->dev;
+ struct qnap_mcu *mcu = dev_get_drvdata(dev);
+ struct qnap_mcu_reply *reply = &mcu->reply;
+ const u8 *src = buf;
+ const u8 *end = buf + size;
+
+ if (!reply->length) {
+ dev_warn(dev, "Received %zu bytes, we were not waiting for\n", size);
+ return size;
+ }
+
+ while (src < end) {
+ reply->data[reply->received] = *src++;
+ reply->received++;
+
+ if (reply->received == reply->length) {
+ /* We don't expect any characters from the device now */
+ reply->length = 0;
+
+ complete(&reply->done);
+
+ /*
+ * We report the consumed number of bytes. If there
+ * are still bytes remaining (though there shouldn't be),
+ * the serdev layer will re-execute this handler with
+ * the remainder of the Rx bytes.
+ */
+ return src - buf;
+ }
+ }
+
+ /*
+ * The only way to get out of the above loop and end up here
+ * is through consuming all of the supplied data, so here we
+ * report that we processed it all.
+ */
+ return size;
+}
+
+static const struct serdev_device_ops qnap_mcu_serdev_device_ops = {
+ .receive_buf = qnap_mcu_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+int qnap_mcu_exec(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size,
+ u8 *reply_data, size_t reply_data_size)
+{
+ unsigned char rx[QNAP_MCU_RX_BUFFER_SIZE];
+ size_t length = reply_data_size + QNAP_MCU_CHECKSUM_SIZE;
+ struct qnap_mcu_reply *reply = &mcu->reply;
+ int ret = 0;
+
+ if (length > sizeof(rx)) {
+ dev_err(&mcu->serdev->dev, "expected data too big for receive buffer");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mcu->bus_lock);
+
+ reply->data = rx;
+ reply->length = length;
+ reply->received = 0;
+ reinit_completion(&reply->done);
+
+ qnap_mcu_write(mcu, cmd_data, cmd_data_size);
+
+ serdev_device_wait_until_sent(mcu->serdev, msecs_to_jiffies(QNAP_MCU_TIMEOUT_MS));
+
+ if (!wait_for_completion_timeout(&reply->done, msecs_to_jiffies(QNAP_MCU_TIMEOUT_MS))) {
+ dev_err(&mcu->serdev->dev, "Command timeout\n");
+ ret = -ETIMEDOUT;
+ } else {
+ u8 csum = qnap_mcu_csum(rx, reply_data_size);
+
+ if (csum != rx[reply_data_size]) {
+ dev_err(&mcu->serdev->dev,
+ "Invalid checksum received\n");
+ ret = -EIO;
+ } else {
+ memcpy(reply_data, rx, reply_data_size);
+ }
+ }
+
+ mutex_unlock(&mcu->bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qnap_mcu_exec);
+
+int qnap_mcu_exec_with_ack(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size)
+{
+ u8 ack[QNAP_MCU_ACK_LEN];
+ int ret;
+
+ ret = qnap_mcu_exec(mcu, cmd_data, cmd_data_size, ack, sizeof(ack));
+ if (ret)
+ return ret;
+
+ /* A successful command is acked with the two characters "@0" */
+ if (ack[0] != '@' || ack[1] != '0') {
+ dev_err(&mcu->serdev->dev, "Did not receive ack\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qnap_mcu_exec_with_ack);
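To show how child cells are expected to use these helpers, here is a hypothetical sketch; the '@', 'F' opcode is a placeholder for illustration, not a documented MCU command:

/* Illustrative only: send a hypothetical command and wait for the ack. */
static int example_set_fan(struct qnap_mcu *mcu, u8 duty)
{
        const u8 cmd[] = { '@', 'F', duty };    /* placeholder opcode */

        return qnap_mcu_exec_with_ack(mcu, cmd, sizeof(cmd));
}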
+
+static int qnap_mcu_get_version(struct qnap_mcu *mcu)
+{
+ const u8 cmd[] = { '%', 'V' };
+ u8 rx[14];
+ int ret;
+
+ /* The reply is the 2 command bytes plus 4 bytes describing the version */
+ ret = qnap_mcu_exec(mcu, cmd, sizeof(cmd), rx, QNAP_MCU_VERSION_LEN + 2);
+ if (ret)
+ return ret;
+
+ memcpy(mcu->version, &rx[2], QNAP_MCU_VERSION_LEN);
+
+ return 0;
+}
+
+/*
+ * The MCU controls power to the peripherals but not the CPU.
+ *
+ * Powering off via the PMIC therefore keeps the MCU and hard drives
+ * running, and also prevents the system from turning back on until
+ * the MCU is powered down by unplugging the power cable.
+ * Turning off the MCU alone, on the other hand, turns off the hard
+ * drives, LEDs, etc. while the main SoC stays running - including its
+ * network ports.
+ */
+static int qnap_mcu_power_off(struct sys_off_data *data)
+{
+ const u8 cmd[] = { '@', 'C', '0' };
+ struct qnap_mcu *mcu = data->cb_data;
+ int ret;
+
+ ret = qnap_mcu_exec_with_ack(mcu, cmd, sizeof(cmd));
+ if (ret) {
+ dev_err(&mcu->serdev->dev, "MCU poweroff failed %d\n", ret);
+ return NOTIFY_STOP;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const struct qnap_mcu_variant qnap_ts433_mcu = {
+ .baud_rate = 115200,
+ .num_drives = 4,
+ .fan_pwm_min = 51, /* Specified in original model.conf */
+ .fan_pwm_max = 255,
+ .usb_led = true,
+};
+
+static struct mfd_cell qnap_mcu_cells[] = {
+ { .name = "qnap-mcu-input", },
+ { .name = "qnap-mcu-leds", },
+ { .name = "qnap-mcu-hwmon", }
+};
+
+static int qnap_mcu_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct qnap_mcu *mcu;
+ int ret;
+
+ mcu = devm_kzalloc(dev, sizeof(*mcu), GFP_KERNEL);
+ if (!mcu)
+ return -ENOMEM;
+
+ mcu->serdev = serdev;
+ dev_set_drvdata(dev, mcu);
+
+ mcu->variant = of_device_get_match_data(dev);
+ if (!mcu->variant)
+ return -ENODEV;
+
+ mutex_init(&mcu->bus_lock);
+ init_completion(&mcu->reply.done);
+
+ serdev_device_set_client_ops(serdev, &qnap_mcu_serdev_device_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, mcu->variant->baud_rate);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set parity\n");
+
+ ret = qnap_mcu_get_version(mcu);
+ if (ret)
+ return ret;
+
+ ret = devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ &qnap_mcu_power_off, mcu);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register poweroff handler\n");
+
+ for (int i = 0; i < ARRAY_SIZE(qnap_mcu_cells); i++) {
+ qnap_mcu_cells[i].platform_data = mcu->variant;
+ qnap_mcu_cells[i].pdata_size = sizeof(*mcu->variant);
+ }
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, qnap_mcu_cells,
+ ARRAY_SIZE(qnap_mcu_cells), NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add child devices\n");
+
+ return 0;
+}
+
+static const struct of_device_id qnap_mcu_dt_ids[] = {
+ { .compatible = "qnap,ts433-mcu", .data = &qnap_ts433_mcu },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qnap_mcu_dt_ids);
+
+static struct serdev_device_driver qnap_mcu_drv = {
+ .probe = qnap_mcu_probe,
+ .driver = {
+ .name = "qnap-mcu",
+ .of_match_table = qnap_mcu_dt_ids,
+ },
+};
+module_serdev_device_driver(qnap_mcu_drv);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index d8a603d95aa6..081827bc0596 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -170,11 +170,7 @@ static int stpmic1_probe(struct i2c_client *i2c)
return ret;
}
- ret = devm_register_sys_off_handler(ddata->dev,
- SYS_OFF_MODE_POWER_OFF,
- SYS_OFF_PRIO_DEFAULT,
- stpmic1_power_off,
- ddata);
+ ret = devm_register_power_off_handler(ddata->dev, stpmic1_power_off, ddata);
if (ret) {
dev_err(ddata->dev, "failed to register sys-off handler: %d\n", ret);
return ret;
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 3e1d699ba934..226915ca3c93 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -12,22 +12,16 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>
-#include <linux/io.h>
-#include <linux/init.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_data/syscon.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
-static struct platform_driver syscon_driver;
-
-static DEFINE_SPINLOCK(syscon_list_slock);
+static DEFINE_MUTEX(syscon_list_lock);
static LIST_HEAD(syscon_list);
struct syscon {
@@ -54,6 +48,8 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
struct resource res;
struct reset_control *reset;
+ WARN_ON(!mutex_is_locked(&syscon_list_lock));
+
struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL);
if (!syscon)
return ERR_PTR(-ENOMEM);
@@ -146,9 +142,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
syscon->regmap = regmap;
syscon->np = np;
- spin_lock(&syscon_list_slock);
list_add_tail(&syscon->list, &syscon_list);
- spin_unlock(&syscon_list_slock);
return_ptr(syscon);
@@ -169,7 +163,7 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
{
struct syscon *entry, *syscon = NULL;
- spin_lock(&syscon_list_slock);
+ mutex_lock(&syscon_list_lock);
list_for_each_entry(entry, &syscon_list, list)
if (entry->np == np) {
@@ -177,10 +171,13 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
break;
}
- spin_unlock(&syscon_list_slock);
-
- if (!syscon)
- syscon = of_syscon_register(np, check_res);
+ if (!syscon) {
+ if (of_device_is_compatible(np, "syscon"))
+ syscon = of_syscon_register(np, check_res);
+ else
+ syscon = ERR_PTR(-EINVAL);
+ }
+ mutex_unlock(&syscon_list_lock);
if (IS_ERR(syscon))
return ERR_CAST(syscon);
@@ -212,7 +209,7 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
return -ENOMEM;
/* check if syscon entry already exists */
- spin_lock(&syscon_list_slock);
+ mutex_lock(&syscon_list_lock);
list_for_each_entry(entry, &syscon_list, list)
if (entry->np == np) {
@@ -225,12 +222,12 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
/* register the regmap in syscon list */
list_add_tail(&syscon->list, &syscon_list);
- spin_unlock(&syscon_list_slock);
+ mutex_unlock(&syscon_list_lock);
return 0;
err_unlock:
- spin_unlock(&syscon_list_slock);
+ mutex_unlock(&syscon_list_lock);
kfree(syscon);
return ret;
}
@@ -244,9 +241,6 @@ EXPORT_SYMBOL_GPL(device_node_to_regmap);
struct regmap *syscon_node_to_regmap(struct device_node *np)
{
- if (!of_device_is_compatible(np, "syscon"))
- return ERR_PTR(-EINVAL);
-
return device_node_get_regmap(np, true);
}
EXPORT_SYMBOL_GPL(syscon_node_to_regmap);
@@ -336,62 +330,3 @@ struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
return regmap;
}
EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle_optional);
-
-static int syscon_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct syscon_platform_data *pdata = dev_get_platdata(dev);
- struct syscon *syscon;
- struct regmap_config syscon_config = syscon_regmap_config;
- struct resource *res;
- void __iomem *base;
-
- syscon = devm_kzalloc(dev, sizeof(*syscon), GFP_KERNEL);
- if (!syscon)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- base = devm_ioremap(dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
-
- syscon_config.max_register = resource_size(res) - 4;
- if (!syscon_config.max_register)
- syscon_config.max_register_is_0 = true;
-
- if (pdata)
- syscon_config.name = pdata->label;
- syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config);
- if (IS_ERR(syscon->regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(syscon->regmap);
- }
-
- platform_set_drvdata(pdev, syscon);
-
- dev_dbg(dev, "regmap %pR registered\n", res);
-
- return 0;
-}
-
-static const struct platform_device_id syscon_ids[] = {
- { "syscon", },
- { }
-};
-
-static struct platform_driver syscon_driver = {
- .driver = {
- .name = "syscon",
- },
- .probe = syscon_probe,
- .id_table = syscon_ids,
-};
-
-static int __init syscon_init(void)
-{
- return platform_driver_register(&syscon_driver);
-}
-postcore_initcall(syscon_init);
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index 57ff5cb294a6..081c5a30b04a 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -110,19 +110,12 @@ static const struct resource tps65219_regulator_resources[] = {
};
static const struct mfd_cell tps65219_cells[] = {
- {
- .name = "tps65219-regulator",
- .resources = tps65219_regulator_resources,
- .num_resources = ARRAY_SIZE(tps65219_regulator_resources),
- },
- { .name = "tps65219-gpio", },
+ MFD_CELL_RES("tps65219-regulator", tps65219_regulator_resources),
+ MFD_CELL_NAME("tps65219-gpio"),
};
-static const struct mfd_cell tps65219_pwrbutton_cell = {
- .name = "tps65219-pwrbutton",
- .resources = tps65219_pwrbutton_resources,
- .num_resources = ARRAY_SIZE(tps65219_pwrbutton_resources),
-};
+static const struct mfd_cell tps65219_pwrbutton_cell =
+ MFD_CELL_RES("tps65219-pwrbutton", tps65219_pwrbutton_resources);
static const struct regmap_config tps65219_regmap_config = {
.reg_bits = 8,
diff --git a/drivers/mfd/upboard-fpga.c b/drivers/mfd/upboard-fpga.c
new file mode 100644
index 000000000000..5a330e2f2229
--- /dev/null
+++ b/drivers/mfd/upboard-fpga.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UP Board FPGA driver.
+ *
+ * The FPGA provides more GPIO drive strength, LEDs and a pin mux function.
+ *
+ * Copyright (c) AAEON. All rights reserved.
+ * Copyright (C) 2024 Bootlin
+ *
+ * Author: Gary Wang <garywang@aaeon.com.tw>
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/upboard-fpga.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+
+#define UPBOARD_AAEON_MANUFACTURER_ID 0x01
+#define UPBOARD_MANUFACTURER_ID_MASK GENMASK(7, 0)
+
+#define UPBOARD_ADDRESS_SIZE 7
+#define UPBOARD_REGISTER_SIZE 16
+
+#define UPBOARD_READ_FLAG BIT(UPBOARD_ADDRESS_SIZE)
+
+#define UPBOARD_FW_ID_MAJOR_SUPPORTED 0x0
+
+#define UPBOARD_FW_ID_BUILD_MASK GENMASK(15, 12)
+#define UPBOARD_FW_ID_MAJOR_MASK GENMASK(11, 8)
+#define UPBOARD_FW_ID_MINOR_MASK GENMASK(7, 4)
+#define UPBOARD_FW_ID_PATCH_MASK GENMASK(3, 0)
+
+static int upboard_fpga_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct upboard_fpga *fpga = context;
+ int i;
+
+ /* Clear to start a new transaction */
+ gpiod_set_value(fpga->clear_gpio, 0);
+ gpiod_set_value(fpga->clear_gpio, 1);
+
+ reg |= UPBOARD_READ_FLAG;
+
+ /* Send clock and addr from strobe & datain pins */
+ for (i = UPBOARD_ADDRESS_SIZE; i >= 0; i--) {
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ gpiod_set_value(fpga->datain_gpio, !!(reg & BIT(i)));
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ *val = 0;
+
+ /* Read data from dataout pin */
+ for (i = UPBOARD_REGISTER_SIZE - 1; i >= 0; i--) {
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ *val |= gpiod_get_value(fpga->dataout_gpio) << i;
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 1);
+
+ return 0;
+}
+
+static int upboard_fpga_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct upboard_fpga *fpga = context;
+ int i;
+
+ /* Clear to start a new transaction */
+ gpiod_set_value(fpga->clear_gpio, 0);
+ gpiod_set_value(fpga->clear_gpio, 1);
+
+ /* Send clock and addr from strobe & datain pins */
+ for (i = UPBOARD_ADDRESS_SIZE; i >= 0; i--) {
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ gpiod_set_value(fpga->datain_gpio, !!(reg & BIT(i)));
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 0);
+
+ /* Write data to datain pin */
+ for (i = UPBOARD_REGISTER_SIZE - 1; i >= 0; i--) {
+ gpiod_set_value(fpga->datain_gpio, !!(val & BIT(i)));
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 1);
+
+ return 0;
+}
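With the reg_read()/reg_write() hooks above registered through regmap, consumers never toggle the GPIO lines themselves; a minimal sketch (assumed consumer code, not part of the patch):

/* Illustrative only: child drivers go through regmap as usual. */
static int example_enable_function(struct upboard_fpga *fpga, unsigned int bit)
{
        return regmap_update_bits(fpga->regmap, UPBOARD_REG_FUNC_EN0,
                                  BIT(bit), BIT(bit));
}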
+
+static const struct regmap_range upboard_up_readable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_PLATFORM_ID, UPBOARD_REG_FIRMWARE_ID),
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN0),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR1),
+};
+
+static const struct regmap_range upboard_up_writable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN0),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR1),
+};
+
+static const struct regmap_access_table upboard_up_readable_table = {
+ .yes_ranges = upboard_up_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up_readable_ranges),
+};
+
+static const struct regmap_access_table upboard_up_writable_table = {
+ .yes_ranges = upboard_up_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up_writable_ranges),
+};
+
+static const struct regmap_config upboard_up_regmap_config = {
+ .reg_bits = UPBOARD_ADDRESS_SIZE,
+ .val_bits = UPBOARD_REGISTER_SIZE,
+ .max_register = UPBOARD_REG_MAX,
+ .reg_read = upboard_fpga_read,
+ .reg_write = upboard_fpga_write,
+ .fast_io = false,
+ .cache_type = REGCACHE_NONE,
+ .rd_table = &upboard_up_readable_table,
+ .wr_table = &upboard_up_writable_table,
+};
+
+static const struct regmap_range upboard_up2_readable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_PLATFORM_ID, UPBOARD_REG_FIRMWARE_ID),
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN2),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR2),
+};
+
+static const struct regmap_range upboard_up2_writable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN2),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR2),
+};
+
+static const struct regmap_access_table upboard_up2_readable_table = {
+ .yes_ranges = upboard_up2_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up2_readable_ranges),
+};
+
+static const struct regmap_access_table upboard_up2_writable_table = {
+ .yes_ranges = upboard_up2_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up2_writable_ranges),
+};
+
+static const struct regmap_config upboard_up2_regmap_config = {
+ .reg_bits = UPBOARD_ADDRESS_SIZE,
+ .val_bits = UPBOARD_REGISTER_SIZE,
+ .max_register = UPBOARD_REG_MAX,
+ .reg_read = upboard_fpga_read,
+ .reg_write = upboard_fpga_write,
+ .fast_io = false,
+ .cache_type = REGCACHE_NONE,
+ .rd_table = &upboard_up2_readable_table,
+ .wr_table = &upboard_up2_writable_table,
+};
+
+static const struct mfd_cell upboard_up_mfd_cells[] = {
+ { .name = "upboard-pinctrl" },
+ { .name = "upboard-leds" },
+};
+
+static const struct upboard_fpga_data upboard_up_fpga_data = {
+ .type = UPBOARD_UP_FPGA,
+ .regmap_config = &upboard_up_regmap_config,
+};
+
+static const struct upboard_fpga_data upboard_up2_fpga_data = {
+ .type = UPBOARD_UP2_FPGA,
+ .regmap_config = &upboard_up2_regmap_config,
+};
+
+static int upboard_fpga_gpio_init(struct upboard_fpga *fpga)
+{
+ fpga->enable_gpio = devm_gpiod_get(fpga->dev, "enable", GPIOD_ASIS);
+ if (IS_ERR(fpga->enable_gpio))
+ return PTR_ERR(fpga->enable_gpio);
+
+ fpga->clear_gpio = devm_gpiod_get(fpga->dev, "clear", GPIOD_OUT_LOW);
+ if (IS_ERR(fpga->clear_gpio))
+ return PTR_ERR(fpga->clear_gpio);
+
+ fpga->strobe_gpio = devm_gpiod_get(fpga->dev, "strobe", GPIOD_OUT_LOW);
+ if (IS_ERR(fpga->strobe_gpio))
+ return PTR_ERR(fpga->strobe_gpio);
+
+ fpga->datain_gpio = devm_gpiod_get(fpga->dev, "datain", GPIOD_OUT_LOW);
+ if (IS_ERR(fpga->datain_gpio))
+ return PTR_ERR(fpga->datain_gpio);
+
+ fpga->dataout_gpio = devm_gpiod_get(fpga->dev, "dataout", GPIOD_IN);
+ if (IS_ERR(fpga->dataout_gpio))
+ return PTR_ERR(fpga->dataout_gpio);
+
+ gpiod_set_value(fpga->enable_gpio, 1);
+
+ return 0;
+}
+
+static int upboard_fpga_get_firmware_version(struct upboard_fpga *fpga)
+{
+ unsigned int platform_id, manufacturer_id;
+ int ret;
+
+ if (!fpga)
+ return -ENOMEM;
+
+ ret = regmap_read(fpga->regmap, UPBOARD_REG_PLATFORM_ID, &platform_id);
+ if (ret)
+ return ret;
+
+ manufacturer_id = platform_id & UPBOARD_MANUFACTURER_ID_MASK;
+ if (manufacturer_id != UPBOARD_AAEON_MANUFACTURER_ID)
+ return dev_err_probe(fpga->dev, -ENODEV,
+ "driver not compatible with custom FPGA FW from manufacturer id %#02x.",
+ manufacturer_id);
+
+ ret = regmap_read(fpga->regmap, UPBOARD_REG_FIRMWARE_ID, &fpga->firmware_version);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(UPBOARD_FW_ID_MAJOR_MASK, fpga->firmware_version) !=
+ UPBOARD_FW_ID_MAJOR_SUPPORTED)
+ return dev_err_probe(fpga->dev, -ENODEV,
+ "unsupported FPGA FW v%lu.%lu.%lu build %#02lx",
+ FIELD_GET(UPBOARD_FW_ID_MAJOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_MINOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_PATCH_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_BUILD_MASK, fpga->firmware_version));
+ return 0;
+}
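As a worked example of the decoding above (illustrative only):

/*
 * With the GENMASK definitions above, firmware_version = 0x3012 decodes
 * as build 0x3, major 0x0, minor 0x1, patch 0x2, i.e.
 * "FPGA FW v0.1.2 build 0x3"; major 0x0 equals
 * UPBOARD_FW_ID_MAJOR_SUPPORTED, so probing would continue.
 */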
+
+static ssize_t upboard_fpga_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct upboard_fpga *fpga = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "FPGA FW v%lu.%lu.%lu build %#02lx\n",
+ FIELD_GET(UPBOARD_FW_ID_MAJOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_MINOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_PATCH_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_BUILD_MASK, fpga->firmware_version));
+}
+
+static DEVICE_ATTR_RO(upboard_fpga_version);
+
+static struct attribute *upboard_fpga_attrs[] = {
+ &dev_attr_upboard_fpga_version.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(upboard_fpga);
+
+static int upboard_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct upboard_fpga *fpga;
+ int ret;
+
+ fpga = devm_kzalloc(dev, sizeof(*fpga), GFP_KERNEL);
+ if (!fpga)
+ return -ENOMEM;
+
+ fpga->fpga_data = device_get_match_data(dev);
+
+ fpga->dev = dev;
+
+ platform_set_drvdata(pdev, fpga);
+
+ fpga->regmap = devm_regmap_init(dev, NULL, fpga, fpga->fpga_data->regmap_config);
+ if (IS_ERR(fpga->regmap))
+ return PTR_ERR(fpga->regmap);
+
+ ret = upboard_fpga_gpio_init(fpga);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize FPGA common GPIOs");
+
+ ret = upboard_fpga_get_firmware_version(fpga);
+ if (ret)
+ return ret;
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, upboard_up_mfd_cells,
+ ARRAY_SIZE(upboard_up_mfd_cells), NULL, 0, NULL);
+}
+
+static const struct acpi_device_id upboard_fpga_acpi_match[] = {
+ { "AANT0F01", (kernel_ulong_t)&upboard_up2_fpga_data },
+ { "AANT0F04", (kernel_ulong_t)&upboard_up_fpga_data },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, upboard_fpga_acpi_match);
+
+static struct platform_driver upboard_fpga_driver = {
+ .driver = {
+ .name = "upboard-fpga",
+ .acpi_match_table = ACPI_PTR(upboard_fpga_acpi_match),
+ .dev_groups = upboard_fpga_groups,
+ },
+ .probe = upboard_fpga_probe,
+};
+
+module_platform_driver(upboard_fpga_driver);
+
+MODULE_AUTHOR("Gary Wang <garywang@aaeon.com.tw>");
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_DESCRIPTION("UP Board FPGA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index d34d58ce46db..ef03d6cec9ff 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -10,7 +10,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/platform_data/syscon.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/stat.h>
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 5efc4151bf58..15307f5e4307 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -9,11 +9,13 @@ config CXL_BASE
select PPC_64S_HASH_MMU
config CXL
- tristate "Support for IBM Coherent Accelerators (CXL)"
+ tristate "Support for IBM Coherent Accelerators (CXL) (DEPRECATED)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- default m
help
+ The cxl driver is deprecated and will be removed in a future
+ kernel release.
+
Select this option to enable driver support for IBM Coherent
Accelerators (CXL). CXL is otherwise known as Coherent Accelerator
Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index cf6bd8a43056..e26ee85279fa 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -295,6 +295,8 @@ int cxl_of_probe(struct platform_device *pdev)
int ret;
int slice = 0, slice_ok = 0;
+ dev_err_once(&pdev->dev, "DEPRECATION: cxl is deprecated and will be removed in a future kernel release\n");
+
pr_devel("in %s\n", __func__);
np = pdev->dev.of_node;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3d52f9b92d0d..92bf7c5c7b35 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1726,6 +1726,8 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;
+ dev_err_once(&dev->dev, "DEPRECATED: cxl is deprecated and will be removed in a future kernel release\n");
+
if (cxl_pci_is_vphb_device(dev)) {
dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
return -ENODEV;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d996d39c0d6f..5241528f8b90 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -19,7 +19,6 @@
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
@@ -557,8 +556,7 @@ int mmc_cqe_recovery(struct mmc_host *host)
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_STOP_TRANSMISSION;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
@@ -567,8 +565,7 @@ int mmc_cqe_recovery(struct mmc_host *host)
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
cmd.arg = 1; /* Discard entire queue */
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 48bda70145ee..bdb22998357e 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -14,7 +14,6 @@
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/pagemap.h>
-#include <linux/pm_wakeup.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4d6844261912..ab662f502fe7 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -441,7 +441,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
else
mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq->tag_set.numa_node = NUMA_NO_NODE;
- mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ mq->tag_set.flags = BLK_MQ_F_BLOCKING;
mq->tag_set.nr_hw_queues = 1;
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 9566837c9848..4b19b8a16b09 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -458,6 +458,8 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
if (mmc_card_sd_combo(card))
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+ max_dtr = min_not_zero(max_dtr, card->quirk_max_rate);
+
return max_dtr;
}
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 7847f0c8b465..e5f151d092cd 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1343,6 +1343,25 @@ static int bcm2835_add_host(struct bcm2835_host *host)
return 0;
}
+static int bcm2835_suspend(struct device *dev)
+{
+ struct bcm2835_host *host = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static int bcm2835_resume(struct device *dev)
+{
+ struct bcm2835_host *host = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(host->clk);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835_pm_ops, bcm2835_suspend,
+ bcm2835_resume);
+
static int bcm2835_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1471,6 +1490,7 @@ static struct platform_driver bcm2835_driver = {
.name = "sdhost-bcm2835",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = bcm2835_match,
+ .pm = pm_ptr(&bcm2835_pm_ops),
},
};
module_platform_driver(bcm2835_driver);
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index d5f4b6972f63..cb8044093402 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -25,22 +25,16 @@ static const struct cqhci_crypto_alg_entry {
static inline struct cqhci_host *
cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct mmc_host *mmc =
- container_of(profile, struct mmc_host, crypto_profile);
-
- return mmc->cqe_private;
+ return mmc_from_crypto_profile(profile)->cqe_private;
}
-static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg,
- int slot)
+static void cqhci_crypto_program_key(struct cqhci_host *cq_host,
+ const union cqhci_crypto_cfg_entry *cfg,
+ int slot)
{
u32 slot_offset = cq_host->crypto_cfg_register + slot * sizeof(*cfg);
int i;
- if (cq_host->ops->program_key)
- return cq_host->ops->program_key(cq_host, cfg, slot);
-
/* Clear CFGE */
cqhci_writel(cq_host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
@@ -55,7 +49,6 @@ static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
/* Write dword 16, which includes the new value of CFGE */
cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
- return 0;
}
static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
@@ -72,7 +65,6 @@ static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
int i;
int cap_idx = -1;
union cqhci_crypto_cfg_entry cfg = {};
- int err;
BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
for (i = 0; i < cq_host->crypto_capabilities.num_crypto_cap; i++) {
@@ -99,10 +91,10 @@ static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
memcpy(cfg.crypto_key, key->raw, key->size);
}
- err = cqhci_crypto_program_key(cq_host, &cfg, slot);
+ cqhci_crypto_program_key(cq_host, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
- return err;
+ return 0;
}
static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
@@ -113,7 +105,8 @@ static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
*/
union cqhci_crypto_cfg_entry cfg = {};
- return cqhci_crypto_program_key(cq_host, &cfg, slot);
+ cqhci_crypto_program_key(cq_host, &cfg, slot);
+ return 0;
}
static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile,
@@ -170,7 +163,6 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
struct mmc_host *mmc = cq_host->mmc;
struct device *dev = mmc_dev(mmc);
struct blk_crypto_profile *profile = &mmc->crypto_profile;
- unsigned int num_keyslots;
unsigned int cap_idx;
enum blk_crypto_mode_num blk_mode_num;
unsigned int slot;
@@ -180,6 +172,9 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
goto out;
+ if (cq_host->ops->uses_custom_crypto_profile)
+ goto profile_initialized;
+
cq_host->crypto_capabilities.reg_val =
cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
@@ -198,9 +193,8 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
* CCAP.CFGC is off by one, so the actual number of crypto
* configurations (a.k.a. keyslots) is CCAP.CFGC + 1.
*/
- num_keyslots = cq_host->crypto_capabilities.config_count + 1;
-
- err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);
+ err = devm_blk_crypto_profile_init(
+ dev, profile, cq_host->crypto_capabilities.config_count + 1);
if (err)
goto out;
@@ -228,9 +222,11 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
}
+profile_initialized:
+
/* Clear all the keyslots so that we start in a known state. */
- for (slot = 0; slot < num_keyslots; slot++)
- cqhci_crypto_clear_keyslot(cq_host, slot);
+ for (slot = 0; slot < profile->num_slots; slot++)
+ profile->ll_ops.keyslot_evict(profile, NULL, slot);
/* CQHCI crypto requires the use of 128-bit task descriptors. */
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index fab9d74445ba..ce189a1866b9 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -289,13 +289,11 @@ struct cqhci_host_ops {
u64 *data);
void (*pre_enable)(struct mmc_host *mmc);
void (*post_disable)(struct mmc_host *mmc);
-#ifdef CONFIG_MMC_CRYPTO
- int (*program_key)(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg, int slot);
-#endif
void (*set_tran_desc)(struct cqhci_host *cq_host, u8 **desc,
dma_addr_t addr, int len, bool end, bool dma64);
-
+#ifdef CONFIG_MMC_CRYPTO
+ bool uses_custom_crypto_profile;
+#endif
};
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
diff --git a/drivers/mmc/host/dw_mmc-hi3798mv200.c b/drivers/mmc/host/dw_mmc-hi3798mv200.c
index cce174b5249b..5791a975a944 100644
--- a/drivers/mmc/host/dw_mmc-hi3798mv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798mv200.c
@@ -181,7 +181,6 @@ static int dw_mci_hi3798mv200_init(struct dw_mci *host)
{
struct dw_mci_hi3798mv200_priv *priv;
struct device_node *np = host->dev->of_node;
- int ret;
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -199,15 +198,12 @@ static int dw_mci_hi3798mv200_init(struct dw_mci *host)
return dev_err_probe(host->dev, PTR_ERR(priv->drive_clk),
"failed to get enabled ciu-drive clock\n");
- priv->crg_reg = syscon_regmap_lookup_by_phandle(np, "hisilicon,sap-dll-reg");
+ priv->crg_reg = syscon_regmap_lookup_by_phandle_args(np, "hisilicon,sap-dll-reg",
+ 1, &priv->sap_dll_offset);
if (IS_ERR(priv->crg_reg))
return dev_err_probe(host->dev, PTR_ERR(priv->crg_reg),
"failed to get CRG reg\n");
- ret = of_property_read_u32_index(np, "hisilicon,sap-dll-reg", 1, &priv->sap_dll_offset);
- if (ret)
- return dev_err_probe(host->dev, ret, "failed to get sample DLL register offset\n");
-
host->priv = priv;
return 0;
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index af445d3f8e2a..4b6e91372526 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -414,6 +414,7 @@ struct mtk_mmc_compatible {
u8 clk_div_bits;
bool recheck_sdio_irq;
bool hs400_tune; /* only used for MT8173 */
+ bool needs_top_base;
u32 pad_tune_reg;
bool async_fifo;
bool data_tune;
@@ -587,6 +588,7 @@ static const struct mtk_mmc_compatible mt7986_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
@@ -627,6 +629,7 @@ static const struct mtk_mmc_compatible mt8183_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
@@ -653,6 +656,7 @@ static const struct mtk_mmc_compatible mt8196_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
@@ -1097,11 +1101,12 @@ static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
u32 resp;
switch (mmc_resp_type(cmd)) {
- /* Actually, R1, R5, R6, R7 are the same */
+ /* Actually, R1, R5, R6, R7 are the same */
case MMC_RSP_R1:
resp = 0x1;
break;
case MMC_RSP_R1B:
+ case MMC_RSP_R1B_NO_CRC:
resp = 0x7;
break;
case MMC_RSP_R2:
@@ -1351,7 +1356,8 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
* CRC error.
*/
msdc_reset_hw(host);
- if (events & MSDC_INT_RSPCRCERR) {
+ if (events & MSDC_INT_RSPCRCERR &&
+ mmc_resp_type(cmd) != MMC_RSP_R1B_NO_CRC) {
cmd->error = -EILSEQ;
host->error |= REQ_CMD_EIO;
} else if (events & MSDC_INT_CMDTMO) {
@@ -2885,9 +2891,13 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (IS_ERR(host->base))
return PTR_ERR(host->base);
- host->top_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(host->top_base))
- host->top_base = NULL;
+ host->dev_comp = of_device_get_match_data(&pdev->dev);
+
+ if (host->dev_comp->needs_top_base) {
+ host->top_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(host->top_base))
+ return PTR_ERR(host->top_base);
+ }
ret = mmc_regulator_get_supply(mmc);
if (ret)
@@ -2949,7 +2959,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
- host->dev_comp = of_device_get_match_data(&pdev->dev);
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index e7a286c3216f..0a9affd12532 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -995,7 +995,7 @@ static int mxcmci_probe(struct platform_device *pdev)
struct mxcmci_host *host;
struct resource *res;
int ret = 0, irq;
- bool dat3_card_detect = false;
+ bool dat3_card_detect;
dma_cap_mask_t mask;
struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
@@ -1048,9 +1048,9 @@ static int mxcmci_probe(struct platform_device *pdev)
if (pdata)
dat3_card_detect = pdata->dat3_card_detect;
- else if (mmc_card_is_removable(mmc)
- && !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
- dat3_card_detect = true;
+ else
+ dat3_card_detect = mmc_card_is_removable(mmc) &&
+ !of_property_present(pdev->dev.of_node, "cd-gpios");
ret = mmc_regulator_get_supply(mmc);
if (ret)
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 48d3b0aae5a0..0c6eb60a95fd 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -115,8 +115,6 @@ static int sd_response_type(struct mmc_command *cmd)
return SD_RSP_TYPE_R0;
case MMC_RSP_R1:
return SD_RSP_TYPE_R1;
- case MMC_RSP_R1_NO_CRC:
- return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
case MMC_RSP_R1B:
return SD_RSP_TYPE_R1b;
case MMC_RSP_R2:
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 107c78df53cf..d229c2b83ea9 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -313,9 +313,6 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
case MMC_RSP_R1:
rsp_type = SD_RSP_TYPE_R1;
break;
- case MMC_RSP_R1_NO_CRC:
- rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
- break;
case MMC_RSP_R1B:
rsp_type = SD_RSP_TYPE_R1b;
break;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index d1ce9193ece9..e6c5c82f64fa 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -822,8 +822,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
struct acpi_device *device;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
- struct resource *iomem;
- resource_size_t len;
size_t priv_size;
int quirks = 0;
int err;
@@ -844,17 +842,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem)
- return -ENOMEM;
-
- len = resource_size(iomem);
- if (len < 0x100)
- dev_err(dev, "Invalid iomem size!\n");
-
- if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
- return -ENOMEM;
-
priv_size = slot ? slot->priv_size : 0;
host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
@@ -876,10 +863,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
goto err_free;
}
- host->ioaddr = devm_ioremap(dev, iomem->start,
- resource_size(iomem));
- if (host->ioaddr == NULL) {
- err = -ENOMEM;
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->ioaddr)) {
+ err = PTR_ERR(host->ioaddr);
goto err_free;
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d55d045ef236..ff78a7c6a04c 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -304,6 +304,7 @@ static struct esdhc_soc_data usdhc_s32g2_data = {
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
| ESDHC_FLAG_SKIP_ERR004536 | ESDHC_FLAG_SKIP_CD_WAKE,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
static struct esdhc_soc_data usdhc_imx7ulp_data = {
@@ -1647,7 +1648,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
* Retrieving and requesting the actual WP GPIO will happen
* in the call to mmc_of_parse().
*/
- if (of_property_read_bool(np, "wp-gpios"))
+ if (of_property_present(np, "wp-gpios"))
boarddata->wp_type = ESDHC_WP_GPIO;
of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 319f0ebbe652..e3d39311fdc7 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -134,9 +134,18 @@
/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+/* Max load for eMMC Vdd supply */
+#define MMC_VMMC_MAX_LOAD_UA 570000
+
/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA 325000
+/* Max load for SD Vdd supply */
+#define SD_VMMC_MAX_LOAD_UA 800000
+
+/* Max load for SD Vdd-io supply */
+#define SD_VQMMC_MAX_LOAD_UA 22000
+
#define msm_host_readl(msm_host, host, offset) \
msm_host->var_ops->msm_readl_relaxed(host, offset)
@@ -1403,11 +1412,48 @@ static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
return ret;
}
-static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
+static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm)
+{
+ int load;
+
+ if (!hpm)
+ load = 0;
+ else if (!mmc->card)
+ load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA);
+ else if (mmc_card_mmc(mmc->card))
+ load = MMC_VMMC_MAX_LOAD_UA;
+ else if (mmc_card_sd(mmc->card))
+ load = SD_VMMC_MAX_LOAD_UA;
+ else
+ return;
+
+ regulator_set_load(mmc->supply.vmmc, load);
+}
+
+static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm)
+{
+ int load;
+
+ if (!hpm)
+ load = 0;
+ else if (!mmc->card)
+ load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA);
+ else if (mmc_card_sd(mmc->card))
+ load = SD_VQMMC_MAX_LOAD_UA;
+ else
+ return;
+
+ regulator_set_load(mmc->supply.vqmmc, load);
+}
+
+static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host,
+ struct mmc_host *mmc, bool hpm)
{
if (IS_ERR(mmc->supply.vmmc))
return 0;
+ msm_config_vmmc_regulator(mmc, hpm);
+
return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}
@@ -1420,6 +1466,8 @@ static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
if (msm_host->vqmmc_enabled == level)
return 0;
+ msm_config_vqmmc_regulator(mmc, level);
+
if (level) {
/* Set the IO voltage regulator to default voltage level */
if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
@@ -1642,7 +1690,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
}
if (pwr_state) {
- ret = sdhci_msm_set_vmmc(mmc);
+ ret = sdhci_msm_set_vmmc(msm_host, mmc,
+ pwr_state & REQ_BUS_ON);
if (!ret)
ret = sdhci_msm_set_vqmmc(msm_host, mmc,
pwr_state & REQ_BUS_ON);
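
The two helpers above translate the card type into a current budget for regulator_set_load(), falling back to the larger of the eMMC and SD limits while no card is attached so either card type can still be powered. A reduced sketch of the underlying regulator API (the example_ name is illustrative):

	#include <linux/regulator/consumer.h>

	static void example_set_bus_load(struct regulator *vmmc, bool bus_on)
	{
		/*
		 * Request enough current headroom while the bus is powered;
		 * a load of 0 uA lets the core drop back to low-power mode.
		 */
		regulator_set_load(vmmc, bus_on ? 570000 : 0);
	}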
@@ -1807,12 +1856,19 @@ out:
#ifdef CONFIG_MMC_CRYPTO
+static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */
+
static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
struct cqhci_host *cq_host)
{
struct mmc_host *mmc = msm_host->mmc;
+ struct blk_crypto_profile *profile = &mmc->crypto_profile;
struct device *dev = mmc_dev(mmc);
struct qcom_ice *ice;
+ union cqhci_crypto_capabilities caps;
+ union cqhci_crypto_cap_entry cap;
+ int err;
+ int i;
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
@@ -1827,8 +1883,37 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
return PTR_ERR_OR_ZERO(ice);
msm_host->ice = ice;
- mmc->caps2 |= MMC_CAP2_CRYPTO;
+ /* Initialize the blk_crypto_profile */
+
+ caps.reg_val = cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
+
+ /* The number of keyslots supported is (CFGC+1) */
+ err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1);
+ if (err)
+ return err;
+
+ profile->ll_ops = sdhci_msm_crypto_ops;
+ profile->max_dun_bytes_supported = 4;
+ profile->dev = dev;
+
+ /*
+ * Currently this driver only supports AES-256-XTS. All known versions
+ * of ICE support it, but to be safe make sure it is really declared in
+ * the crypto capability registers. The crypto capability registers
+ * also give the supported data unit size(s).
+ */
+ for (i = 0; i < caps.num_crypto_cap; i++) {
+ cap.reg_val = cpu_to_le32(cqhci_readl(cq_host,
+ CQHCI_CRYPTOCAP +
+ i * sizeof(__le32)));
+ if (cap.algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS &&
+ cap.key_size == CQHCI_CRYPTO_KEY_SIZE_256)
+ profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
+ cap.sdus_mask * 512;
+ }
+
+ mmc->caps2 |= MMC_CAP2_CRYPTO;
return 0;
}
@@ -1854,35 +1939,55 @@ static __maybe_unused int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
return 0;
}
-/*
- * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
- * vendor-specific SCM calls for this; it doesn't support the standard way.
- */
-static int sdhci_msm_program_key(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg,
- int slot)
+static inline struct sdhci_msm_host *
+sdhci_msm_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct sdhci_host *host = mmc_priv(cq_host->mmc);
+ struct mmc_host *mmc = mmc_from_crypto_profile(profile);
+ struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- union cqhci_crypto_cap_entry cap;
- if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
- return qcom_ice_evict_key(msm_host->ice, slot);
+ return msm_host;
+}
+
+/*
+ * Program a key into a QC ICE keyslot. QC ICE requires a QC-specific SCM call
+ * for this; it doesn't support the standard way.
+ */
+static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct sdhci_msm_host *msm_host =
+ sdhci_msm_host_from_crypto_profile(profile);
/* Only AES-256-XTS has been tested so far. */
- cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
- if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
- cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
- return -EINVAL;
+ if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
+ return -EOPNOTSUPP;
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
- cfg->crypto_key,
- cfg->data_unit_size, slot);
+ key->raw,
+ key->crypto_cfg.data_unit_size / 512,
+ slot);
}
+static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct sdhci_msm_host *msm_host =
+ sdhci_msm_host_from_crypto_profile(profile);
+
+ return qcom_ice_evict_key(msm_host->ice, slot);
+}
+
+static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = {
+ .keyslot_program = sdhci_msm_ice_keyslot_program,
+ .keyslot_evict = sdhci_msm_ice_keyslot_evict,
+};
+
#else /* CONFIG_MMC_CRYPTO */
static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
@@ -1988,7 +2093,7 @@ static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
.enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
#ifdef CONFIG_MMC_CRYPTO
- .program_key = sdhci_msm_program_key,
+ .uses_custom_crypto_profile = true,
#endif
};
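
Rather than implementing the cqhci ->program_key hook, the driver now builds its own blk_crypto_profile and flags uses_custom_crypto_profile, routing keyslot operations through blk_crypto_ll_ops. A trimmed sketch of that wiring, assuming a single AES-256-XTS capability with 512-byte data units (the example_ names are illustrative, not from this patch):

	#include <linux/blk-crypto-profile.h>

	static int example_keyslot_program(struct blk_crypto_profile *profile,
					   const struct blk_crypto_key *key,
					   unsigned int slot)
	{
		return 0;	/* program @key into hardware @slot */
	}

	static int example_keyslot_evict(struct blk_crypto_profile *profile,
					 const struct blk_crypto_key *key,
					 unsigned int slot)
	{
		return 0;	/* clear hardware @slot */
	}

	static const struct blk_crypto_ll_ops example_crypto_ops = {
		.keyslot_program = example_keyslot_program,
		.keyslot_evict	 = example_keyslot_evict,
	};

	static int example_crypto_init(struct device *dev,
				       struct blk_crypto_profile *profile,
				       unsigned int num_slots)
	{
		int err = devm_blk_crypto_profile_init(dev, profile, num_slots);

		if (err)
			return err;
		profile->ll_ops = example_crypto_ops;
		profile->max_dun_bytes_supported = 4;
		profile->dev = dev;
		/* advertise AES-256-XTS with 512-byte data units */
		profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 512;
		return 0;
	}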
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 45a474ccab1c..04c1c54df791 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -297,7 +297,6 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host,
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE: c |= RESP_NONE; break;
case MMC_RSP_R1:
- case MMC_RSP_R1_NO_CRC:
c |= RESP_R1; break;
case MMC_RSP_R1B: c |= RESP_R1B; break;
case MMC_RSP_R2: c |= RESP_R2; break;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 47ead84407cd..ee7e1d908986 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -329,7 +329,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto out_list_del;
ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index b1df7f627161..94f33c8be031 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -1214,6 +1214,8 @@ spinand_select_op_variant(struct spinand_device *spinand,
if (ret)
break;
+ spi_mem_adjust_op_freq(spinand->spimem, &op);
+
if (!spi_mem_supports_op(spinand->spimem, &op))
break;
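
spi_mem_adjust_op_freq() lowers the op template's maximum frequency to what the controller and chip can actually run before spi_mem_supports_op() is consulted, so a variant is no longer rejected merely because its nominal clock is too fast. The call order is the point (sketch, names as in the hunk above):

	/* clamp the op's frequency first, then test for support */
	spi_mem_adjust_op_freq(spinand->spimem, &op);
	if (!spi_mem_supports_op(spinand->spimem, &op))
		break;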
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 66949d9f0cc5..b6f374ded390 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->dummy.nbytes)
- op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto);
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->data.nbytes)
op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 60d0155be869..2836905f0152 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ dev->tag_set.flags = BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
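
BLK_MQ_F_SHOULD_MERGE was removed tree-wide once request merging stopped needing an opt-in, so tag-set users keep only the flags that still change behaviour; here that is BLK_MQ_F_BLOCKING, because the MTD/UBI transfer paths may sleep. The single-queue allocation from the mtd_blkdevs.c hunk above now reads:

	/* depth 2, no extra flags beyond BLOCKING; merging is implicit */
	ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
				      BLK_MQ_F_BLOCKING);
	if (ret)
		goto out_kfree_tag_set;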
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a2abfade82dd..70814303aab8 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -84,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
ipversion >>= 4;
@@ -94,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -108,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else {
@@ -124,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
}
@@ -136,7 +136,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
@@ -144,7 +144,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
@@ -194,7 +194,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(bareudp->dev, len);
+ dev_dstats_rx_add(bareudp->dev, len);
return 0;
drop:
@@ -589,7 +589,7 @@ static void bareudp_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
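
Switching pcpu_stat_type from TSTATS to DSTATS makes the core allocate per-CPU counters that also cover drops, so the driver can trade the mixed dev_core_stats_rx_dropped_inc() / dev_sw_netstats_rx_add() calls for the uniform dstats helpers. The resulting pattern (sketch):

	/* at setup time: the core allocates per-CPU dstats for us */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;

	/* in the receive path: */
	dev_dstats_rx_add(dev, skb->len);	/* accepted packet */
	dev_dstats_rx_dropped(dev);		/* dropped packet */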
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 681643ab3780..5ec3170b896a 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -85,8 +85,6 @@ const char *can_get_state_str(const enum can_state state)
default:
return "<unknown>";
}
-
- return "<unknown>";
}
EXPORT_SYMBOL_GPL(can_get_state_str);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index cdf0ec9fa7f3..21a61b86f67d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1073,9 +1073,10 @@ static int grcan_open(struct net_device *dev)
if (err)
goto exit_close_candev;
+ napi_enable(&priv->napi);
+
spin_lock_irqsave(&priv->lock, flags);
- napi_enable(&priv->napi);
grcan_start(dev);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(dev);
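
napi_enable() may sleep in recent kernels (it can take the netdev instance lock), so it must not be called inside a spinlock section held with IRQs disabled; the hunk above therefore hoists it out of the priv->lock critical section. The safe ordering (sketch, names from the hunk):

	napi_enable(&priv->napi);	/* may sleep: before taking the lock */

	spin_lock_irqsave(&priv->lock, flags);
	grcan_start(dev);
	netif_start_queue(dev);
	spin_unlock_irqrestore(&priv->lock, flags);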
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index fee012b57f33..fa04a7ced02b 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -999,7 +999,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
@@ -1234,11 +1235,15 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ const struct can_berr_counter *bec,
struct can_frame *cf,
enum can_state new_state,
enum can_state tx_state,
enum can_state rx_state)
{
+ enum can_state old_state;
+
+ old_state = can->can.state;
can_change_state(can->can.dev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1254,6 +1259,18 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
can_bus_off(ndev);
}
}
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
}
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
@@ -1288,7 +1305,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
struct net_device *ndev = can->can.dev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct can_frame *cf = NULL;
old_state = can->can.state;
@@ -1297,16 +1314,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
- skb = alloc_can_err_skb(ndev, &cf);
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(ndev, &cf);
if (new_state != old_state) {
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- if (skb)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
}
can->err_rep_cnt++;
@@ -1319,18 +1330,19 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
- if (!skb) {
- ndev->stats.rx_dropped++;
- return -ENOMEM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ if (!skb) {
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
}
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
-
return 0;
}
@@ -1359,6 +1371,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
{
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
+ int ret = 0;
old_state = can->can.state;
@@ -1372,25 +1385,15 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
struct can_frame *cf;
skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
+ if (skb) {
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ netif_rx(skb);
+ } else {
ndev->stats.rx_dropped++;
- return -ENOMEM;
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ret = -ENOMEM;
}
-
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
-
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
}
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
@@ -1398,7 +1401,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
if (bec.txerr || bec.rxerr)
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
- return 0;
+ return ret;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
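
With CAN_CTRLMODE_BERR_REPORTING now advertised, the error skb is allocated only when the user has enabled bus-error reporting, while the state bookkeeping still runs unconditionally: can_change_state() accepts a NULL frame and fills one in only when it exists. The gating pattern (sketch, names as in the hunks above):

	struct sk_buff *skb = NULL;
	struct can_frame *cf = NULL;

	if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		skb = alloc_can_err_skb(ndev, &cf);

	/* cf may be NULL here; the state machine tolerates that */
	can_change_state(ndev, cf, tx_state, rx_state);
	if (skb)
		netif_rx(skb);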
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 97cd8bbf2e32..d025d4163fd1 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1785,6 +1785,13 @@ static void m_can_stop(struct net_device *dev)
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -2466,6 +2473,7 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
@@ -2478,6 +2486,9 @@ int m_can_class_suspend(struct device *dev)
if (cdev->pm_wake_source) {
hrtimer_cancel(&cdev->hrtimer);
m_can_write(cdev, M_CAN_IE, IR_RF0N);
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
} else {
m_can_stop(ndev);
}
@@ -2489,7 +2500,7 @@ int m_can_class_suspend(struct device *dev)
cdev->can.state = CAN_STATE_SLEEPING;
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2497,14 +2508,13 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
pinctrl_pm_select_default_state(dev);
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
@@ -2517,6 +2527,10 @@ int m_can_class_resume(struct device *dev)
* again.
*/
cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
@@ -2530,7 +2544,7 @@ int m_can_class_resume(struct device *dev)
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
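
The new optional ->deinit hook is the mirror of ->init: the m_can core invokes it on stop and on wake-source suspend, and skips it when a glue driver provides none, so existing glue is unaffected. A glue-driver stub showing the pairing (the example_ names are illustrative; tcan4x5x below supplies the real implementation):

	static int example_init(struct m_can_classdev *cdev)
	{
		return 0;	/* e.g. leave standby, enter normal mode */
	}

	static int example_deinit(struct m_can_classdev *cdev)
	{
		return 0;	/* e.g. drop back into standby */
	}

	static const struct m_can_ops example_ops = {
		.init	= example_init,
		.deinit	= example_deinit,
	};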
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index ef39e8e527ab..bd4746c63af3 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -68,6 +68,7 @@ struct m_can_ops {
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
+ int (*deinit)(struct m_can_classdev *cdev);
};
struct m_can_tx_op {
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 2f73bf3abad8..e5c162f8c589 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -92,6 +92,8 @@
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)
+#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19)
+
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
@@ -267,9 +269,24 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
+ if (tcan4x5x->nwkrq_voltage_vio) {
+ ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_NWKRQ_VOLTAGE_VIO);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
+static int tcan4x5x_deinit(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_STANDBY);
+};
+
static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
@@ -318,6 +335,14 @@ static const struct tcan4x5x_version_info
return &tcan4x5x_versions[TCAN4X5X];
}
+static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ tcan4x5x->nwkrq_voltage_vio =
+ of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
+}
+
static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
const struct tcan4x5x_version_info *version_info)
{
@@ -359,6 +384,7 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
+ .deinit = tcan4x5x_deinit,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
.write_fifo = tcan4x5x_write_fifo,
@@ -392,7 +418,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->power = NULL;
}
- m_can_class_get_clocks(mcan_class);
+ mcan_class->cclk = devm_clk_get(mcan_class->dev, "cclk");
if (IS_ERR(mcan_class->cclk)) {
dev_err(&spi->dev, "no CAN clock source defined\n");
freq = TCAN4X5X_EXT_CLK_DEF;
@@ -453,6 +479,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
+ tcan4x5x_get_dt_data(mcan_class);
+
tcan4x5x_check_wake(priv);
ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
index e62c030d3e1e..203399d5e8cc 100644
--- a/drivers/net/can/m_can/tcan4x5x.h
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -42,6 +42,8 @@ struct tcan4x5x_priv {
struct tcan4x5x_map_buf map_buf_rx;
struct tcan4x5x_map_buf map_buf_tx;
+
+ bool nwkrq_voltage_vio;
};
static inline void
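
The new nwkrq_voltage_vio flag is a presence-style DT option: it is read once at probe with of_property_read_bool() and, when set, tcan4x5x_init() sets TCAN4X5X_NWKRQ_VOLTAGE_VIO so the nWKRQ pin is referenced to the VIO rail rather than the internal supply (per the binding's description). The probe-time read, as in tcan4x5x_get_dt_data() above:

	tcan4x5x->nwkrq_voltage_vio =
		of_property_read_bool(cdev->dev->of_node,
				      "ti,nwkrq-voltage-vio");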
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index c42ebe9da55a..2d555f854008 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -230,18 +230,9 @@ static int sp_probe(struct platform_device *pdev)
return -ENODEV;
}
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem), DRV_NAME))
- return -EBUSY;
-
- addr = devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
- if (!addr)
- return -ENOMEM;
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
if (of) {
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 4311c1f0eafd..6fcb301ef611 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -570,7 +570,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (skb && state != CAN_STATE_BUS_OFF) {
+ if (likely(skb) && state != CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 7d12776ab63e..dcb0bcbe0565 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -818,7 +818,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_completion(&priv->stop_comp);
init_completion(&priv->flush_comp);
init_completion(&priv->get_busparams_comp);
- priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
priv->dev = dev;
priv->netdev = netdev;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 3764b263add3..8e88b5917796 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -926,6 +926,42 @@ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
}
}
+static void kvaser_usb_hydra_change_state(struct kvaser_usb_net_priv *priv,
+ const struct can_berr_counter *bec,
+ struct can_frame *cf,
+ enum can_state new_state)
+{
+ struct net_device *netdev = priv->netdev;
+ enum can_state old_state = priv->can.state;
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms == 0)
+ kvaser_usb_hydra_send_simple_cmd_async(priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ priv->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
+}
+
static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
u8 bus_status,
const struct can_berr_counter *bec)
@@ -951,41 +987,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
return;
skb = alloc_can_err_skb(netdev, &cf);
- if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec->txerr >= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec->txerr <= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- can_change_state(netdev, cf, tx_state, rx_state);
- }
-
- if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
- }
-
- if (!skb) {
+ kvaser_usb_hydra_change_state(priv, bec, cf, new_state);
+ if (skb)
+ netif_rx(skb);
+ else
netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- priv->can.can_stats.restarts++;
-
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
- }
-
- netif_rx(skb);
}
static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
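
The factored-out helper derives per-direction states from the error counters: whichever side holds the larger counter takes the new state, the other reports ERROR_ACTIVE, and when the counters are equal both comparisons hold, so both directions take the new state. The derivation, as in the helper above:

	tx_state = (bec->txerr >= bec->rxerr) ? new_state
					      : CAN_STATE_ERROR_ACTIVE;
	rx_state = (bec->txerr <= bec->rxerr) ? new_state
					      : CAN_STATE_ERROR_ACTIVE;
	can_change_state(netdev, cf, tx_state, rx_state);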
@@ -1078,9 +1084,8 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
{
struct net_device *netdev = priv->netdev;
struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct can_berr_counter bec;
enum can_state new_state, old_state;
u8 bus_status;
@@ -1096,52 +1101,26 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
&new_state);
- skb = alloc_can_err_skb(netdev, &cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (new_state != old_state)
+ kvaser_usb_hydra_change_state(priv, &bec, cf, new_state);
- if (new_state != old_state) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec.txerr >= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec.txerr <= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
-
- can_change_state(netdev, cf, tx_state, rx_state);
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- if (new_state == CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- can_bus_off(netdev);
+ shhwtstamps->hwtstamp = hwtstamp;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
}
}
- if (!skb) {
- stats->rx_dropped++;
- netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp = hwtstamp;
-
- cf->can_id |= CAN_ERR_BUSERROR;
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
- }
-
- netif_rx(skb);
-
priv->bec.txerr = bec.txerr;
priv->bec.rxerr = bec.rxerr;
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 6b9122ab1464..6a45adcc45bd 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -1120,10 +1120,8 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_usb_err_summary *es)
{
- struct can_frame *cf;
- struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
- .len = CAN_ERR_DLC };
- struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
struct kvaser_usb_net_leaf_priv *leaf;
@@ -1143,18 +1141,10 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (!netif_running(priv->netdev))
return;
- /* Update all of the CAN interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error CAN frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * CAN interface's state and counters, and the setting up of CAN error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
old_state = priv->can.state;
- kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, cf);
new_state = priv->can.state;
/* If there are errors, request status updates periodically as we do
@@ -1168,13 +1158,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
schedule_delayed_work(&leaf->chip_state_req_work,
msecs_to_jiffies(500));
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
if (new_state != old_state) {
if (es->status &
(M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
@@ -1187,11 +1170,20 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (priv->can.restart_ms &&
old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_RESTARTED;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
+ if (!skb) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ stats->rx_dropped++;
+ netdev_warn(priv->netdev, "No memory left for err_skb\n");
+ }
+ return;
+ }
+
switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 285785c942b0..79dc77835681 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2224,25 +2224,19 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
- return 0;
+ return !is5325(dev) && !is5365(dev);
}
-EXPORT_SYMBOL(b53_get_mac_eee);
+EXPORT_SYMBOL(b53_support_eee);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
struct ethtool_keee *p = &dev->ports[port].eee;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
p->eee_enabled = e->eee_enabled;
b53_eee_enable_set(ds, port, e->eee_enabled);
@@ -2298,7 +2292,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phylink_get_caps = b53_phylink_get_caps,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 05141176daf5..9e9b5bc0c5d6 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -384,7 +384,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+bool b53_support_eee(struct dsa_switch *ds, int port);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
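
DSA replaced the ->get_mac_eee callback with ->support_eee, a static capability predicate: it returns bool, performs no hardware access, and is consulted before any EEE configuration is attempted. Drivers whose MACs always support EEE can plug in the generic dsa_supports_eee() helper, as mt7530 and mv88e6xxx do further down. A minimal sketch (example_ names and the priv layout are illustrative):

	struct example_priv {
		bool internal_phy[8];
	};

	static bool example_support_eee(struct dsa_switch *ds, int port)
	{
		struct example_priv *priv = ds->priv;

		/* capability only; no register reads here */
		return priv->internal_phy[port];
	}

	static const struct dsa_switch_ops example_ops = {
		.support_eee = example_support_eee,
	};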
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 3f8a491ce885..4730982b6840 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -99,8 +99,8 @@ static void b53_serdes_an_restart(struct phylink_pcs *pcs)
SERDES_MII_BLK, reg);
}
-static void b53_serdes_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
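
phylink now passes the negotiation mode into the PCS state callback, so every pcs_get_state() implementation grows an `unsigned int neg_mode` parameter; the implementations touched in this series simply ignore it, but it lets a PCS distinguish inband from out-of-band resolution. Sketch of the updated signature (the example_ name is illustrative):

	static void example_pcs_get_state(struct phylink_pcs *pcs,
					  unsigned int neg_mode,
					  struct phylink_link_state *state)
	{
		if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
			;	/* read link/speed/duplex from PCS regs */
		else
			state->link = false;	/* forced-mode placeholder */
	}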
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 43bde1f583ff..fa2bf3fa9019 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1232,7 +1232,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_wol = bcm_sf2_sw_set_wol,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 1c6d7fc16772..a2beb27459f1 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -127,10 +127,14 @@ static const struct of_device_id ksz9477_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_i2c_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
.of_match_table = ksz9477_dt_ids,
+ .pm = &ksz_i2c_pm_ops,
},
.probe = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
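
DEFINE_SIMPLE_DEV_PM_OPS() builds a dev_pm_ops from a suspend/resume pair and lets the callbacks be discarded when CONFIG_PM_SLEEP is off; the ksz_switch_suspend()/ksz_switch_resume() it references are thin wrappers around the DSA core, added at the bottom of ksz_common.c below. Generic shape of the pattern (the example_ names are illustrative):

	static int example_suspend(struct device *dev)
	{
		return 0;	/* quiesce the device */
	}

	static int example_resume(struct device *dev)
	{
		return 0;	/* bring it back up */
	}

	static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
					example_suspend, example_resume);

	static struct i2c_driver example_driver = {
		.driver = {
			.name	= "example",
			.pm	= &example_pm_ops,
		},
	};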
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8a03baa6aecc..89f0796894af 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1339,6 +1339,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {false, false, true},
+ .ptp_capable = true,
.wr_table = &ksz8563_register_set,
.rd_table = &ksz8563_register_set,
},
@@ -1550,6 +1551,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -1677,6 +1679,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {true, true, true},
+ .ptp_capable = true,
},
[KSZ8567] = {
@@ -1712,6 +1715,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, false, false},
.gbit_capable = {false, false, false, false, false,
true, true},
+ .ptp_capable = true,
},
[KSZ9567] = {
@@ -1744,6 +1748,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
},
[LAN9370] = {
@@ -1773,6 +1778,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
+ .ptp_capable = true,
},
[LAN9371] = {
@@ -1802,6 +1808,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
.internal_phy = {true, true, true, true, false, false},
+ .ptp_capable = true,
},
[LAN9372] = {
@@ -1835,6 +1842,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9373] = {
@@ -1868,6 +1876,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, false,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9374] = {
@@ -1901,6 +1910,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9646] = {
@@ -2544,7 +2554,11 @@ static int ksz_mdio_register(struct ksz_device *dev)
bus->read = ksz_sw_mdio_read;
bus->write = ksz_sw_mdio_write;
bus->name = "ksz user smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ if (ds->dst->index != 0) {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index);
+ } else {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ }
}
ret = ksz_parse_dt_phy_config(dev, bus, mdio_np);
@@ -2805,16 +2819,21 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret)
goto out_girq;
- ret = ksz_ptp_irq_setup(ds, dp->index);
- if (ret)
- goto out_pirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto out_pirq;
+ }
}
}
- ret = ksz_ptp_clock_register(ds);
- if (ret) {
- dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
- goto out_ptpirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n",
+ ret);
+ goto out_ptpirq;
+ }
}
ret = ksz_mdio_register(dev);
@@ -2834,9 +2853,10 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
out_ptp_clock_unregister:
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
out_ptpirq:
- if (dev->irq > 0)
+ if (dev->irq > 0 && dev->info->ptp_capable)
dsa_switch_for_each_user_port(dp, dev->ds)
ksz_ptp_irq_free(ds, dp->index);
out_pirq:
@@ -2855,11 +2875,13 @@ static void ksz_teardown(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
if (dev->irq > 0) {
dsa_switch_for_each_user_port(dp, dev->ds) {
- ksz_ptp_irq_free(ds, dp->index);
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
ksz_irq_free(&dev->ports[dp->index].pirq);
}
@@ -3444,12 +3466,12 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
-static int ksz_validate_eee(struct dsa_switch *ds, int port)
+static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
if (!dev->info->internal_phy[port])
- return -EOPNOTSUPP;
+ return false;
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
@@ -3461,41 +3483,16 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case LAN9646_CHIP_ID:
- return 0;
+ return true;
}
- return -EOPNOTSUPP;
-}
-
-static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
-
- /* There is no documented control of Tx LPI configuration. */
- e->tx_lpi_enabled = true;
-
- /* There is no documented control of Tx LPI timer. According to tests
- * Tx LPI timer seems to be set by default to minimal value.
- */
- e->tx_lpi_timer = 0;
-
- return 0;
+ return false;
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
if (!e->tx_lpi_enabled) {
dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
@@ -4593,6 +4590,23 @@ static int ksz_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static int ksz_suspend(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ cancel_delayed_work_sync(&dev->mib_read);
+ return 0;
+}
+
+static int ksz_resume(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
+ return 0;
+}
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.connect_tag_protocol = ksz_connect_tag_protocol,
@@ -4633,6 +4647,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_max_mtu = ksz_max_mtu,
.get_wol = ksz_get_wol,
.set_wol = ksz_set_wol,
+ .suspend = ksz_suspend,
+ .resume = ksz_resume,
.get_ts_info = ksz_get_ts_info,
.port_hwtstamp_get = ksz_hwtstamp_get,
.port_hwtstamp_set = ksz_hwtstamp_set,
@@ -4641,7 +4657,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.cls_flower_add = ksz_cls_flower_add,
.cls_flower_del = ksz_cls_flower_del,
.port_setup_tc = ksz_setup_tc,
- .get_mac_eee = ksz_get_mac_eee,
+ .support_eee = ksz_support_eee,
.set_mac_eee = ksz_set_mac_eee,
.port_get_default_prio = ksz_port_get_default_prio,
.port_set_default_prio = ksz_port_set_default_prio,
@@ -5132,6 +5148,24 @@ void ksz_switch_remove(struct ksz_device *dev)
}
EXPORT_SYMBOL(ksz_switch_remove);
+#ifdef CONFIG_PM_SLEEP
+int ksz_switch_suspend(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_suspend(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_suspend);
+
+int ksz_switch_resume(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_resume(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_resume);
+#endif
+
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index b3bb75ca0796..af17a9c030d4 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -92,6 +92,7 @@ struct ksz_chip_data {
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ bool ptp_capable;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
};
@@ -444,6 +445,8 @@ struct ksz_dev_ops {
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
+int ksz_switch_suspend(struct device *dev);
+int ksz_switch_resume(struct device *dev);
void ksz_init_mib_timer(struct ksz_device *dev);
bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 108a958dc356..b633d263098c 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -239,10 +239,14 @@ static const struct spi_device_id ksz_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_spi_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
.of_match_table = ksz_dt_ids,
+ .pm = &ksz_spi_pm_ops,
},
.id_table = ksz_spi_ids,
.probe = ksz_spi_probe,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 086b8b3d5b40..1c83af805209 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2994,7 +2994,7 @@ static int mt753x_pcs_validate(struct phylink_pcs *pcs,
return 0;
}
-static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
@@ -3085,18 +3085,6 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
}
-static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
-
- e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
-
- return 0;
-}
-
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -3238,7 +3226,7 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
- .get_mac_eee = mt753x_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mt753x_set_mac_eee,
.conduit_state_change = mt753x_conduit_state_change,
.port_setup_tc = mt753x_setup_tc,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3a792f79270d..68d1e891752b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -394,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
- chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+ chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
@@ -1289,9 +1289,6 @@ static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1301,9 +1298,6 @@ static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & STATS_TYPE_BANK0))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1313,9 +1307,6 @@ static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
MV88E6XXX_G1_STATS_OP_HIST_RX);
@@ -1326,9 +1317,6 @@ static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
0);
@@ -1341,6 +1329,9 @@ static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
{
int ret = 0;
+ if (!(stat->type & chip->info->stats_type))
+ return 0;
+
if (chip->info->ops->stats_get_stat) {
mv88e6xxx_reg_lock(chip);
ret = chip->info->ops->stats_get_stat(chip, port, stat, data);
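
The four per-family copies of the "does this chip implement that counter bank?" test collapse into one check in the common wrapper, driven by a new stats_type mask declared per chip in the info table. Sketch of the two halves (mask values come straight from the info-table hunks below):

	/* per-family declaration in the mv88e6xxx_info table */
	.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,

	/* single gate in the common wrapper, before dispatching */
	if (!(stat->type & chip->info->stats_type))
		return 0;	/* counter not implemented on this family */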
@@ -1522,13 +1513,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -5645,6 +5629,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5665,6 +5650,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5687,6 +5673,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5708,6 +5695,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6095_ops,
@@ -5730,6 +5718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5754,6 +5743,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5776,6 +5766,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6131_ops,
@@ -5800,6 +5791,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5823,6 +5815,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5848,6 +5841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5872,6 +5866,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5897,6 +5892,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5921,6 +5917,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5946,6 +5943,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5968,6 +5966,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5992,6 +5991,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.atu_move_port_mask = 0x1f,
@@ -6016,6 +6016,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6039,6 +6040,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6063,6 +6065,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6087,6 +6090,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6114,6 +6118,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6138,6 +6143,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6161,6 +6167,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6184,6 +6191,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6208,6 +6216,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6233,6 +6242,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6259,6 +6269,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6283,6 +6294,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6307,6 +6319,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6332,6 +6345,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6359,6 +6373,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6383,6 +6398,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6408,6 +6424,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6433,6 +6450,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
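The stats_type values introduced across this table tag each chip family with the counter banks it implements (BANK0, BANK1, port counters). A minimal sketch of how common stats code can consume such a flag word; the example_hw_stat layout and helper are hypothetical, only the STATS_TYPE_* idea comes from the hunks above:

    /* Hypothetical stat descriptor: each entry records which counter
     * banks provide it, so chips advertising only BANK0 skip the rest.
     */
    struct example_hw_stat {
            const char *name;
            int type;                       /* STATS_TYPE_* bits */
    };

    static bool example_stat_supported(const struct example_hw_stat *stat,
                                       int chip_stats_type)
    {
            return stat->type & chip_stats_type;
    }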
@@ -7074,7 +7092,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_sset_count = mv88e6xxx_get_sset_count,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
- .get_mac_eee = mv88e6xxx_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mv88e6xxx_set_mac_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
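The .get_mac_eee removal above tracks the DSA EEE rework: a driver whose MAC needs no per-port EEE bookkeeping now advertises support through the generic dsa_supports_eee() helper instead of an empty getter. A sketch under that assumption (example_set_mac_eee is a hypothetical stub, not this driver's code):

    #include <net/dsa.h>

    /* Nothing to program on this MAC; the PHY handles LPI. */
    static int example_set_mac_eee(struct dsa_switch *ds, int port,
                                   struct ethtool_keee *e)
    {
            return 0;
    }

    static const struct dsa_switch_ops example_switch_ops = {
            .support_eee    = dsa_supports_eee,   /* replaces .get_mac_eee */
            .set_mac_eee    = example_set_mac_eee,
    };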
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 9fe8e8a7856b..86bf113c9bfa 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -144,6 +144,7 @@ struct mv88e6xxx_info {
unsigned int age_time_coeff;
unsigned int g1_irqs;
unsigned int g2_irqs;
+ int stats_type;
bool pvt;
/* Mark certain ports as invalid. This is required for example for the
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 5a27d047a38e..75ed1fa500a5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -55,6 +55,7 @@ static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
}
static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
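The extra neg_mode parameter threaded through every pcs_get_state() implementation in these files lets a PCS distinguish in-band from out-of-band negotiation when reporting link state. A minimal sketch, assuming the PHYLINK_PCS_NEG_* values from <linux/phylink.h>:

    #include <linux/phylink.h>

    static void example_pcs_get_state(struct phylink_pcs *pcs,
                                      unsigned int neg_mode,
                                      struct phylink_link_state *state)
    {
            if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED) {
                    /* No in-band AN: state mirrors the forced config. */
                    state->link = true;
                    return;
            }
            /* In-band AN: read resolved speed/duplex from hardware
             * (register access omitted in this sketch).
             */
    }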
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
index 88f624b65470..143fe21d1834 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -158,6 +158,7 @@ static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
}
static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index d758a6c1b226..59f63d6beec8 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mii.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -257,6 +258,7 @@ static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
@@ -395,6 +397,7 @@ static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
}
static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -748,7 +751,7 @@ static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
if (err)
dev_err(mpcs->mdio.dev.parent,
"failed to %s 2500basex fix: %pe\n",
- enable ? "enable" : "disable", ERR_PTR(err));
+ str_enable_disable(enable), ERR_PTR(err));
return err;
}
@@ -889,6 +892,7 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -896,7 +900,7 @@ static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
int err;
if (state->interface != PHY_INTERFACE_MODE_USXGMII)
- return mv88e639x_xg_pcs_get_state(pcs, state);
+ return mv88e639x_xg_pcs_get_state(pcs, neg_mode, state);
state->link = false;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dc777ddce1f3..66b1b7277281 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/property.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -176,7 +177,7 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
dev_dbg(chip->dev, "p%d: %s link %s\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
- reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
+ str_up_down(reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP));
return 0;
}
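The str_up_down()/str_enable_disable() conversions here and in the pcs-639x.c hunk above (and again in rtl8366rb.c below) come from <linux/string_choices.h>, which maps a bool to a fixed string and replaces open-coded ternaries in log messages. A small sketch:

    #include <linux/device.h>
    #include <linux/string_choices.h>

    static void example_log_state(struct device *dev, bool link, bool filtering)
    {
            dev_dbg(dev, "link %s, VLAN filtering %s\n",
                    str_up_down(link), str_enable_disable(filtering));
    }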
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3aa9c997018a..0a4e682a55ef 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1316,6 +1316,14 @@ static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
}
+static void felix_get_ts_stats(struct dsa_switch *ds, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -2237,6 +2245,7 @@ static const struct dsa_switch_ops felix_switch_ops = {
.get_stats64 = felix_get_stats64,
.get_pause_stats = felix_get_pause_stats,
.get_rmon_stats = felix_get_rmon_stats,
+ .get_ts_stats = felix_get_ts_stats,
.get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
.get_eth_mac_stats = felix_get_eth_mac_stats,
.get_eth_phy_stats = felix_get_eth_phy_stats,
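felix_get_ts_stats() plugs the port's hardware timestamping counters into the generic ethtool ts-stats query. A sketch of the shape such a callback takes; the counter semantics below reflect my reading of struct ethtool_ts_stats, so treat the field comments as assumptions:

    #include <linux/ethtool.h>
    #include <net/dsa.h>

    static void example_get_ts_stats(struct dsa_switch *ds, int port,
                                     struct ethtool_ts_stats *ts_stats)
    {
            ts_stats->pkts = 0;     /* packets timestamped by hardware */
            ts_stats->lost = 0;     /* timestamps never delivered */
            ts_stats->err  = 0;     /* timestamp generation errors */
    }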
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 59b4a7240b58..e8cb4da15dbe 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -342,7 +342,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -394,7 +394,7 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -1019,7 +1019,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
of_get_phy_mode(port, &mode);
- if (of_property_read_bool(port, "phy-handle") &&
+ if (of_property_present(port, "phy-handle") &&
mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
@@ -1491,7 +1491,7 @@ static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct qca8k_pcs, pcs);
}
-static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
@@ -2016,7 +2016,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 560c74c4ac3d..13005f10edb7 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -557,13 +557,6 @@ exit:
return ret;
}
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
bool learning)
{
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 3664a2e2f1f6..d046679265fa 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -16,7 +16,7 @@
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
-#define QCA8K_ETHERNET_TIMEOUT 5
+#define QCA8K_ETHERNET_TIMEOUT msecs_to_jiffies(5)
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -520,7 +520,6 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
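Folding msecs_to_jiffies() into QCA8K_ETHERNET_TIMEOUT keeps the unit conversion in one place: wait_for_completion_timeout() takes jiffies, so any call site passing the bare macro would otherwise silently wait in the wrong unit (5 jiffies rather than 5 ms). A sketch of the corrected pattern, with hypothetical names:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    #define EXAMPLE_TIMEOUT msecs_to_jiffies(5)     /* converted once, here */

    static int example_wait(struct completion *done)
    {
            /* timeout argument is in jiffies */
            if (!wait_for_completion_timeout(done, EXAMPLE_TIMEOUT))
                    return -ETIMEDOUT;
            return 0;
    }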
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 23374178a176..4c4a95d4380c 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,6 +21,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "realtek.h"
#include "realtek-smi.h"
@@ -1522,7 +1523,7 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
rb = priv->chip_data;
dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
- vlan_filtering ? "enable" : "disable");
+ str_enable_disable(vlan_filtering));
/* If the port is not in the member set, the frame will be dropped */
ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
@@ -1884,7 +1885,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan
static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN\n", str_enable_disable(enable));
return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
@@ -1892,7 +1893,7 @@ static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN 4k\n", str_enable_disable(enable));
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index baba204ad62f..3d790f8c6f4d 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
- if ((start - end + 1) > 64)
- pr_err("Field %d-%d too large for 64 bits!\n",
- start, end);
- else
- pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
- *val, start, end);
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
}
dump_stack();
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a542e3c9b05..977b42bc1e8c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -159,7 +159,7 @@ config ETHOC
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
config OA_TC6
- tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
depends on SPI
select PHYLIB
help
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 63c8a2328142..c1295dfad0d0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -74,7 +74,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (threshold < time_since_last_napi && napi_scheduled) {
netdev_err(dev,
"napi handler hasn't been called for a long time but is scheduled\n");
- reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
schedule_reset:
/* Change the state of the device to trigger reset
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 72db9f9e7bee..c6bd803f5b0c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
}
/*
@@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
@@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1122,6 +1125,7 @@ clean_up:
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return rc;
} /* end pcnet32_loopback_test */
@@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
return -EAGAIN;
}
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return 0; /* Always succeed */
@@ -2315,6 +2321,7 @@ err_free_ring:
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
free_irq(dev->irq, dev);
return rc;
}
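The netdev_lock()/napi_enable_locked() changes in this file follow the netdev instance-lock conversion: paths that toggle NAPI outside ndo_open/ndo_stop take the device's instance lock and use the _locked NAPI variants. A minimal sketch of the pattern, assuming those helpers as used above:

    #include <linux/netdevice.h>

    static void example_restart_napi(struct net_device *dev,
                                     struct napi_struct *napi)
    {
            netdev_lock(dev);
            /* _locked variant: caller already holds the instance lock */
            napi_enable_locked(napi);
            netdev_unlock(dev);
    }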
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..268399dfcf22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index e641dbbea1e2..b854b6b42d77 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
if (dev->of_node) {
struct clk *parent = clk_get_parent(pdata->clk);
+ long rate = rgmii_clock(pdata->phy_speed);
- switch (pdata->phy_speed) {
- case SPEED_10:
- clk_set_rate(parent, 2500000);
- break;
- case SPEED_100:
- clk_set_rate(parent, 25000000);
- break;
- default:
- clk_set_rate(parent, 125000000);
- break;
- }
+ if (rate < 0)
+ rate = 125000000;
+
+ clk_set_rate(parent, rate);
}
#ifdef CONFIG_ACPI
else {
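Both this hunk and the macb one further down replace a per-driver speed-to-rate switch with rgmii_clock(), which returns the standard RGMII reference clock for a link speed, or a negative errno. A sketch, assuming the helper's <linux/phy.h> signature:

    #include <linux/clk.h>
    #include <linux/phy.h>

    /* SPEED_10 -> 2.5 MHz, SPEED_100 -> 25 MHz, SPEED_1000 -> 125 MHz */
    static int example_set_tx_clk(struct clk *clk, int speed)
    {
            long rate = rgmii_clock(speed);

            if (rate < 0)
                    return (int)rate;       /* unsupported speed */
            return clk_set_rate(clk, rate);
    }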
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index f93cb3da44b0..8fc75bcedb70 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -348,8 +348,6 @@ struct bcmasp_intf {
/* Used if per intf wol irq */
int wol_irq;
unsigned int wol_irq_enabled:1;
-
- struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -601,5 +599,4 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 9da5ae29a105..a537c121d3e2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -348,58 +348,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
-{
- u32 reg;
-
- reg = umac_rl(intf, UMC_EEE_CTRL);
- if (enable)
- reg |= EEE_EN;
- else
- reg &= ~EEE_EN;
- umac_wl(intf, reg, UMC_EEE_CTRL);
-
- intf->eee.eee_enabled = enable;
-}
-
static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
-
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
- e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
-
return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
- int ret;
-
if (!dev->phydev)
return -ENODEV;
- if (!p->eee_enabled) {
- bcmasp_eee_enable_set(intf, false);
- } else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(intf, hw, dev,
- "EEE initialization failed: %d\n", ret);
- return ret;
- }
-
- umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
- bcmasp_eee_enable_set(intf, true);
- }
-
return phy_ethtool_set_eee(dev->phydev, e);
}
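With phylib now tracking EEE policy, the driver drops its private ethtool_keee copy: the ethtool hooks forward straight to phy_ethtool_{get,set}_eee(), and the MAC just mirrors the PHY's decision at link time. A sketch of that mirroring, with a hypothetical EEE-enable register bit:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/phy.h>
    #include <linux/types.h>

    #define EXAMPLE_EEE_EN  BIT(0)          /* hypothetical register bit */

    static void example_sync_eee(struct phy_device *phydev,
                                 void __iomem *eee_ctrl)
    {
            u32 val = readl(eee_ctrl);

            /* phylib sets enable_tx_lpi once EEE is negotiated and enabled */
            if (phydev->enable_tx_lpi)
                    val |= EXAMPLE_EEE_EN;
            else
                    val &= ~EXAMPLE_EEE_EN;
            writel(val, eee_ctrl);
    }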
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index cfd50efbdbc0..45ec1a9214a2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -619,7 +619,6 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
- bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -677,8 +676,13 @@ static void bcmasp_adj_link(struct net_device *dev)
}
umac_wl(intf, reg, UMC_CMD);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, active);
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -1055,6 +1059,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
}
umac_reset(intf);
@@ -1331,7 +1338,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, true);
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
@@ -1373,7 +1381,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, false);
reg = umac_rl(intf, UMC_MPD_CTRL);
@@ -1404,9 +1413,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf)
bcmasp_resume_from_wol(intf);
- if (intf->eee.eee_enabled)
- bcmasp_eee_enable_set(intf, true);
-
netif_device_attach(dev);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aeaa74f03046..7b8b5b39c7bb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -81,7 +81,6 @@ MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -1343,13 +1342,13 @@ static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
@@ -1842,7 +1841,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
@@ -2176,7 +2175,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
@@ -2855,6 +2854,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -4608,6 +4608,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -4622,12 +4633,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4667,7 +4677,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -4708,7 +4721,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
@@ -4729,15 +4742,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -6564,6 +6592,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6573,16 +6602,14 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
@@ -8307,16 +8334,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -9145,10 +9176,18 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -9184,14 +9223,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
@@ -9498,6 +9543,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
if (BNXT_PF(bp) &&
(flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
@@ -11556,6 +11604,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -15686,6 +15754,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -16214,8 +16286,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
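bnxt_init_ring_params() above picks the default header-data-split threshold so that four RX chunks fit a 4K page, each chunk losing its headroom and skb_shared_info overhead. A worked restatement of that arithmetic (results depend on kernel config; treat the numbers as illustrative):

    #include <linux/minmax.h>
    #include <linux/sizes.h>
    #include <linux/skbuff.h>

    static unsigned int example_default_hds_thresh(void)
    {
            /* 4 chunks per 4K page -> 1K each, minus per-buffer overhead */
            unsigned int rx_size = SZ_1K - NET_SKB_PAD -
                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

            /* never below the default copybreak of 256 bytes */
            return max(256u, rx_size);
    }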
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7df7a2233307..2373f423a523 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -34,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -2241,8 +2244,6 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2263,6 +2264,9 @@ struct bnxt {
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -2270,6 +2274,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -2342,7 +2351,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -2482,6 +2491,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
u32 fw_dbg_cap;
@@ -2501,6 +2511,8 @@ struct bnxt {
((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
#define BNXT_ROCE_VF_RESC_CAP(bp) \
((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2660,6 +2672,11 @@ struct bnxt {
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2762,6 +2779,8 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
static inline u32 bnxt_tx_avail(struct bnxt *bp,
const struct bnxt_tx_ring_info *txr)
{
@@ -2846,7 +2865,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index d87681d71106..9c5820839514 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -24,6 +24,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netdev_queues.h>
#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -833,6 +834,8 @@ static void bnxt_get_ringparam(struct net_device *dev,
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
@@ -840,16 +843,35 @@ static int bnxt_set_ringparam(struct net_device *dev,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->cfg->hds_config;
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
@@ -2050,7 +2072,8 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
int rc;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -4161,7 +4184,7 @@ err:
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- char buf[FW_VER_STR_LEN];
+ char buf[FW_VER_STR_LEN - 5];
int len;
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
@@ -4327,6 +4350,45 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u8 bank,
u16 start_addr, u16 data_length,
@@ -4375,6 +4437,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -4426,6 +4491,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
@@ -4480,6 +4548,12 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read not permitted on untrusted VF");
+ return -EPERM;
+ }
+
rc = bnxt_get_module_status(bp, extack);
if (rc)
return rc;
@@ -4777,7 +4851,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr;
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
@@ -4887,35 +4962,44 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
@@ -5309,6 +5393,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -5350,6 +5436,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 0ed26e3a28f4..e4a7f37036ed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -298,6 +298,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
@@ -311,7 +312,9 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -346,9 +349,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap,
- u16 max_id)
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -360,7 +390,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
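bnxt_ulp_async_events() relies on its smp_rmb() pairing with the smp_wmb() in bnxt_register_async_events(): the bitmap is published before max_async_event_id is raised, so any reader that passes the id check observes a valid bitmap. A stripped-down sketch of that publish/consume protocol, with hypothetical storage:

    #include <asm/barrier.h>
    #include <linux/bitops.h>
    #include <linux/types.h>

    static unsigned long *ev_bmap;          /* hypothetical */
    static u16 ev_max_id;

    static void example_publish(unsigned long *bmap, u16 max_id)
    {
            ev_bmap = bmap;
            smp_wmb();                      /* bitmap visible before id */
            WRITE_ONCE(ev_max_id, max_id);
    }

    static bool example_subscribed(u16 id)
    {
            if (id > READ_ONCE(ev_max_id))
                    return false;
            smp_rmb();                      /* pairs with example_publish() */
            return test_bit(id, ev_bmap);
    }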
@@ -417,6 +446,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->flags |= BNXT_EN_FLAG_VF;
if (BNXT_ROCE_VF_RESC_CAP(bp))
edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->chip_num = bp->chip_num;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 5d6aac60f236..7fa3b8d1ebd2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -30,7 +30,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- void (*ulp_irq_stop)(void *);
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -65,6 +67,8 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
#define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
@@ -124,6 +128,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap, u16 max_id);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f88b641533fc..e6c64e4bd66c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -395,6 +395,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
@@ -422,15 +426,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bnxt_set_rx_skb_mode(bp, true);
xdp_features_set_redirect_target(dev, true);
} else {
- int rx, tx;
-
xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index daa416fb1724..48496209fb16 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -530,19 +530,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
- switch (speed) {
- case SPEED_10:
- rate = 2500000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0)
return;
- }
rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
@@ -578,6 +568,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
@@ -608,7 +599,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
return 0;
}
-static void macb_pcs_get_state(struct phylink_pcs *pcs,
+static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
state->link = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 604dcfd49aa4..2f0b3e389e62 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6562,6 +6562,9 @@ static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
+ return;
+
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
"crypto uld critical resource is under use\n");
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9913952ccb42..49f6cab01ed5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -109,7 +109,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
- {3, 6}, /* 10 - 40 Gbps */
+ {3, 6}, /* 10+ Gbps */
};
static void enic_init_affinity_hint(struct enic *enic)
@@ -428,6 +428,36 @@ static void enic_mtu_check(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (speed > ENIC_LINK_SPEED_10G)
+ index = ENIC_LINK_40G_INDEX;
+ else if (speed > ENIC_LINK_SPEED_4G)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static void enic_link_check(struct enic *enic)
{
int link_status = vnic_dev_link_status(enic->vdev);
@@ -436,6 +466,7 @@ static void enic_link_check(struct enic *enic)
if (link_status && !carrier_ok) {
netdev_info(enic->netdev, "Link UP\n");
netif_carrier_on(enic->netdev);
+ enic_set_rx_coal_setting(enic);
} else if (!link_status && carrier_ok) {
netdev_info(enic->netdev, "Link DOWN\n");
netif_carrier_off(enic->netdev);
@@ -1901,36 +1932,6 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static void enic_set_rx_coal_setting(struct enic *enic)
-{
- unsigned int speed;
- int index = -1;
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-
- /* 1. Read the link speed from fw
- * 2. Pick the default range for the speed
- * 3. Update it in enic->rx_coalesce_setting
- */
- speed = vnic_dev_port_speed(enic->vdev);
- if (ENIC_LINK_SPEED_10G < speed)
- index = ENIC_LINK_40G_INDEX;
- else if (ENIC_LINK_SPEED_4G < speed)
- index = ENIC_LINK_10G_INDEX;
- else
- index = ENIC_LINK_4G_INDEX;
-
- rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
- rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
- rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
-
- /* Start with the value provided by UCSM */
- for (index = 0; index < enic->rq_count; index++)
- enic->cq[index].cur_rx_coal_timeval =
- enic->config.intr_timer_usec;
-
- rx_coal->use_adaptive_rx_coalesce = 1;
-}
-
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -3063,7 +3064,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_rfs_flw_tbl_init(enic);
- enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
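Moving enic_set_rx_coal_setting() from probe to the link-up path means the adaptive-coalescing range is chosen from the speed the link actually negotiated, not whatever the vNIC reported before carrier came up. A sketch of the ordering, with hypothetical helpers:

    #include <linux/netdevice.h>

    static void example_set_rx_coal_defaults(struct net_device *ndev)
    {
            /* hypothetical: pick the mod_range[] index from the live speed */
    }

    static void example_link_up(struct net_device *ndev)
    {
            if (!netif_carrier_ok(ndev)) {
                    netif_carrier_on(ndev);
                    example_set_rx_coal_defaults(ndev);
            }
    }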
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 95a5295d0361..0d030cb0b21c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1966,23 +1966,41 @@ failed:
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ netif_napi_set_irq(&queue->napi, queue->irq);
napi_enable(&queue->napi);
- tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ tsnep_enable_irq(adapter, queue->irq_mask);
- if (queue->tx)
+ if (queue->tx) {
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, &queue->napi);
tsnep_tx_enable(queue->tx);
+ }
- if (queue->rx)
+ if (queue->rx) {
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, &queue->napi);
tsnep_rx_enable(queue->rx);
+ }
}
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
- if (queue->tx)
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ if (queue->rx)
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+
+ if (queue->tx) {
tsnep_tx_disable(queue->tx, &queue->napi);
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
napi_disable(&queue->napi);
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ tsnep_disable_irq(adapter, queue->irq_mask);
/* disable RX after NAPI polling has been disabled, because RX can be
* enabled during NAPI polling
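The tsnep changes register each queue's NAPI with the core via netif_queue_set_napi() so netlink can report queue-to-NAPI mappings. Note the ordering, which the sketch below mirrors: publish the association when enabling, clear it before the final napi_disable(). Names here are illustrative:

    #include <linux/netdevice.h>

    static void example_queue_enable(struct net_device *dev, int idx,
                                     struct napi_struct *napi, int irq)
    {
            netif_napi_set_irq(napi, irq);
            napi_enable(napi);
            netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, napi);
    }

    static void example_queue_disable(struct net_device *dev, int idx,
                                      struct napi_struct *napi)
    {
            /* unpublish first so dumps never see a dying NAPI */
            netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, NULL);
            napi_disable(napi);
    }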
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 75401d2a5fb4..a2d7300925a8 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -81,8 +81,7 @@ config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index bf5baef5c3e0..4948b4906584 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2281,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
- new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
/* Release the initial buffer */
xdp_return_frame_rx_napi(xdpf);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index a293b08f36d4..147a93bf9fa9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -780,13 +780,14 @@ struct ethsw_dump_ctx {
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
struct ethsw_dump_ctx *dump)
{
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
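The dpaa2-switch hunk swaps the magic cb->args[2] cursor for the typed ndo_fdb_dump_context that overlays netlink_callback->ctx. A sketch of the accessor pattern, assuming the fdb_idx member used above:

    #include <linux/netdevice.h>
    #include <linux/netlink.h>

    static bool example_fdb_should_skip(struct netlink_callback *cb, int idx)
    {
            struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;

            /* resume after the last entry delivered in the previous dump */
            return idx < ctx->fdb_idx;
    }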
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 535969fa0fdb..6a6fc819dfde 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -146,6 +146,27 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool enetc_skb_is_ipv6(struct sk_buff *skb)
+{
+ return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+ return skb->csum_offset == offsetof(struct tcphdr, check);
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -163,6 +184,29 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_addr_t dma;
u8 flags = 0;
+ enetc_clear_tx_bd(&temp_bd);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Cannot support TSD and checksum offload at the same time */
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
+ skb_network_offset(skb));
+ temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ skb_network_header_len(skb) / 4);
+ temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
+ enetc_skb_is_ipv6(skb));
+ if (enetc_skb_is_tcp(skb))
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_TCP);
+ else
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_UDP);
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ } else if (skb_checksum_help(skb)) {
+ return 0;
+ }
+ }
+
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
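The CHECKSUM_PARTIAL branch above keys off skb->csum_offset alone: the TCP and UDP checksum fields sit at different offsets in their respective headers, so the offset identifies the protocol without reparsing the packet. A minimal userspace sketch of that test; the toy_* structs are stand-ins that mirror the layout of the kernel's tcphdr/udphdr:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_tcphdr {
	uint16_t source, dest;
	uint32_t seq, ack_seq;
	uint16_t flags_doff, window;
	uint16_t check;   /* offset 16, as in struct tcphdr */
	uint16_t urg_ptr;
};

struct toy_udphdr {
	uint16_t source, dest, len;
	uint16_t check;   /* offset 6, as in struct udphdr */
};

static int csum_offload_supported(size_t csum_offset)
{
	return csum_offset == offsetof(struct toy_tcphdr, check) ||
	       csum_offset == offsetof(struct toy_udphdr, check);
}

int main(void)
{
	printf("TCP (offset %zu): %s\n", offsetof(struct toy_tcphdr, check),
	       csum_offload_supported(offsetof(struct toy_tcphdr, check)) ?
	       "hw offload" : "sw checksum");
	printf("unknown (offset 10): %s\n",
	       csum_offload_supported(10) ? "hw offload" : "sw checksum");
	return 0;
}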
@@ -173,7 +217,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -489,8 +532,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso
}
}
+static int enetc_lso_count_descs(const struct sk_buff *skb)
+{
+ /* 4 BDs: 1 BD for the LSO header + 1 BD for the extension BD + 1 BD
+ * for the linear data excluding the LSO header, namely
+ * skb_headlen(skb) - lso_hdr_len (it may be 0, which is fine since
+ * we only need to cover the worst case). And 1 BD for the gap.
+ */
+ return skb_shinfo(skb)->nr_frags + 4;
+}
+
+static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
+{
+ int hdr_len, tlen;
+
+ tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+ hdr_len = skb_transport_offset(skb) + tlen;
+
+ return hdr_len;
+}
+
+static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
+{
+ lso->lso_seg_size = skb_shinfo(skb)->gso_size;
+ lso->ipv6 = enetc_skb_is_ipv6(skb);
+ lso->tcp = skb_is_gso_tcp(skb);
+ lso->l3_hdr_len = skb_network_header_len(skb);
+ lso->l3_start = skb_network_offset(skb);
+ lso->hdr_len = enetc_lso_get_hdr_len(skb);
+ lso->total_len = skb->len - lso->hdr_len;
+}
+
+static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso)
+{
+ union enetc_tx_bd txbd_tmp, *txbd;
+ struct enetc_tx_swbd *tx_swbd;
+ u16 frm_len, frm_len_ext;
+ u8 flags, e_flags = 0;
+ dma_addr_t addr;
+ char *hdr;
+
+ /* Get the first BD of the LSO BDs chain */
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Prepare LSO header: MAC + IP + TCP/UDP */
+ hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
+ memcpy(hdr, skb->data, lso->hdr_len);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ /* {frm_len_ext, frm_len} indicates the total length of the
+ * large transmit data unit. frm_len contains the 16 least
+ * significant bits and frm_len_ext contains the 4 most
+ * significant bits.
+ */
+ frm_len = lso->total_len & 0xffff;
+ frm_len_ext = (lso->total_len >> 16) & 0xf;
+
+ /* Set the flags of the first BD */
+ flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
+ ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(frm_len);
+ txbd_tmp.flags = flags;
+
+ txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
+ /* l3_hdr_len is expressed in 32-bit (4-byte) units */
+ txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ lso->l3_hdr_len / 4);
+ if (lso->ipv6)
+ txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
+ else
+ txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;
+
+ txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
+ ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);
+
+ /* For the LSO header we do not set the dma address since
+ * we do not want it unmapped when we do cleanup. We still
+ * set len so that we count the bytes sent.
+ */
+ tx_swbd->len = lso->hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Advance to the next BD, which is the extension BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ if (skb_vlan_tag_present(skb)) {
+ /* Setup the VLAN fields */
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
+ e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
+ txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
+ *txbd = txbd_tmp;
+}
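The {frm_len_ext, frm_len} pair above carries a 20-bit total length across two descriptor fields. A standalone sketch of the split with a round-trip check; the 200 KB value is an arbitrary example under the 256 KB LSO limit:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t total_len = 200 * 1024;
	uint16_t frm_len = total_len & 0xffff;          /* 16 LSBs */
	uint16_t frm_len_ext = (total_len >> 16) & 0xf; /* 4 MSBs */

	assert((((uint32_t)frm_len_ext << 16) | frm_len) == total_len);
	printf("frm_len=0x%04x frm_len_ext=0x%x\n", frm_len, frm_len_ext);
	return 0;
}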
+
+static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso, int *count)
+{
+ union enetc_tx_bd txbd_tmp, *txbd = NULL;
+ struct enetc_tx_swbd *tx_swbd;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u8 flags = 0;
+ int len, f;
+
+ len = skb_headlen(skb) - lso->hdr_len;
+ if (len > 0) {
+ dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ if (txbd)
+ *txbd = txbd_tmp;
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ /* Last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ txbd_tmp.flags = flags;
+ *txbd = txbd_tmp;
+
+ tx_swbd->is_eof = 1;
+ tx_swbd->skb = skb;
+
+ return 0;
+}
+
+static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_lso_t lso = {0};
+ int err, i, count = 0;
+
+ /* Initialize the LSO handler */
+ enetc_lso_start(skb, &lso);
+ i = tx_ring->next_to_use;
+
+ enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
+ /* First BD plus the extension BD */
+ count += 2;
+
+ err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
+ if (err)
+ goto dma_err;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (--count);
+
+ return 0;
+}
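The dma_err path walks back over the descriptors just written, wrapping from index 0 to the end of the ring. A userspace model of that unwind loop; BD_COUNT and the starting index are arbitrary:

#include <stdio.h>

#define BD_COUNT 8

int main(void)
{
	int i = 1, count = 3; /* 3 BDs written, ring wrapped near index 0 */

	do {
		printf("free BD %d\n", i); /* frees 1, 0, 7 */
		if (i == 0)
			i = BD_COUNT;
		i--;
	} while (--count);
	return 0;
}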
+
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
@@ -556,7 +824,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
bd_data_num++;
tso_build_data(skb, &tso, size);
- if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ if (unlikely(bd_data_num >= priv->max_frags && data_len))
goto err_chained_bd;
}
@@ -594,7 +862,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -608,16 +876,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
if (skb_is_gso(skb)) {
- if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ /* LSO data unit lengths of up to 256KB are supported */
+ if (priv->active_offloads & ENETC_F_LSO &&
+ (skb->len - enetc_lso_get_hdr_len(skb)) <=
+ ENETC_LSO_MAX_DATA_LEN) {
+ if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_tso_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ count = enetc_lso_hw_offload(tx_ring, skb);
+ } else {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
} else {
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
@@ -627,11 +907,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -640,7 +915,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
if (unlikely(!count))
goto drop_packet_err;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
@@ -908,7 +1183,8 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
- (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ (enetc_bd_unused(tx_ring) >=
+ ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -1759,6 +2035,9 @@ void enetc_get_si_caps(struct enetc_si *si)
rss = enetc_rd(hw, ENETC_SIRSSCAPR);
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
+
+ if (val & ENETC_SIPCAPR0_LSO)
+ si->hw_features |= ENETC_SI_F_LSO;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);
@@ -2055,6 +2334,14 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
return 0;
}
+static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC4_SILSOSFMR0,
+ SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
+ ENETC4_TCP_NL_SEG_FLAGS_DMASK));
+ enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2068,6 +2355,9 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+ if (si->hw_features & ENETC_SI_F_LSO)
+ enetc_set_lso_flags_mask(hw);
+
/* TODO: RSS support for i.MX95 will be supported later, and the
* is_enetc_rev1() condition will be removed
*/
@@ -3269,17 +3559,21 @@ EXPORT_SYMBOL_GPL(enetc_pci_remove);
static const struct enetc_drvdata enetc_pf_data = {
.sysclk_freq = ENETC_CLK_400M,
.pmac_offset = ENETC_PMAC_OFFSET,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc4_pf_data = {
.sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
.pmac_offset = ENETC4_PMAC_OFFSET,
.eth_ops = &enetc4_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc_vf_data = {
.sysclk_freq = ENETC_CLK_400M,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_vf_ethtool_ops,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 72fa03dbc2dd..4ad4eb5c5a74 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -41,6 +41,18 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_lso_t {
+ bool ipv6;
+ bool tcp;
+ u8 l3_hdr_len;
+ u8 hdr_len; /* LSO header length */
+ u8 l3_start;
+ u16 lso_seg_size;
+ int total_len; /* total data length, excluding the LSO header */
+};
+
+#define ENETC_LSO_MAX_DATA_LEN SZ_256K
+
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
@@ -59,9 +71,16 @@ struct enetc_rx_swbd {
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
+/* For LS1028A, max # of chained Tx BDs is 15, including head and
+ * extension BD.
+ */
#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+/* For ENETC v4 and later versions, the max # of chained Tx BDs is 63,
+ * including the head and extension BDs. Since MAX_SKB_FRAGS ranges
+ * from 17 to 45, ENETC4_MAX_SKB_FRAGS is simply set to MAX_SKB_FRAGS.
+ */
+#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
+#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
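Once max_frags is a per-device value, the worst-case TX BD budget follows directly from the macros above: max_frags data BDs plus one head BD plus the two-BD overhead (optional extension BD and one BD gap). A runnable sketch of the arithmetic, using the two frag counts quoted above:

#include <stdio.h>

#define TXBDS_NEEDED(val)       ((val) + 2)
#define TXBDS_MAX_NEEDED(frags) TXBDS_NEEDED((frags) + 1)

int main(void)
{
	printf("LS1028A, 13 frags: %d BDs\n", TXBDS_MAX_NEEDED(13));  /* 16 */
	printf("ENETC v4, 17 frags: %d BDs\n", TXBDS_MAX_NEEDED(17)); /* 20 */
	return 0;
}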
struct enetc_ring_stats {
unsigned int packets;
@@ -231,9 +250,12 @@ enum enetc_errata {
#define ENETC_SI_F_PSFP BIT(0)
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
+#define ENETC_SI_F_LSO BIT(3)
struct enetc_drvdata {
u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
+ u8 tx_csum:1;
+ u8 max_frags;
u64 sysclk_freq;
const struct ethtool_ops *eth_ops;
};
@@ -341,6 +363,8 @@ enum enetc_active_offloads {
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
ENETC_F_QBU = BIT(11),
+ ENETC_F_TXCSUM = BIT(12),
+ ENETC_F_LSO = BIT(13),
};
enum enetc_flags_bit {
@@ -375,6 +399,7 @@ struct enetc_ndev_priv {
u16 msg_enable;
u8 preemptible_tcs;
+ u8 max_frags; /* The maximum number of BDs for fragments */
enum enetc_active_offloads active_offloads;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index 26b220677448..695cb07c74bc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -12,6 +12,29 @@
#define NXP_ENETC_VENDOR_ID 0x1131
#define NXP_ENETC_PF_DEV_ID 0xe101
+/**********************Station interface registers************************/
+/* Station interface LSO segmentation flag mask register 0/1 */
+#define ENETC4_SILSOSFMR0 0x1300
+#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16)
+#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0)
+#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \
+ FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first))
+
+#define ENETC4_SILSOSFMR1 0x1304
+#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0)
+#define ENETC4_TCP_FLAGS_FIN BIT(0)
+#define ENETC4_TCP_FLAGS_SYN BIT(1)
+#define ENETC4_TCP_FLAGS_RST BIT(2)
+#define ENETC4_TCP_FLAGS_PSH BIT(3)
+#define ENETC4_TCP_FLAGS_ACK BIT(4)
+#define ENETC4_TCP_FLAGS_URG BIT(5)
+#define ENETC4_TCP_FLAGS_ECE BIT(6)
+#define ENETC4_TCP_FLAGS_CWR BIT(7)
+#define ENETC4_TCP_FLAGS_NS BIT(8)
+/* Per tso_build_hdr(), clear all special flags on every segment except the last. */
+#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \
+ ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH)
+
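SILSOSFMR0_VAL_SET() above is straightforward FIELD_PREP packing into the two 12-bit segment-flag fields. A self-contained userspace model; GENMASK and FIELD_PREP are re-implemented locally (32-bit only) so the sketch compiles outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define TCP_MID_SEG GENMASK(27, 16)
#define TCP_1ST_SEG GENMASK(11, 0)

int main(void)
{
	uint32_t flags = (1u << 0) | (1u << 2) | (1u << 3); /* FIN | RST | PSH */
	uint32_t reg = FIELD_PREP(TCP_MID_SEG, flags) |
		       FIELD_PREP(TCP_1ST_SEG, flags);

	/* same flag mask applied to first and middle segments */
	printf("SILSOSFMR0 = 0x%08x\n", reg); /* 0x000d000d */
	return 0;
}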
/***************************ENETC port registers**************************/
#define ENETC4_ECAPR0 0x0
#define ECAPR0_RFS BIT(2)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 55ba949230ff..4098f01479bc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -25,6 +25,7 @@
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR0_RFS BIT(2)
+#define ENETC_SIPCAPR0_LSO BIT(1)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -554,11 +555,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
union enetc_tx_bd {
struct {
__le64 addr;
- __le16 buf_len;
+ union {
+ __le16 buf_len;
+ __le16 hdr_len; /* For LSO, ENETC 4.1 and later */
+ };
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_aux0;
+#define ENETC_TX_BD_L3_START GENMASK(6, 0)
+#define ENETC_TX_BD_IPCS BIT(7)
+ u8 l3_aux1;
+#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0)
+#define ENETC_TX_BD_L3T BIT(7)
+ u8 l4_aux;
+#define ENETC_TX_BD_L4T GENMASK(7, 5)
+#define ENETC_TXBD_L4T_UDP 1
+#define ENETC_TXBD_L4T_TCP 2
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -569,23 +582,27 @@ union enetc_tx_bd {
__le32 tstamp;
__le16 tpid;
__le16 vid;
- u8 reserved[6];
+ __le16 lso_sg_size; /* For ENETC 4.1 and later */
+ __le16 frm_len_ext; /* For ENETC 4.1 and later */
+ u8 reserved[2];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
struct {
__le32 tstamp;
- u8 reserved[10];
+ u8 reserved[8];
+ __le16 lso_err_count; /* For ENETC 4.1 and later */
u8 status;
u8 flags;
} wb; /* writeback descriptor */
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
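The reworked union above overlays the LSO-only fields (hdr_len, lso_sg_size, frm_len_ext, lso_err_count) on bytes that were reserved or carried another meaning, so the BD size is unchanged. A toy illustration of the buf_len/hdr_len aliasing; the union itself is a stand-in, only the field names mirror the driver's:

#include <stdint.h>
#include <stdio.h>

union toy_tx_bd_len {
	uint16_t buf_len; /* regular descriptor */
	uint16_t hdr_len; /* LSO descriptor, ENETC 4.1 and later */
};

int main(void)
{
	union toy_tx_bd_len w;

	w.hdr_len = 54; /* e.g. MAC + IP + TCP header bytes */
	printf("same word read as buf_len: %u\n", w.buf_len); /* 54 */
	return 0;
}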
@@ -654,6 +671,8 @@ union enetc_rx_bd {
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
+#define ENETC_TPID_8021Q 0
+
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 0eecfc833164..3fd9b0727875 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -101,6 +101,7 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -109,16 +110,24 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
ndev->priv_flags |= IFF_UNICAST_FLT;
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
+
+ if (si->hw_features & ENETC_SI_F_LSO)
+ priv->active_offloads |= ENETC_F_LSO;
+
/* TODO: currently, i.MX95 ENETC driver does not support advanced features */
if (!is_enetc_rev1(si)) {
ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index a5f8ce576b6e..3768752b6008 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -136,6 +136,7 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -144,11 +145,13 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1cca0425d493..c81f2ea588f2 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -671,8 +671,6 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
- /* tx lpi eee mode */
- struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b55047c0237..68725506a095 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1591,19 +1591,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rxq->rx_skb_info[index].page = new_page;
rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
@@ -1698,6 +1701,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
#if !defined(CONFIG_M5272)
@@ -1766,12 +1770,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
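The fix above is about ordering: the old buffer's DMA address is saved before fec_enet_update_cbd() repoints the descriptor at a fresh page, and on allocation failure the frame is dropped while the old buffer stays in the ring. A userspace model of that flow; toy_bd and the addresses are stand-ins, not the driver's types:

#include <stdint.h>
#include <stdio.h>

struct toy_bd { uint32_t bufaddr; };

static int refill(struct toy_bd *bd, uint32_t new_addr)
{
	if (!new_addr)          /* allocation failed: keep the old buffer */
		return -1;
	bd->bufaddr = new_addr; /* descriptor now points at the new page */
	return 0;
}

int main(void)
{
	struct toy_bd bd = { .bufaddr = 0x1000 };
	uint32_t old = bd.bufaddr; /* capture before the refill */

	if (refill(&bd, 0x2000) == 0)
		printf("sync buffer at 0x%x, pass it up the stack\n", old);
	else
		printf("rx_dropped++, reuse buffer at 0x%x\n", bd.bufaddr);
	return 0;
}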
@@ -2045,14 +2054,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
return us * (fep->clk_ref_rate / 1000) / 1000;
}
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
+ bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
unsigned int sleep_cycle, wake_cycle;
if (enable) {
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
wake_cycle = sleep_cycle;
} else {
sleep_cycle = 0;
@@ -2105,7 +2114,9 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_enable(&fep->napi);
}
if (fep->quirks & FEC_QUIRK_HAS_EEE)
- fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
+ fec_enet_eee_mode_set(ndev,
+ phy_dev->eee_cfg.tx_lpi_timer,
+ phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -3181,7 +3192,6 @@ static int
fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3189,8 +3199,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->tx_lpi_timer = p->tx_lpi_timer;
-
return phy_ethtool_get_eee(ndev->phydev, edata);
}
@@ -3198,7 +3206,6 @@ static int
fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3206,8 +3213,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- p->tx_lpi_timer = edata->tx_lpi_timer;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index fb416d60dcd7..11887458f050 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2690,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
{
struct fman *fman;
struct device_node *fm_node, *muram_node;
+ void __iomem *base_addr;
struct resource *res;
u32 val, range[2];
int err, irq;
struct clk *clk;
u32 clk_rate;
- phys_addr_t phys_base_addr;
- resource_size_t mem_size;
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
@@ -2724,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_node_put;
fman->dts_params.err_irq = err;
- /* Get the FM address */
- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
- __func__);
- goto fman_node_put;
- }
-
- phys_base_addr = res->start;
- mem_size = resource_size(res);
-
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
@@ -2803,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
}
}
- fman->dts_params.res =
- devm_request_mem_region(&of_dev->dev, phys_base_addr,
- mem_size, "fman");
- if (!fman->dts_params.res) {
- err = -EBUSY;
- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
- __func__);
- goto fman_free;
- }
-
- fman->dts_params.base_addr =
- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (!fman->dts_params.base_addr) {
- err = -ENOMEM;
+ base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res);
+ if (IS_ERR(base_addr)) {
+ err = PTR_ERR(base_addr);
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
+ fman->dts_params.base_addr = base_addr;
+ fman->dts_params.res = res;
+
fman->dev = &of_dev->dev;
err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
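The conversion above folds platform_get_resource(), devm_request_mem_region() and devm_ioremap() into one managed helper. A hedged fragment showing the consolidated pattern; toy_probe() is illustrative and not part of this driver:

#include <linux/err.h>
#include <linux/platform_device.h>

static int toy_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is mapped and the region is claimed for the device lifetime */
	return 0;
}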
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 85617bb94959..b3e2a596ad2c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
return container_of(pcs, struct fman_mac, pcs);
}
-static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state);
}
static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 6663c1768089..88510f822759 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -34,6 +34,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
@@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = {
.transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
- .prel = 7,
.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
@@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
return 0;
}
-static int init_check_frame_length_mode(int length_check,
- u32 __iomem *maccfg2_register)
-{
- u32 value = 0;
-
- value = in_be32(maccfg2_register);
-
- if (length_check)
- value |= MACCFG2_LC;
- else
- value &= ~MACCFG2_LC;
-
- out_be32(maccfg2_register, value);
- return 0;
-}
-
-static int init_preamble_length(u8 preamble_length,
- u32 __iomem *maccfg2_register)
-{
- if ((preamble_length < 3) || (preamble_length > 7))
- return -EINVAL;
-
- clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
- preamble_length << MACCFG2_PREL_SHIFT);
-
- return 0;
-}
-
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
int promiscuous, u32 __iomem *upsmr_register)
@@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length,
return 0;
}
-static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+static bool phy_interface_mode_is_reduced(phy_interface_t interface)
{
- struct ucc_geth_info *ug_info;
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- int ret_val;
- u32 upsmr, maccfg2;
- u16 value;
-
- ugeth_vdbg("%s: IN", __func__);
-
- ug_info = ugeth->ug_info;
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- /* Set MACCFG2 */
- maccfg2 = in_be32(&ug_regs->maccfg2);
- maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
- if ((ugeth->max_speed == SPEED_10) ||
- (ugeth->max_speed == SPEED_100))
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- else if (ugeth->max_speed == SPEED_1000)
- maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
- maccfg2 |= ug_info->padAndCrc;
- out_be32(&ug_regs->maccfg2, maccfg2);
-
- /* Set UPSMR */
- upsmr = in_be32(&uf_regs->upsmr);
- upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
- UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
- upsmr |= UCC_GETH_UPSMR_RPM;
- switch (ugeth->max_speed) {
- case SPEED_10:
- upsmr |= UCC_GETH_UPSMR_R10M;
- fallthrough;
- case SPEED_100:
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
- upsmr |= UCC_GETH_UPSMR_RMM;
- }
- }
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- upsmr |= UCC_GETH_UPSMR_TBIM;
- }
- if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
- upsmr |= UCC_GETH_UPSMR_SGMM;
-
- out_be32(&uf_regs->upsmr, upsmr);
-
- /* Disable autonegotiation in tbi mode, because by default it
- comes up in autonegotiation mode. */
- /* Note that this depends on proper setting in utbipar register. */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
- struct phy_device *tbiphy;
-
- if (!ug_info->tbi_node)
- pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
-
- tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
- pr_warn("Could not get TBI device\n");
-
- value = phy_read(tbiphy, ENET_TBI_MII_CR);
- value &= ~0x1000; /* Turn off autonegotiation */
- phy_write(tbiphy, ENET_TBI_MII_CR, value);
-
- put_device(&tbiphy->mdio.dev);
- }
-
- init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
-
- ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
- if (ret_val != 0) {
- if (netif_msg_probe(ugeth))
- pr_err("Preamble length must be between 3 and 7 inclusive\n");
- return ret_val;
- }
-
- return 0;
+ return phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_RTBI;
}
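The helper above replaces a six-way comparison chain that the old code repeated in two places. A trivial runnable model of the same consolidation; the toy enum stands in for phy_interface_t:

#include <stdbool.h>
#include <stdio.h>

enum toy_phy_mode { TOY_MII, TOY_RMII, TOY_RGMII, TOY_RGMII_ID, TOY_RTBI, TOY_SGMII };

static bool toy_mode_is_rgmii(enum toy_phy_mode m)
{
	return m == TOY_RGMII || m == TOY_RGMII_ID;
}

static bool toy_mode_is_reduced(enum toy_phy_mode m)
{
	return toy_mode_is_rgmii(m) || m == TOY_RMII || m == TOY_RTBI;
}

int main(void)
{
	printf("RMII reduced? %d\n", toy_mode_is_reduced(TOY_RMII));   /* 1 */
	printf("SGMII reduced? %d\n", toy_mode_is_reduced(TOY_SGMII)); /* 0 */
	return 0;
}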
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
@@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
/* allow to xmit again */
netif_tx_wake_all_queues(ugeth->ndev);
- __netdev_watchdog_up(ugeth->ndev);
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the ugeth structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-
-static void adjust_link(struct net_device *dev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- struct phy_device *phydev = ugeth->phydev;
- int new_state = 0;
-
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- if (phydev->link) {
- u32 tempval = in_be32(&ug_regs->maccfg2);
- u32 upsmr = in_be32(&uf_regs->upsmr);
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != ugeth->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- tempval &= ~(MACCFG2_FDX);
- else
- tempval |= MACCFG2_FDX;
- ugeth->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != ugeth->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case SPEED_1000:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_BYTE);
- break;
- case SPEED_100:
- case SPEED_10:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_NIBBLE);
- /* if reduced mode, re-set UPSMR.R10M */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (phydev->speed == SPEED_10)
- upsmr |= UCC_GETH_UPSMR_R10M;
- else
- upsmr &= ~UCC_GETH_UPSMR_R10M;
- }
- break;
- default:
- if (netif_msg_link(ugeth))
- pr_warn(
- "%s: Ack! Speed (%d) is not 10/100/1000!",
- dev->name, phydev->speed);
- break;
- }
- ugeth->oldspeed = phydev->speed;
- }
-
- if (!ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 1;
- }
-
- if (new_state) {
- /*
- * To change the MAC configuration we need to disable
- * the controller. To do so, we have to either grab
- * ugeth->lock, which is a bad idea since 'graceful
- * stop' commands might take quite a while, or we can
- * quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
- }
- } else if (ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(ugeth))
- phy_print_status(phydev);
+ netdev_watchdog_up(ugeth->ndev);
}
/* Initialize TBI PHY interface for communicating with the
@@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev)
struct phy_device *tbiphy;
if (!ug_info->tbi_node) {
- dev_warn(&dev->dev, "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
+ dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n");
return;
}
@@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev)
put_device(&tbiphy->mdio.dev);
}
-/* Configure the PHY for dev.
- * returns 0 if success. -1 if failure
- */
-static int init_phy(struct net_device *dev)
+static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct ucc_geth_private *priv = netdev_priv(dev);
- struct ucc_geth_info *ug_info = priv->ug_info;
- struct phy_device *phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs;
+ u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2);
+ u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr);
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
+ old_maccfg2 = maccfg2;
+ old_upsmr = upsmr;
- phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
- priv->phy_interface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
+ /* No length check */
+ maccfg2 &= ~MACCFG2_LC;
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+
+ if (speed == SPEED_10 || speed == SPEED_100)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+
+ maccfg2 |= ug_info->padAndCrc;
+
+ if (phy_interface_mode_is_reduced(interface)) {
+
+ if (interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+
+ switch (speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ fallthrough;
+ case SPEED_100:
+ if (interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
- uec_configure_serdes(dev);
+ if (interface == PHY_INTERFACE_MODE_TBI ||
+ interface == PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_TBIM;
- phy_set_max_speed(phydev, priv->max_speed);
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
- priv->phydev = phydev;
+ if (duplex == DUPLEX_HALF)
+ maccfg2 &= ~(MACCFG2_FDX);
+ else
+ maccfg2 |= MACCFG2_FDX;
- return 0;
+ if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, maccfg2);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(ndev);
+
+ if (!phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 0;
+ ug_info->receiveFlowControl = rx_pause;
+ ug_info->transmitFlowControl = tx_pause;
+
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ u16 value;
+
+ if (state->interface == PHY_INTERFACE_MODE_TBI ||
+ state->interface == PHY_INTERFACE_MODE_RTBI) {
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ pr_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->mdio.dev);
+ }
+
+ if (phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 1;
+
+ init_flow_control_params(ug_info->aufc, 1, 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
}
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
@@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
- struct phy_device *phydev = ugeth->phydev;
ugeth_vdbg("%s: IN", __func__);
@@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
* Must be done before disabling the controller
* or deadlock may happen.
*/
- phy_stop(phydev);
+ phylink_stop(ugeth->phylink);
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
goto err;
}
- err = adjust_enet_interface(ugeth);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
- goto err;
- }
-
/* Set MACSTNADDR1, MACSTNADDR2 */
/* For more details see the hardware spec. */
init_mac_station_addr_regs(dev->dev_addr[0],
@@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
- err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
- goto err;
- }
-
return 0;
err:
ucc_geth_stop(ugeth);
@@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev)
return -EINVAL;
}
- err = init_phy(dev);
+ err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0);
if (err) {
- netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
- return err;
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
}
err = ucc_geth_init_mac(ugeth);
@@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev)
goto err;
}
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
napi_enable(&ugeth->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
- qe_alive_during_sleep() || ugeth->phydev->irq);
+ qe_alive_during_sleep() || dev->phydev->irq);
device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
return err;
@@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev)
cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
+ phylink_disconnect_phy(ugeth->phylink);
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
ucc_geth_stop(ugeth);
ucc_geth_init_mac(ugeth);
/* Must start PHY here */
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
netif_tx_start_all_queues(dev);
}
@@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ bool mac_wol = false;
if (!netif_running(ndev))
return 0;
@@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
*/
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- if (ugeth->wol_en & WAKE_MAGIC) {
+ if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) {
setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
- } else if (!(ugeth->wol_en & WAKE_PHY)) {
- phy_stop(ugeth->phydev);
+ mac_wol = true;
}
+ rtnl_lock();
+ phylink_suspend(ugeth->phylink, mac_wol);
+ rtnl_unlock();
+
return 0;
}
@@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev)
}
}
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
-
- phy_stop(ugeth->phydev);
- phy_start(ugeth->phydev);
+ rtnl_lock();
+ phylink_resume(ugeth->phylink);
+ rtnl_unlock();
napi_enable(&ugeth->napi);
netif_device_attach(ndev);
@@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev)
#define ucc_geth_resume NULL
#endif
-static phy_interface_t to_phy_interface(const char *phy_connection_type)
-{
- if (strcasecmp(phy_connection_type, "mii") == 0)
- return PHY_INTERFACE_MODE_MII;
- if (strcasecmp(phy_connection_type, "gmii") == 0)
- return PHY_INTERFACE_MODE_GMII;
- if (strcasecmp(phy_connection_type, "tbi") == 0)
- return PHY_INTERFACE_MODE_TBI;
- if (strcasecmp(phy_connection_type, "rmii") == 0)
- return PHY_INTERFACE_MODE_RMII;
- if (strcasecmp(phy_connection_type, "rgmii") == 0)
- return PHY_INTERFACE_MODE_RGMII;
- if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
- return PHY_INTERFACE_MODE_RGMII_ID;
- if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
- return PHY_INTERFACE_MODE_RGMII_TXID;
- if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
- return PHY_INTERFACE_MODE_RGMII_RXID;
- if (strcasecmp(phy_connection_type, "rtbi") == 0)
- return PHY_INTERFACE_MODE_RTBI;
- if (strcasecmp(phy_connection_type, "sgmii") == 0)
- return PHY_INTERFACE_MODE_SGMII;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (!ugeth->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+ return phylink_mii_ioctl(ugeth->phylink, rq, cmd);
}
static const struct net_device_ops ucc_geth_netdev_ops = {
@@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
@@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
+struct phylink_mac_ops ugeth_mac_ops = {
+ .mac_link_up = ugeth_mac_link_up,
+ .mac_link_down = ugeth_mac_link_down,
+ .mac_config = ugeth_mac_config,
+};
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
+ struct device_node *phy_node;
+ struct phylink *phylink;
struct resource res;
- int err, ucc_num, max_speed = 0;
+ int err, ucc_num;
const unsigned int *prop;
phy_interface_t phy_interface;
- static const int enet_to_speed[] = {
- SPEED_10, SPEED_10, SPEED_10,
- SPEED_100, SPEED_100, SPEED_100,
- SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
- };
- static const phy_interface_t enet_to_phy_interface[] = {
- PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
- PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
- PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
- PHY_INTERFACE_MODE_SGMII,
- };
ugeth_vdbg("%s: IN", __func__);
@@ -3612,57 +3462,35 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
- ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
- /*
- * In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- err = of_phy_register_fixed_link(np);
- if (err)
- return err;
- ug_info->phy_node = of_node_get(np);
- }
-
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
- /* get the phy interface type, or default to MII */
- prop = of_get_property(np, "phy-connection-type", NULL);
- if (!prop) {
- /* handle interface property present in old trees */
- prop = of_get_property(ug_info->phy_node, "interface", NULL);
- if (prop != NULL) {
- phy_interface = enet_to_phy_interface[*prop];
- max_speed = enet_to_speed[*prop];
- } else
- phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
- phy_interface = to_phy_interface((const char *)prop);
- }
-
- /* get speed, or derive from PHY interface */
- if (max_speed == 0)
- switch (phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_SGMII:
- max_speed = SPEED_1000;
- break;
- default:
- max_speed = SPEED_100;
- break;
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ prop = of_get_property(phy_node, "interface", NULL);
+ if (prop) {
+ dev_err(&ofdev->dev,
+ "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead.");
+ of_node_put(phy_node);
+ err = -EINVAL;
+ goto err_put_tbi;
}
+ of_node_put(phy_node);
+ }
+
+ err = of_get_phy_mode(np, &phy_interface);
+ if (err) {
+ dev_err(&ofdev->dev, "Invalid phy-connection-type");
+ goto err_put_tbi;
+ }
- if (max_speed == SPEED_1000) {
+ if (phy_interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(phy_interface) ||
+ phy_interface == PHY_INTERFACE_MODE_TBI ||
+ phy_interface == PHY_INTERFACE_MODE_RTBI ||
+ phy_interface == PHY_INTERFACE_MODE_SGMII) {
unsigned int snums = qe_get_num_of_snums();
- /* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
@@ -3691,7 +3519,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth));
if (!dev) {
err = -ENOMEM;
- goto err_deregister_fixed_link;
+ goto err_put_tbi;
}
ugeth = netdev_priv(dev);
@@ -3718,23 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
- ugeth->phy_interface = phy_interface;
- ugeth->max_speed = max_speed;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ ugeth->phylink_config.dev = &dev->dev;
+ ugeth->phylink_config.type = PHYLINK_NETDEV;
+
+ ugeth->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ugeth->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces);
+
+ if (ug_info->tbi_node) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TBI,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RTBI,
+ ugeth->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev),
+ phy_interface, &ugeth_mac_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_tbi;
+ }
+
+ ugeth->phylink = phylink;
err = devm_register_netdev(&ofdev->dev, dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
}
err = of_get_ethdev_address(np, dev);
if (err == -EPROBE_DEFER)
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
ugeth->ug_info = ug_info;
ugeth->dev = device;
@@ -3743,11 +3598,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return 0;
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+err_destroy_phylink:
+ phylink_destroy(phylink);
+err_put_tbi:
of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
+
return err;
}
@@ -3755,13 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
ucc_geth_memclean(ugeth);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+ phylink_destroy(ugeth->phylink);
of_node_put(ugeth->ug_info->tbi_node);
- of_node_put(ugeth->ug_info->phy_node);
}
static const struct of_device_id ucc_geth_match[] = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 4294ed096ebb..38789faae706 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/phylink.h>
#include <linux/if_ether.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -921,7 +922,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0
-#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \
+ (7 << MACCFG2_PREL_SHIFT))
/* Ethernet Address Type. */
enum enet_addr_type {
@@ -1073,6 +1075,9 @@ struct ucc_geth_tad_params {
u16 vid;
};
+struct phylink;
+struct phylink_config;
+
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
@@ -1088,7 +1093,6 @@ struct ucc_geth_info {
u8 miminumInterFrameGapEnforcement;
u8 backToBackInterFrameGap;
int ipAddressAlignment;
- int lengthCheckRx;
u32 mblinterval;
u16 nortsrbytetime;
u8 fracsiz;
@@ -1114,7 +1118,6 @@ struct ucc_geth_info {
int transmitFlowControl;
u8 maxGroupAddrInHash;
u8 maxIndAddrInHash;
- u8 prel;
u16 maxFrameLength;
u16 minFrameLength;
u16 maxD1Length;
@@ -1125,7 +1128,6 @@ struct ucc_geth_info {
u32 eventRegMask;
u16 pausePeriod;
u16 extensionField;
- struct device_node *phy_node;
struct device_node *tbi_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
@@ -1210,14 +1212,12 @@ struct ucc_geth_private {
u16 skb_dirtytx[NUM_TX_QUEUES];
struct ugeth_mii_info *mii_info;
- struct phy_device *phydev;
- phy_interface_t phy_interface;
- int max_speed;
uint32_t msg_enable;
- int oldspeed;
- int oldduplex;
- int oldlink;
- int wol_en;
+ u32 wol_en;
+ u32 phy_wol_en;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct device_node *node;
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 699f346faf5c..1fb49e5a414a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -104,14 +104,8 @@ static int
uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(ugeth->phylink, cmd);
}
static int
@@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_ksettings_set(phydev, cmd);
+ return phylink_ethtool_ksettings_set(ugeth->phylink, cmd);
}
static void
@@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev,
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- pause->autoneg = ugeth->phydev->autoneg;
-
- if (ugeth->ug_info->receiveFlowControl)
- pause->rx_pause = 1;
- if (ugeth->ug_info->transmitFlowControl)
- pause->tx_pause = 1;
+ return phylink_ethtool_get_pauseparam(ugeth->phylink, pause);
}
static int
@@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- int ret = 0;
ugeth->ug_info->receiveFlowControl = pause->rx_pause;
ugeth->ug_info->transmitFlowControl = pause->tx_pause;
- if (ugeth->phydev->autoneg) {
- if (netif_running(netdev)) {
- /* FIXME: automatically restart */
- netdev_info(netdev, "Please re-open the interface\n");
- }
- } else {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
-
- ret = init_flow_control_params(ug_info->aufc,
- ug_info->receiveFlowControl,
- ug_info->transmitFlowControl,
- ug_info->pausePeriod,
- ug_info->extensionField,
- &ugeth->uccf->uf_regs->upsmr,
- &ugeth->ug_regs->uempr,
- &ugeth->ug_regs->maccfg1);
- }
-
- return ret;
+ return phylink_ethtool_set_pauseparam(ugeth->phylink, pause);
}
static uint32_t
@@ -343,28 +309,42 @@ uec_get_drvinfo(struct net_device *netdev,
static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (phydev && phydev->irq)
- wol->supported |= WAKE_PHY;
+ phylink_ethtool_get_wol(ugeth->phylink, wol);
+
if (qe_alive_during_sleep())
wol->supported |= WAKE_MAGIC;
- wol->wolopts = ugeth->wol_en;
+ wol->wolopts |= ugeth->wol_en;
}
static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
+ int ret = 0;
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
- else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ ret = phylink_ethtool_set_wol(ugeth->phylink, wol);
+ if (ret == -EOPNOTSUPP) {
+ ugeth->phy_wol_en = 0;
+ } else if (ret) {
+ return ret;
+ } else {
+ ugeth->phy_wol_en = wol->wolopts;
+ goto out;
+ }
+
+	/* If the PHY isn't handling WoL and the MAC is asked for more than
+	 * WAKE_MAGIC, return an error
+	 */
+ if (!ugeth->phy_wol_en &&
+ wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+
+ if (wol->wolopts & WAKE_MAGIC &&
+ !qe_alive_during_sleep())
return -EINVAL;
+out:
ugeth->wol_en = wol->wolopts;
device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
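
The reworked uec_set_wol() above delegates Wake-on-LAN to phylink first and only falls back to MAC-level magic-packet wake when the PHY reports -EOPNOTSUPP. Below is a minimal standalone sketch of that decision flow, where the hypothetical phy_set_wol() stands in for phylink_ethtool_set_wol() and mac_magic_ok for qe_alive_during_sleep():

/* Sketch of the WoL delegation in uec_set_wol(); phy_set_wol() and
 * mac_magic_ok are stand-ins for the real phylink/QE calls.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define WAKE_PHY   0x1
#define WAKE_MAGIC 0x2

/* Pretend the PHY only supports WAKE_PHY. */
static int phy_set_wol(unsigned int wolopts)
{
	if (wolopts & ~WAKE_PHY)
		return -EOPNOTSUPP;
	return 0;
}

static int set_wol(unsigned int wolopts, bool mac_magic_ok,
		   unsigned int *phy_wol_en, unsigned int *wol_en)
{
	int ret = phy_set_wol(wolopts);

	if (ret == 0) {
		/* PHY handles everything that was requested. */
		*phy_wol_en = wolopts;
	} else if (ret != -EOPNOTSUPP) {
		return ret;
	} else {
		/* Fall back to the MAC, which only does WAKE_MAGIC. */
		*phy_wol_en = 0;
		if (wolopts & ~WAKE_MAGIC)
			return -EINVAL;
		if ((wolopts & WAKE_MAGIC) && !mac_magic_ok)
			return -EINVAL;
	}

	*wol_en = wolopts;
	return 0;
}

int main(void)
{
	unsigned int phy_en, en;

	printf("%d\n", set_wol(WAKE_PHY, true, &phy_en, &en));    /* 0 */
	printf("%d\n", set_wol(WAKE_MAGIC, true, &phy_en, &en));  /* 0 */
	printf("%d\n", set_wol(WAKE_MAGIC, false, &phy_en, &en)); /* -22 */
	return 0;
}
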
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 060e0e674938..aa7d723011d0 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -1128,20 +1128,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
-{
- union gve_adminq_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
- cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
- .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
- .parameter_value = cpu_to_be64(mtu),
- };
-
- return gve_adminq_execute_cmd(priv, &cmd);
-}
-
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval)
{
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 863683de9694..228217458275 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -612,7 +612,6 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f879426cb552..394debc62268 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -1146,8 +1146,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv,
/* jiffies can wraparound but time comparisons can handle overflows. */
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT);
add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
*bytes += pending_packet->skb->len;
@@ -1191,8 +1190,7 @@ static void remove_miss_completions(struct gve_priv *priv,
pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT);
/* Maintain pending packet in another list so the packet can be
* unallocated at a later time.
*/
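
Both gve hunks above replace msecs_to_jiffies(timeout * MSEC_PER_SEC) with secs_to_jiffies(timeout), dropping the intermediate millisecond multiply. A sketch of why the two are equivalent, using simplified helper definitions (the real kernel versions also round up and clamp against overflow) and an assumed HZ:

/* Equivalence check for the secs_to_jiffies() conversion; HZ and the
 * 5-second timeout are assumptions for illustration, and the helpers
 * are simplified (no rounding or clamping).
 */
#include <assert.h>
#include <stdio.h>

#define HZ           250UL
#define MSEC_PER_SEC 1000UL

static unsigned long msecs_to_jiffies(unsigned long m)
{
	return m * HZ / MSEC_PER_SEC;
}

static unsigned long secs_to_jiffies(unsigned long s)
{
	return s * HZ;
}

int main(void)
{
	unsigned long timeout_secs = 5; /* stand-in for the GVE timeouts */

	assert(msecs_to_jiffies(timeout_secs * MSEC_PER_SEC) ==
	       secs_to_jiffies(timeout_secs));
	printf("%lu jiffies\n", secs_to_jiffies(timeout_secs));
	return 0;
}
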
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index ae58ac38c206..7ea15f9ef849 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
-hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+ hbg_debugfs.o hbg_err.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 96daf058d387..b4300d8ea4ad 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -4,6 +4,7 @@
#ifndef __HBG_COMMON_H
#define __HBG_COMMON_H
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_reg.h"
@@ -33,6 +34,14 @@ enum hbg_tx_state {
enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
+ HBG_NIC_STATE_RESETTING,
+ HBG_NIC_STATE_RESET_FAIL,
+};
+
+enum hbg_reset_type {
+ HBG_RESET_TYPE_NONE = 0,
+ HBG_RESET_TYPE_FLR,
+ HBG_RESET_TYPE_FUNCTION,
};
struct hbg_buffer {
@@ -84,6 +93,7 @@ struct hbg_dev_specs {
u32 vlan_layers;
u32 max_mtu;
u32 min_mtu;
+ u32 uc_mac_num;
u32 max_frame_len;
u32 rx_buf_size;
@@ -114,6 +124,22 @@ struct hbg_mac {
u32 duplex;
u32 autoneg;
u32 link_status;
+ u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+ u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+ struct hbg_mac_table_entry *mac_table;
+ u32 table_max_len;
+ bool enabled;
+};
+
+/* saved for restore after reset */
+struct hbg_user_def {
+ struct ethtool_pauseparam pause_param;
};
struct hbg_priv {
@@ -126,6 +152,9 @@ struct hbg_priv {
struct hbg_vector vectors;
struct hbg_ring tx_ring;
struct hbg_ring rx_ring;
+ struct hbg_mac_filter filter;
+ enum hbg_reset_type reset_type;
+ struct hbg_user_def user_def;
};
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..8473c43d171a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+ const char *name;
+ int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+ struct seq_file *s)
+{
+ u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+ HBG_INT_MSK_RX_B;
+
+ seq_printf(s, "ring used num: %u\n",
+ hbg_get_queue_used_num(ring));
+ seq_printf(s, "ring max num: %u\n", ring->len);
+ seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+ seq_printf(s, "fifo used num: %u\n",
+ hbg_hw_get_fifo_used_num(priv, ring->dir));
+ seq_printf(s, "fifo max num: %u\n",
+ hbg_get_spec_fifo_max_num(priv, ring->dir));
+ seq_printf(s, "irq enabled: %s\n",
+ str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->tx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->rx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ seq_printf(s,
+ "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n",
+ info->name,
+ str_true_false(hbg_hw_irq_is_enabled(priv,
+ info->mask)),
+ str_true_false(info->need_print),
+ info->count);
+ }
+
+ return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_mac_filter *filter;
+ u32 i;
+
+ filter = &priv->filter;
+ seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+ seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+ for (i = 0; i < filter->table_max_len; i++) {
+ if (is_zero_ether_addr(filter->mac_table[i].addr))
+ continue;
+
+ seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+ }
+
+ return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ seq_printf(s, "event handling state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
+ seq_printf(s, "resetting state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESETTING));
+ seq_printf(s, "reset fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
+ seq_printf(s, "last reset type: %s\n",
+ reset_type_str[priv->reset_type]);
+
+ return 0;
+}
+
+static const struct hbg_dbg_info hbg_dbg_infos[] = {
+ { "tx_ring", hbg_dbg_tx_ring },
+ { "rx_ring", hbg_dbg_rx_ring },
+ { "irq_info", hbg_dbg_irq_info },
+ { "mac_table", hbg_dbg_mac_table },
+ { "nic_state", hbg_dbg_nic_state },
+};
+
+static void hbg_debugfs_uninit(void *data)
+{
+ debugfs_remove_recursive((struct dentry *)data);
+}
+
+void hbg_debugfs_init(struct hbg_priv *priv)
+{
+ const char *name = pci_name(priv->pdev);
+ struct device *dev = &priv->pdev->dev;
+ struct dentry *root;
+ u32 i;
+
+ root = debugfs_create_dir(name, hbg_dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++)
+ debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name,
+ root, hbg_dbg_infos[i].read);
+
+ /* Ignore the failure because debugfs is not a key feature. */
+ devm_add_action_or_reset(dev, hbg_debugfs_uninit, root);
+}
+
+void hbg_debugfs_register(void)
+{
+ hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL);
+}
+
+void hbg_debugfs_unregister(void)
+{
+ debugfs_remove_recursive(hbg_dbgfs_root);
+ hbg_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
new file mode 100644
index 000000000000..80670d66bbeb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_DEBUGFS_H
+#define __HBG_DEBUGFS_H
+
+void hbg_debugfs_register(void);
+void hbg_debugfs_unregister(void);
+
+void hbg_debugfs_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
new file mode 100644
index 000000000000..4d1f4a33391a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_hw.h"
+
+static void hbg_restore_mac_table(struct hbg_priv *priv)
+{
+ struct hbg_mac_filter *filter = &priv->filter;
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < filter->table_max_len; i++)
+ if (!is_zero_ether_addr(filter->mac_table[i].addr)) {
+ addr = ether_addr_to_u64(filter->mac_table[i].addr);
+ hbg_hw_set_uc_addr(priv, addr, i);
+ }
+
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_restore_user_def_settings(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ hbg_restore_mac_table(priv);
+ hbg_hw_set_mtu(priv, priv->netdev->mtu);
+ hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
+ pause_param->rx_pause);
+}
+
+int hbg_rebuild(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_restore_user_def_settings(priv);
+ return 0;
+}
+
+static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (netif_running(priv->netdev)) {
+ dev_warn(&priv->pdev->dev,
+ "failed to reset because port is up\n");
+ return -EBUSY;
+ }
+
+ priv->reset_type = type;
+ set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ }
+
+ return ret;
+}
+
+static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) ||
+ type != priv->reset_type)
+ return 0;
+
+ ASSERT_RTNL();
+
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ ret = hbg_rebuild(priv);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
+ return ret;
+ }
+
+ dev_info(&priv->pdev->dev, "reset done\n");
+ return ret;
+}
+
+/* must be protected by rtnl lock */
+int hbg_reset(struct hbg_priv *priv)
+{
+ int ret;
+
+ ASSERT_RTNL();
+ ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
+ if (ret)
+ return ret;
+
+ return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
+}
+
+static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ rtnl_lock();
+ hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
+}
+
+static void hbg_pci_err_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers hbg_pci_err_handler = {
+ .reset_prepare = hbg_pci_err_reset_prepare,
+ .reset_done = hbg_pci_err_reset_done,
+};
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv)
+{
+ pdrv->err_handler = &hbg_pci_err_handler;
+}
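
hbg_reset_prepare() and hbg_reset_done() above form a small state machine: prepare latches HBG_NIC_STATE_RESETTING, done only rebuilds when the reset type matches the one in flight, and a failure at either step sets HBG_NIC_STATE_RESET_FAIL. A standalone model of those transitions, where the boolean parameters stub out hbg_hw_event_notify() and hbg_rebuild(), which touch real hardware:

/* Standalone model of the hbg reset state machine. */
#include <stdbool.h>
#include <stdio.h>

enum { STATE_RESETTING, STATE_RESET_FAIL };
enum reset_type { RESET_NONE, RESET_FLR, RESET_FUNCTION };

struct nic {
	unsigned long state;
	enum reset_type reset_type;
};

#define set_bit(b, s)   (*(s) |= 1UL << (b))
#define clear_bit(b, s) (*(s) &= ~(1UL << (b)))
#define test_bit(b, s)  (!!(*(s) & (1UL << (b))))

static int reset_prepare(struct nic *n, enum reset_type type, bool hw_ok)
{
	n->reset_type = type;
	set_bit(STATE_RESETTING, &n->state);
	clear_bit(STATE_RESET_FAIL, &n->state);
	if (!hw_ok) {
		set_bit(STATE_RESET_FAIL, &n->state);
		clear_bit(STATE_RESETTING, &n->state);
		return -1;
	}
	return 0;
}

static int reset_done(struct nic *n, enum reset_type type, bool rebuild_ok)
{
	/* A "done" that doesn't match an in-flight reset is a no-op. */
	if (!test_bit(STATE_RESETTING, &n->state) || type != n->reset_type)
		return 0;

	clear_bit(STATE_RESETTING, &n->state);
	if (!rebuild_ok) {
		set_bit(STATE_RESET_FAIL, &n->state);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct nic n = { 0, RESET_NONE };

	reset_prepare(&n, RESET_FUNCTION, true);
	printf("mismatch: %d\n", reset_done(&n, RESET_FLR, true));      /* 0 */
	printf("match:    %d\n", reset_done(&n, RESET_FUNCTION, true)); /* 0 */
	printf("fail bit: %d\n", test_bit(STATE_RESET_FAIL, &n.state)); /* 0 */
	return 0;
}
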
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
new file mode 100644
index 000000000000..d7828e446308
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ERR_H
+#define __HBG_ERR_H
+
+#include <linux/pci.h>
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv);
+int hbg_reset(struct hbg_priv *priv);
+int hbg_rebuild(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index c3370114aef3..00364a438ec2 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -3,12 +3,193 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+
+enum hbg_reg_dump_type {
+ HBG_DUMP_REG_TYPE_SPEC = 0,
+ HBG_DUMP_REG_TYPE_MDIO,
+ HBG_DUMP_REG_TYPE_GMAC,
+ HBG_DUMP_REG_TYPE_PCU,
+};
+
+struct hbg_reg_info {
+ u32 type;
+ u32 offset;
+ u32 val;
+};
+
+#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0}
+#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0}
+#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0}
+#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0}
+
+static const struct hbg_reg_info hbg_dump_reg_infos[] = {
+ /* dev specs */
+ HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR),
+
+ /* mdio */
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR),
+
+ /* gmac */
+ HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR),
+
+ /* pcu */
+ HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR),
+};
+
+static const u32 hbg_dump_type_base_array[] = {
+ [HBG_DUMP_REG_TYPE_SPEC] = 0,
+ [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE,
+ [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE,
+ [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE,
+};
+
+static int hbg_ethtool_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info);
+}
+
+static void hbg_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_reg_info *info;
+ u32 i, offset = 0;
+
+ regs->version = 0;
+ for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) {
+ info = data + offset;
+
+ *info = hbg_dump_reg_infos[i];
+ info->val = hbg_reg_read(priv, info->offset);
+ info->offset -= hbg_dump_type_base_array[info->type];
+
+ offset += sizeof(*info);
+ }
+}
+
+static void hbg_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
+}
+
+static int hbg_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ priv->mac.pause_autoneg = param->autoneg;
+ phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause);
+
+ if (!param->autoneg)
+ hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
+
+ priv->user_def.pause_param = *param;
+ return 0;
+}
+
+static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (*flags != ETH_RESET_DEDICATED)
+ return -EOPNOTSUPP;
+
+ *flags = 0;
+ return hbg_reset(priv);
+}
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = hbg_ethtool_get_regs_len,
+ .get_regs = hbg_ethtool_get_regs,
+ .get_pauseparam = hbg_ethtool_get_pauseparam,
+ .set_pauseparam = hbg_ethtool_set_pauseparam,
+ .reset = hbg_ethtool_reset,
+ .nway_reset = phy_ethtool_nway_reset,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
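
hbg_ethtool_get_regs() above fills the dump buffer with fixed-size {type, offset, val} u32 triplets, rebasing each offset against its block's base address. A hypothetical userspace-side walk over such a buffer; only the record layout comes from the driver, and the sample records are made up:

/* Hypothetical parser for the hbg ethtool register dump layout. */
#include <stdint.h>
#include <stdio.h>

struct hbg_reg_info {
	uint32_t type;   /* 0 spec, 1 mdio, 2 gmac, 3 pcu */
	uint32_t offset; /* relative to the block base */
	uint32_t val;
};

static const char * const type_names[] = {
	"spec", "mdio", "gmac", "pcu",
};

int main(void)
{
	struct hbg_reg_info dump[] = {
		{ 0, 0x0028, 0x000005dc }, /* made-up MAX_MTU record */
		{ 1, 0x0000, 0x00000000 }, /* made-up MDIO_COMMAND record */
	};
	size_t n = sizeof(dump) / sizeof(dump[0]);

	for (size_t i = 0; i < n; i++)
		printf("%-4s +0x%04x = 0x%08x\n",
		       type_names[dump[i].type],
		       (unsigned)dump[i].offset, (unsigned)dump[i].val);
	return 0;
}
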
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 05295c2ad439..e7798f213645 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
@@ -67,6 +68,8 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);
+
mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
@@ -135,9 +138,13 @@ void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
- hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
+ u32 addr;
+
+	/* each mac addr entry is a u64, so entries are 0x8 apart */
+ addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
+ hbg_reg_write64(priv, addr, mac_addr);
}
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
@@ -161,8 +168,13 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
- hbg_hw_set_pcu_max_frame_len(priv, mtu);
- hbg_hw_set_mac_max_frame_len(priv, mtu);
+ u32 frame_len;
+
+ frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+
+ hbg_hw_set_pcu_max_frame_len(priv, frame_len);
+ hbg_hw_set_mac_max_frame_len(priv, frame_len);
}
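
hbg_hw_set_mtu() now derives the maximum on-wire frame length itself: the MTU plus one 4-byte VLAN tag per supported VLAN layer, the 14-byte Ethernet header, and the 4-byte FCS. A worked check of the arithmetic, assuming two VLAN layers:

/* Worked check of the frame length formula in hbg_hw_set_mtu();
 * vlan_layers = 2 is an assumption for illustration.
 */
#include <stdio.h>

#define ETH_HLEN    14 /* dst + src + ethertype */
#define ETH_FCS_LEN 4
#define VLAN_HLEN   4  /* one 802.1Q tag */

int main(void)
{
	unsigned int mtu = 1500, vlan_layers = 2;
	unsigned int frame_len = mtu + VLAN_HLEN * vlan_layers +
				 ETH_HLEN + ETH_FCS_LEN;

	printf("frame_len = %u\n", frame_len); /* 1526 */
	return 0;
}
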
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -207,6 +219,34 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
HBG_REG_DUPLEX_B, duplex);
}
+/* only unicast filtering is supported */
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+}
+
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
+{
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+}
+
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
+{
+ *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B);
+ *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B);
+}
+
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
+}
+
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
u32 ctrl = 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 14fb39241c93..a4a049b5121d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -51,9 +51,13 @@ bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index);
u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en);
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en);
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 75505fb5cc4a..bb0f25ac9760 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -6,13 +6,13 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_irq.h"
#include "hbg_mdio.h"
#include "hbg_txrx.h"
-
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu);
+#include "hbg_debugfs.h"
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
@@ -55,11 +55,7 @@ static int hbg_hw_txrx_clear(struct hbg_priv *priv)
return ret;
/* After reset, regs need to be reconfigured */
- hbg_hw_init(priv);
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr));
- hbg_change_mtu(priv, priv->netdev->mtu);
-
- return 0;
+ return hbg_rebuild(priv);
}
static int hbg_net_stop(struct net_device *netdev)
@@ -74,31 +70,127 @@ static int hbg_net_stop(struct net_device *netdev)
return hbg_hw_txrx_clear(priv);
}
+static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+	/* The MAC filter is enabled only when the table has not overflowed
+	 * and IFF_PROMISC is not set in netdev->flags.
+	 * Otherwise the filter is disabled.
+	 */
+ priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
+ u32 index, const u8 *addr)
+{
+ if (addr) {
+ ether_addr_copy(priv->filter.mac_table[index].addr, addr);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
+ } else {
+ eth_zero_addr(priv->filter.mac_table[index].addr);
+ hbg_hw_set_uc_addr(priv, 0, index);
+ }
+}
+
+static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
+ const u8 *addr, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < priv->filter.table_max_len; i++)
+ if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
+ *index = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* already exists */
+ if (!hbg_get_index_from_mac_table(priv, addr, &index))
+ return 0;
+
+ for (index = 0; index < priv->filter.table_max_len; index++)
+ if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
+ hbg_set_mac_to_mac_table(priv, index, addr);
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* not exists */
+ if (hbg_get_index_from_mac_table(priv, addr, &index))
+ return;
+
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+}
+
+static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ return hbg_add_mac_to_filter(priv, addr);
+}
+
+static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
+ return 0;
+
+ hbg_del_mac_from_filter(priv, addr);
+ return 0;
+}
+
+static void hbg_net_set_rx_mode(struct net_device *netdev)
+{
+ int ret;
+
+ ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
+
+ /* If ret != 0, overflow has occurred */
+ hbg_update_promisc_mode(netdev, !!ret);
+}
+
static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
struct hbg_priv *priv = netdev_priv(netdev);
u8 *mac_addr;
+ bool exists;
+ u32 index;
mac_addr = ((struct sockaddr *)addr)->sa_data;
if (!is_valid_ether_addr(mac_addr))
return -EADDRNOTAVAIL;
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr));
- dev_addr_set(netdev, mac_addr);
+	/* The host MAC always lives at index 0.
+	 * If the new MAC address already exists in the table,
+	 * delete the existing entry and install the
+	 * address at index 0.
+	 */
+ exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
+ hbg_set_mac_to_mac_table(priv, 0, mac_addr);
+ if (exists)
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+ hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
return 0;
}
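
The filter paths above all reduce to a linear scan of a small table: lookup by compare, insert into the first zero slot (overflow makes the caller fall back to promiscuous mode), plus hbg_net_set_mac_address()'s rule that the host MAC always occupies index 0 and any duplicate entry is cleared. A standalone model of that table logic, with an assumed table size:

/* Standalone model of the hbg unicast MAC table; TABLE_MAX_LEN is an
 * assumption (the driver reads the size from HBG_REG_UC_MAC_NUM_ADDR).
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN      6
#define TABLE_MAX_LEN 4

static unsigned char table[TABLE_MAX_LEN][ETH_ALEN];

static bool is_zero(const unsigned char *a)
{
	static const unsigned char zero[ETH_ALEN];

	return memcmp(a, zero, ETH_ALEN) == 0;
}

static int find(const unsigned char *a)
{
	for (int i = 0; i < TABLE_MAX_LEN; i++)
		if (memcmp(table[i], a, ETH_ALEN) == 0)
			return i;
	return -1;
}

static int add(const unsigned char *a)
{
	if (find(a) >= 0)
		return 0; /* already present */
	for (int i = 0; i < TABLE_MAX_LEN; i++)
		if (is_zero(table[i])) {
			memcpy(table[i], a, ETH_ALEN);
			return 0;
		}
	return -1; /* full: caller falls back to promiscuous mode */
}

static void set_host_mac(const unsigned char *a)
{
	int old = find(a);

	memcpy(table[0], a, ETH_ALEN); /* host MAC is always index 0 */
	if (old > 0)
		memset(table[old], 0, ETH_ALEN); /* clear the duplicate */
}

int main(void)
{
	unsigned char m1[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };
	unsigned char m2[ETH_ALEN] = { 2, 0, 0, 0, 0, 2 };

	add(m1);
	add(m2);
	set_host_mac(m2);               /* m2 moves to slot 0 */
	printf("m2 at %d\n", find(m2)); /* 0 */
	return 0;
}
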
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu)
-{
- u32 frame_len;
-
- frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
- ETH_HLEN + ETH_FCS_LEN;
- hbg_hw_set_mtu(priv, frame_len);
-}
-
static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -106,7 +198,7 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
return -EBUSY;
- hbg_change_mtu(priv, new_mtu);
+ hbg_hw_set_mtu(priv, new_mtu);
WRITE_ONCE(netdev->mtu, new_mtu);
dev_dbg(&priv->pdev->dev,
@@ -142,8 +234,39 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_set_mac_address = hbg_net_set_mac_address,
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
+ .ndo_set_rx_mode = hbg_net_set_rx_mode,
};
+static int hbg_mac_filter_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *dev_specs = &priv->dev_specs;
+ struct hbg_mac_filter *filter = &priv->filter;
+ struct hbg_mac_table_entry *tmp_table;
+
+ tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
+ sizeof(*tmp_table), GFP_KERNEL);
+ if (!tmp_table)
+ return -ENOMEM;
+
+ filter->mac_table = tmp_table;
+ filter->table_max_len = dev_specs->uc_mac_num;
+ filter->enabled = true;
+
+ hbg_hw_set_mac_filter_enable(priv, filter->enabled);
+ return 0;
+}
+
+static void hbg_init_user_def(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ priv->mac.pause_autoneg = HBG_STATUS_ENABLE;
+
+ pause_param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
+ &pause_param->rx_pause);
+}
+
static int hbg_init(struct hbg_priv *priv)
{
int ret;
@@ -160,7 +283,17 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
- return hbg_mdio_init(priv);
+ ret = hbg_mdio_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mac_filter_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_debugfs_init(priv);
+ hbg_init_user_def(priv);
+ return 0;
}
static int hbg_pci_init(struct pci_dev *pdev)
@@ -216,13 +349,15 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netdev->max_mtu = priv->dev_specs.max_mtu;
netdev->min_mtu = priv->dev_specs.min_mtu;
netdev->netdev_ops = &hbg_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- hbg_change_mtu(priv, ETH_DATA_LEN);
+ hbg_hw_set_mtu(priv, ETH_DATA_LEN);
hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
hbg_ethtool_set_ops(netdev);
@@ -245,7 +380,27 @@ static struct pci_driver hbg_driver = {
.id_table = hbg_pci_tbl,
.probe = hbg_probe,
};
-module_pci_driver(hbg_driver);
+
+static int __init hbg_module_init(void)
+{
+ int ret;
+
+ hbg_debugfs_register();
+ hbg_set_pci_err_handler(&hbg_driver);
+ ret = pci_register_driver(&hbg_driver);
+ if (ret)
+ hbg_debugfs_unregister();
+
+ return ret;
+}
+module_init(hbg_module_init);
+
+static void __exit hbg_module_exit(void)
+{
+ pci_unregister_driver(&hbg_driver);
+ hbg_debugfs_unregister();
+}
+module_exit(hbg_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index a3479fba8501..db6bc4cfb971 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -114,6 +114,19 @@ static void hbg_mdio_init_hw(struct hbg_priv *priv)
hbg_mdio_set_command(mac, cmd);
}
+static void hbg_flowctrl_cfg(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ bool rx_pause;
+ bool tx_pause;
+
+ if (!priv->mac.pause_autoneg)
+ return;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
+ hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -140,6 +153,7 @@ static void hbg_phy_adjust_link(struct net_device *netdev)
priv->mac.duplex = phydev->duplex;
priv->mac.autoneg = phydev->autoneg;
hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ hbg_flowctrl_cfg(priv);
}
priv->mac.link_status = phydev->link;
@@ -168,6 +182,7 @@ static int hbg_phy_connect(struct hbg_priv *priv)
return ret;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 57d81c6d7633..f12efc12f3c5 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -10,6 +10,8 @@
#define HBG_REG_MAC_ID_ADDR 0x0008
#define HBG_REG_PHY_ID_ADDR 0x000C
#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014
+#define HBG_REG_UC_MAC_NUM_ADDR 0x0018
#define HBG_REG_MDIO_FREQ_ADDR 0x0024
#define HBG_REG_MAX_MTU_ADDR 0x0028
#define HBG_REG_MIN_MTU_ADDR 0x002C
@@ -28,6 +30,7 @@
#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004)
#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
@@ -36,6 +39,10 @@
/* GMAC */
#define HBG_REG_SGMII_BASE 0x10000
#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C)
+#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C)
+#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020)
+#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024)
#define HBG_REG_DUPLEX_B BIT(0)
#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
@@ -43,20 +50,42 @@
#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048)
+#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
+#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
+#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
+#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC)
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
+#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
+#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
+#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C)
#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218)
+#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C)
+#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220)
+#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
+#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
+#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
/* PCU */
+#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
+#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -78,11 +107,17 @@
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C)
+#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
+#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
+#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
@@ -101,6 +136,10 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
+#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
+#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
+#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688)
#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
#define HBG_REG_IND_INTR_MASK_B BIT(0)
#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 851490346261..6b6ced37e490 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3019,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
-/**
- * hns_dsaf_roce_reset - reset dsaf and roce
- * @dsaf_fwnode: Pointer to framework node for the dasf
- * @dereset: false - request reset , true - drop reset
- * return 0 - success , negative -fail
- */
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
-{
- struct dsaf_device *dsaf_dev;
- struct platform_device *pdev;
- u32 mp;
- u32 sl;
- u32 credit;
- int i;
- static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- };
- static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- };
-
- /* find the platform device corresponding to fwnode */
- if (is_of_node(dsaf_fwnode)) {
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
- } else if (is_acpi_device_node(dsaf_fwnode)) {
- pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
- } else {
- pr_err("fwnode is neither OF or ACPI type\n");
- return -EINVAL;
- }
-
- /* check if we were a success in fetching pdev */
- if (!pdev) {
- pr_err("couldn't find platform device for node\n");
- return -ENODEV;
- }
-
- /* retrieve the dsaf_device from the driver data */
- dsaf_dev = dev_get_drvdata(&pdev->dev);
- if (!dsaf_dev) {
- dev_err(&pdev->dev, "dsaf_dev is NULL\n");
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* now, make sure we are running on compatible SoC */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
- dsaf_dev->ae_dev.name);
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* do reset or de-reset according to the flag */
- if (!dereset) {
- /* reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- false);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
- } else {
- /* configure dsaf tx roce correspond to port map and sl map */
- mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(mp, 7 << i * 3, i * 3,
- port_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
-
- sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(sl, 3 << i * 2, i * 2,
- sl_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
-
- /* de-reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- true);
- msleep(SRST_TIME_INTERVAL);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
-
- /* enable dsaf channel rocee credit */
- credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
-
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
- }
-
- put_device(&pdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL(hns_dsaf_roce_reset);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0eb03dff1a8b..653dfbb25d1b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -42,29 +42,6 @@ struct hns_mac_cb;
#define HNS_MAX_WAIT_CNT 10000
-enum dsaf_roce_port_mode {
- DSAF_ROCE_6PORT_MODE,
- DSAF_ROCE_4PORT_MODE,
- DSAF_ROCE_2PORT_MODE,
- DSAF_ROCE_CHAN_MODE_NUM,
-};
-
-enum dsaf_roce_port_num {
- DSAF_ROCE_PORT_0,
- DSAF_ROCE_PORT_1,
- DSAF_ROCE_PORT_2,
- DSAF_ROCE_PORT_3,
- DSAF_ROCE_PORT_4,
- DSAF_ROCE_PORT_5,
-};
-
-enum dsaf_roce_qos_sl {
- DSAF_ROCE_SL_0,
- DSAF_ROCE_SL_1,
- DSAF_ROCE_SL_2,
- DSAF_ROCE_SL_3,
-};
-
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -307,9 +284,6 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
- void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
- bool dereset);
- void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 5df19c604d09..91391a49fcea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-/**
- * hns_dsaf_srst_chns - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- u32 reg_addr;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, msk);
-}
-
-/**
- * hns_dsaf_srst_chns_acpi - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_DSAF_CHN_RESET_FUNC,
- msk, dereset);
-}
-
-static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
-{
- if (!dereset) {
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
- } else {
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
- msleep(20);
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
- }
-}
-
-static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_ROCE_RESET_FUNC, 0, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 46af467aa596..635b3a95dd82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
-void hns_rcb_start(struct hnae_queue *q, u32 val)
-{
- hns_rcb_ring_enable_hw(q, val);
-}
-
/**
*hns_rcb_common_init_commit_hw - make rcb common init completed
*@rcb_common: rcb common device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 0f4cc184ef39..68f81547dfb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
-void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index f81a43d2cdfc..486fb0e20bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
err = HINIC_MGMT_CMD_UNSUPPORTED;
} else if (err || !out_size || vlan_filter.status) {
dev_err(&pdev->dev,
- "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
err, vlan_filter.status, out_size);
err = -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 20bc40eec487..24ec9a4f1ffa 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -292,6 +292,7 @@ config ICE
select DIMLIB
select LIBIE
select NET_DEVLINK
+ select PACKING
select PLDMFW
select DPLL
help
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 98861cc6df7c..b9dd7b719832 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
}
/**
- * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
- * @hw: Pointer to hardware structure
- * @results: Pointer array to message, results[0] is pointer to message
- * @mbx: Pointer to mailbox information structure
- *
- * This function is a default handler for MAC/VLAN requests from the VF.
- * The assumption is that in this case it is acceptable to just directly
- * hand off the message from the VF to the underlying shared code.
- **/
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
-{
- struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u8 mac[ETH_ALEN];
- u32 *result;
- int err = 0;
- bool set;
- u16 vlan;
- u32 vid;
-
- /* we shouldn't be updating rules on a disabled interface */
- if (!FM10K_VF_FLAG_ENABLED(vf_info))
- err = FM10K_ERR_PARAM;
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
- result = results[FM10K_MAC_VLAN_MSG_VLAN];
-
- /* record VLAN id requested */
- err = fm10k_tlv_attr_get_u32(result, &vid);
- if (err)
- return err;
-
- set = !(vid & FM10K_VLAN_CLEAR);
- vid &= ~FM10K_VLAN_CLEAR;
-
- /* if the length field has been set, this is a multi-bit
- * update request. For multi-bit requests, simply disallow
- * them when the pf_vid has been set. In this case, the PF
- * should have already cleared the VLAN_TABLE, and if we
- * allowed them, it could allow a rogue VF to receive traffic
- * on a VLAN it was not assigned. In the single-bit case, we
- * need to modify requests for VLAN 0 to use the default PF or
- * SW vid when assigned.
- */
-
- if (vid >> 16) {
- /* prevent multi-bit requests when PF has
- * administratively set the VLAN for this VF
- */
- if (vf_info->pf_vid)
- return FM10K_ERR_PARAM;
- } else {
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
-
- vid = err;
- }
-
- /* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
- result = results[FM10K_MAC_VLAN_MSG_MAC];
-
- /* record unicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* block attempts to set MAC for a locked device */
- if (is_valid_ether_addr(vf_info->mac) &&
- !ether_addr_equal(mac, vf_info->mac))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
- mac, vlan, set, 0);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
- result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
-
- /* record multicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* verify that the VF is allowed to request multicast */
- if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
- mac, vlan, set);
- }
-
- return err;
-}
-
-/**
* fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
* @vf_info: VF info structure containing capability flags
* @mode: Requested xcast mode
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 8e814df709d2..ad3696893cb1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid);
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
- struct fm10k_mbx_info *);
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d4255c2706fa..c67963bfe14e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -88,6 +88,7 @@ enum i40e_state {
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
+ __I40E_MDD_VF_PRINT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
__I40E_TIMEOUT_RECOVERY_PENDING,
@@ -191,6 +192,7 @@ enum i40e_pf_flags {
*/
I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_FLAG_MDD_AUTO_RESET_VF,
I40E_PF_FLAGS_NBITS, /* must be last */
};
@@ -572,7 +574,7 @@ struct i40e_pf {
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
-
+ struct ratelimit_state mdd_message_rate_limit;
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
* based agent (LLDPAD). Also, indicates what
@@ -1189,7 +1191,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
u32 i40e_get_current_fd_count(struct i40e_pf *pf);
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
@@ -1197,7 +1198,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -1313,7 +1313,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
int i40e_get_partition_bw_setting(struct i40e_pf *pf);
int i40e_set_partition_bw_setting(struct i40e_pf *pf);
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index f73f5930fc58..175c1320c143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-int
-i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
-{
- return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
- cmd_details, true, aq_status);
-}
-
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e8031f1a9b4f..370b4bddee44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1805,37 +1805,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @cmd_details: pointer to command details structure or NULL
- **/
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- u16 flags = 0;
- int status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
- flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
- cmd->promiscuous_flags = cpu_to_le16(flags);
- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
- cmd->seid = cpu_to_le16(seid);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2436,136 +2405,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
- * @hw: pointer to the hw struct
- * @opcode: AQ opcode for add or delete mirror rule
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @id: Destination VSI SEID or Rule ID
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
- * VEBs/VEPA elements only
- **/
-static int i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule *cmd =
- (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
- struct i40e_aqc_add_delete_mirror_rule_completion *resp =
- (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- u16 buf_size;
- int status;
-
- buf_size = count * sizeof(*mr_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, opcode);
- cmd->seid = cpu_to_le16(sw_seid);
- cmd->rule_type = cpu_to_le16(rule_type &
- I40E_AQC_MIRROR_RULE_TYPE_MASK);
- cmd->num_entries = cpu_to_le16(count);
- /* Dest VSI for add, rule_id for delete */
- cmd->destination = cpu_to_le16(id);
- if (mr_list) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- }
-
- status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
- cmd_details);
- if (!status ||
- hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
- if (rule_id)
- *rule_id = le16_to_cpu(resp->rule_id);
- if (rules_used)
- *rules_used = le16_to_cpu(resp->mirror_rules_used);
- if (rules_free)
- *rules_free = le16_to_cpu(resp->mirror_rules_free);
- }
- return status;
-}
-
-/**
- * i40e_aq_add_mirrorrule - add a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @dest_vsi: SEID of VSI to which packets will be mirrored
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
- **/
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
- rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
- rule_type, dest_vsi, count, mr_list,
- cmd_details, rule_id, rules_used, rules_free);
-}
-
-/**
- * i40e_aq_delete_mirrorrule - delete a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @count: length of the list
- * @rule_id: Rule ID that is returned in the receive desc as part of
- * add_mirrorrule.
- * @mr_list: list of mirrored VLAN IDs to be removed
- * @cmd_details: pointer to command details structure or NULL
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
- **/
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
-{
- /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- /* count and mr_list shall be valid for rule_type INGRESS VLAN
- * mirroring. For other rule_type, count and rule_type should
- * not matter.
- */
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
- rule_type, rule_id, count, mr_list,
- cmd_details, NULL, rules_used, rules_free);
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF id to send msg
@@ -3180,41 +3019,6 @@ i40e_aq_update_nvm_exit:
}
/**
- * i40e_aq_rearrange_nvm
- * @hw: pointer to the hw struct
- * @rearrange_nvm: defines direction of rearrangement
- * @cmd_details: pointer to command details structure or NULL
- *
- * Rearrange NVM structure, available only for transition FW
- **/
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aqc_nvm_update *cmd;
- struct i40e_aq_desc desc;
- int status;
-
- cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
-
- rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
- I40E_AQ_NVM_REARRANGE_TO_STRUCT);
-
- if (!rearrange_nvm) {
- status = -EINVAL;
- goto i40e_aq_rearrange_nvm_exit;
- }
-
- cmd->command_flags |= rearrange_nvm;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-i40e_aq_rearrange_nvm_exit:
- return status;
-}
-
-/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -3335,44 +3139,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
- * i40e_aq_restore_lldp
- * @hw: pointer to the hw struct
- * @setting: pointer to factory setting variable or NULL
- * @restore: True if factory settings should be restored
- * @cmd_details: pointer to command details structure or NULL
- *
- * Restore LLDP Agent factory settings if @restore set to True. In other case
- * only returns factory setting in AQ response.
- **/
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_restore *cmd =
- (struct i40e_aqc_lldp_restore *)&desc.params.raw;
- int status;
-
- if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Restore LLDP not supported by current FW version.\n");
- return -ENODEV;
- }
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
-
- if (restore)
- cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (setting)
- *setting = cmd->command & 1;
-
- return status;
-}
-
-/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
@@ -4570,84 +4336,6 @@ phy_write_end:
}
/**
- * i40e_write_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Writes value to specified PHY register
- **/
-int i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_write_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
- * i40e_read_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Reads specified PHY register value
- **/
-int i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_read_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -4663,80 +4351,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_link_led
- * @hw: pointer to the HW structure
- * @time: time how long led will blinks in secs
- * @interval: gap between LED on and off in msecs
- *
- * Blinks PHY link LED
- **/
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
-{
- u16 led_addr = I40E_PHY_LED_PROV_REG_1;
- u16 gpio_led_port;
- u8 phy_addr = 0;
- int status = 0;
- u16 led_ctl;
- u8 port_num;
- u16 led_reg;
- u32 i;
-
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- led_addr++) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
- if (status)
- goto phy_blinking_end;
- led_ctl = led_reg;
- if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
- led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
- if (status)
- goto phy_blinking_end;
- break;
- }
- }
-
- if (time > 0 && interval > 0) {
- for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
- if (status)
- goto restore_config;
- if (led_reg & I40E_PHY_LED_MANUAL_ON)
- led_reg = 0;
- else
- led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
- if (status)
- goto restore_config;
- msleep(interval);
- }
- }
-
-restore_config:
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
-
-phy_blinking_end:
- return status;
-}
-
-/**
* i40e_led_get_reg - read LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
@@ -5269,39 +4883,6 @@ i40e_find_segment_in_package(u32 segment_type,
(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
/**
- * i40e_find_section_in_profile
- * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
- * @profile: pointer to the i40e segment header to be searched
- *
- * This function searches i40e segment for a particular section type. On
- * success it returns a pointer to the section header, otherwise it will
- * return NULL.
- **/
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile)
-{
- struct i40e_profile_section_header *sec;
- struct i40e_section_table *sec_tbl;
- u32 sec_off;
- u32 i;
-
- if (profile->header.type != SEGMENT_TYPE_I40E)
- return NULL;
-
- I40E_SECTION_TABLE(profile, sec_tbl);
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec_off = sec_tbl->section_offset[i];
- sec = I40E_SECTION_HEADER(profile, sec_off);
- if (sec->section.type == section_type)
- return sec;
- }
-
- return NULL;
-}
-
-/**
* i40e_ddp_exec_aq_section - Execute generic AQ for DDP
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
@@ -5524,45 +5105,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
}
/**
- * i40e_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
- int status = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
-
- return status;
-}
-
-/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 8db1eb0c1768..352e957443fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
}
/**
- * i40e_dcb_hw_get_num_tc
- * @hw: pointer to the hw struct
- *
- * Returns number of traffic classes configured in HW
- **/
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
-{
- u32 reg = rd32(hw, I40E_PRTDCB_GENC);
-
- return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
-}
-
-/**
* i40e_dcb_hw_rx_ets_bw_config
* @hw: pointer to the hw struct
* @bw_share: Bandwidth share indexed per traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index d76497566e40..d5662c639c41 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
u8 pfc_en, u8 *prio_tc);
void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
u8 *mode, u8 *prio_type);
void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 208c2f0857b6..6cd9da662ae1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -722,7 +722,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
dev_info(&pf->pdev->dev, " num MDD=%lld\n",
- vf->num_mdd_events);
+ vf->mdd_tx_events.count + vf->mdd_rx_events.count);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index bce5b76f1e7a..8a7a83f83ee5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -459,6 +459,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
+ I40E_PRIV_FLAG("mdd-auto-reset-vf",
+ I40E_FLAG_MDD_AUTO_RESET_VF, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
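The new private flag lands in pf->flags as I40E_FLAG_MDD_AUTO_RESET_VF and is consumed later in this patch with test_bit(). A minimal sketch of that read side, with a hypothetical helper name that is not part of the change itself:

	/* Hypothetical helper: true when the "mdd-auto-reset-vf" ethtool
	 * private flag is set for this PF. */
	static bool i40e_mdd_auto_reset_enabled(const struct i40e_pf *pf)
	{
		return test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags);
	}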
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0e1d9e2fbf38..65a702668e21 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1666,9 +1666,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* @vsi: VSI to remove from
* @f: the filter to remove from the list
*
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
+ * This function requires you've already found the exact filter you will
+ * remove, such as via i40e_find_filter or i40e_find_mac.
*
* NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
@@ -1698,29 +1697,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
/**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
- * @vsi: the VSI to be searched
- * @macaddr: the MAC address
- * @vlan: the VLAN
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
-{
- struct i40e_mac_filter *f;
-
- if (!vsi || !macaddr)
- return;
-
- f = i40e_find_filter(vsi, macaddr, vlan);
- __i40e_del_filter(vsi, f);
-}
-
-/**
* i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
@@ -9629,19 +9605,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
- * @pf: board private structure
- **/
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
-{
- u32 val, fcnt_prog;
-
- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
- return fcnt_prog;
-}
-
-/**
* i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
@@ -11217,6 +11180,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
}
/**
+ * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event
+ * @pf: board private structure
+ * @vf: pointer to the VF structure
+ * @is_tx: true for a Tx event, false for an Rx event
+ */
+static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf,
+ bool is_tx)
+{
+ dev_err(&pf->pdev->dev, is_tx ?
+ "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" :
+ "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n",
+ is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count,
+ pf->hw.pf_id,
+ vf->vf_id,
+ vf->default_lan_addr.addr,
+ str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)));
+}
+
+/**
+ * i40e_print_vfs_mdd_events - print VFs malicious driver detect event
+ * @pf: pointer to the PF structure
+ *
+ * Called from i40e_handle_mdd_event to rate-limit and print VF MDD events.
+ */
+static void i40e_print_vfs_mdd_events(struct i40e_pf *pf)
+{
+ unsigned int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ if (!__ratelimit(&pf->mdd_message_rate_limit))
+ return;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct i40e_vf *vf = &pf->vf[i];
+ bool is_printed = false;
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, false);
+ is_printed = true;
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, true);
+ is_printed = true;
+ }
+
+ if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF #%d\n",
+ i);
+ }
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the PF structure
*
@@ -11230,8 +11254,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ i40e_print_vfs_mdd_events(pf);
return;
+ }
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
@@ -11275,36 +11304,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* see if one of the VFs needs its hand slapped */
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ bool is_mdd_on_tx = false;
+ bool is_mdd_on_rx = false;
+
vf = &(pf->vf[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_tx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_tx = true;
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_rx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_rx = true;
+ }
+
+ if ((is_mdd_on_tx || is_mdd_on_rx) &&
+ test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ if (is_mdd_on_rx)
+ i40e_print_vf_mdd_event(pf, vf, false);
+ if (is_mdd_on_tx)
+ i40e_print_vf_mdd_event(pf, vf, true);
+
+ i40e_vc_reset_vf(vf, true);
}
}
- /* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
+
+ i40e_print_vfs_mdd_events(pf);
}
/**
@@ -12614,89 +12655,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
- * @pf: board private structure
- **/
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
-{
- /* Commit temporary BW setting to permanent NVM image */
- enum i40e_admin_queue_err last_aq_status;
- u16 nvm_word;
- int ret;
-
- if (pf->hw.partition_id != 1) {
- dev_info(&pf->pdev->dev,
- "Commit BW only works on partition 1! This is partition %d",
- pf->hw.partition_id);
- ret = -EOPNOTSUPP;
- goto bw_commit_out;
- }
-
- /* Acquire NVM for read access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Read word 0x10 of NVM - SW compatibility word 1 */
- ret = i40e_aq_read_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word), &nvm_word,
- false, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Wait a bit for NVM release to complete */
- msleep(50);
-
- /* Acquire NVM for write access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
- /* Write it back out unchanged to initiate update NVM,
- * which will force a write of the shadow (alt) RAM to
- * the NVM - thus storing the bandwidth values permanently.
- */
- ret = i40e_aq_update_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word),
- &nvm_word, true, 0, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret)
- dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
-bw_commit_out:
-
- return ret;
-}
-
-/**
* i40e_is_total_port_shutdown_enabled - read NVM and return value
* if total port shutdown feature is enabled for this PF
* @pf: board private structure
@@ -15998,6 +15956,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* VF MDD event logs are rate limited to one second intervals */
+ ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
+
/* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing
* a reset in the case of small MSS+TSO.
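The probe path above arms pf->mdd_message_rate_limit with an interval of 1 * HZ and a burst of 1, i.e. at most one VF MDD report per second. A self-contained sketch of the same init/check pairing, assuming only <linux/ratelimit.h> and <linux/device.h>:

	#include <linux/device.h>
	#include <linux/ratelimit.h>

	static struct ratelimit_state example_rs;

	static void example_rs_init(void)
	{
		/* interval = 1 * HZ jiffies, burst = 1 message */
		ratelimit_state_init(&example_rs, 1 * HZ, 1);
	}

	static void example_rs_log(struct device *dev)
	{
		/* __ratelimit() returns nonzero while budget remains */
		if (!__ratelimit(&example_rs))
			return;
		dev_info(dev, "at most one of these per second\n");
	}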
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5a0699ca7ce5..099bb8ab7d70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -27,13 +27,6 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -72,8 +65,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
/* admin send queue commands */
@@ -141,9 +132,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -176,14 +164,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -220,9 +200,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
@@ -234,9 +211,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details);
@@ -458,13 +432,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
int i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
-int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
@@ -477,20 +445,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile);
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
-
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index dfa785e39458..1120f8e4bb67 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
* @notify_vf: notify vf about reset or not
* Reset VF handler.
**/
-static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 66f95e2f3146..5cf74f16f433 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -64,6 +64,12 @@ struct i40evf_channel {
u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
};
+struct i40e_mdd_vf_events {
+ u64 count; /* total count of Rx|Tx events */
+ /* value of count when the event was last printed */
+ u64 last_printed;
+};
+
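The count/last_printed pair is what lets the rate-limited print path report only events that arrived since the previous report: the MDD interrupt path only ever increments count, and the printer advances last_printed as it logs. A minimal sketch of that consume step (hypothetical helper, not in the patch):

	/* Returns true exactly once per batch of new events, advancing
	 * the printed watermark as a side effect. */
	static bool example_take_new_events(struct i40e_mdd_vf_events *ev)
	{
		if (ev->count == ev->last_printed)
			return false;	/* nothing new since the last report */
		ev->last_printed = ev->count;
		return true;
	}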
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -92,7 +98,9 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
- u64 num_mdd_events; /* num of mdd events detected */
+ /* num of mdd tx and rx events detected */
+ struct i40e_mdd_vf_events mdd_rx_events;
+ struct i40e_mdd_vf_events mdd_tx_events;
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
@@ -120,6 +128,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf);
bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4e885df789ef..e28f1905a4a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!first)
first = bi;
- else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ else if (!xsk_buff_add_frag(first, bi)) {
+ xsk_buff_free(first);
break;
+ }
if (++next_to_process == count)
next_to_process = 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index a9e54866ae6b..cbfaaa5b7d02 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1180,7 +1180,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
q_vector = &adapter->q_vectors[q_idx];
napi = &q_vector->napi;
- napi_enable(napi);
+ napi_enable_locked(napi);
}
}
@@ -1196,7 +1196,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
- napi_disable(&q_vector->napi);
+ napi_disable_locked(&q_vector->napi);
}
}
@@ -1800,8 +1800,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll);
}
return 0;
@@ -1827,7 +1827,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
+ netif_napi_del_locked(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
@@ -1968,6 +1968,7 @@ err:
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
@@ -1976,7 +1977,7 @@ static void iavf_finish_config(struct work_struct *work)
* The dev->lock is needed to update the queue number
*/
rtnl_lock();
- mutex_lock(&adapter->netdev->lock);
+ netdev_lock(adapter->netdev);
mutex_lock(&adapter->crit_lock);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
@@ -1988,7 +1989,16 @@ static void iavf_finish_config(struct work_struct *work)
switch (adapter->state) {
case __IAVF_DOWN:
+ /* Set the real number of queues when reset occurs while
+ * state == __IAVF_DOWN
+ */
+ pairs = adapter->num_active_queues;
+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ netdev_unlock(adapter->netdev);
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
@@ -2003,11 +2013,7 @@ static void iavf_finish_config(struct work_struct *work)
goto out;
}
}
-
- /* Set the real number of queues when reset occurs while
- * state == __IAVF_DOWN
- */
- fallthrough;
+ break;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2020,7 +2026,8 @@ static void iavf_finish_config(struct work_struct *work)
out:
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&adapter->netdev->lock);
+ if (!netdev_released)
+ netdev_unlock(adapter->netdev);
rtnl_unlock();
}
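Throughout this file the open-coded mutex_lock(&netdev->lock) calls are replaced with the netdev_lock()/netdev_unlock() wrappers, taken outside the driver's own crit_lock. A sketch of the resulting lock ordering, illustrative only:

	/* Ordering used in this patch: netdev_lock(netdev) is acquired
	 * before adapter->crit_lock and released after it, with
	 * rtnl_lock() outermost where RTNL is also needed (as in
	 * iavf_finish_config()). */
	static void example_locked_section(struct iavf_adapter *adapter)
	{
		struct net_device *netdev = adapter->netdev;

		netdev_lock(netdev);
		mutex_lock(&adapter->crit_lock);
		/* ... state protected by both locks ... */
		mutex_unlock(&adapter->crit_lock);
		netdev_unlock(netdev);
	}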
@@ -2713,12 +2720,16 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_adapter *adapter = container_of(work,
struct iavf_adapter,
watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
+ netdev_lock(netdev);
if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
+ if (adapter->state == __IAVF_REMOVE) {
+ netdev_unlock(netdev);
return;
+ }
goto restart_watchdog;
}
@@ -2730,30 +2741,35 @@ static void iavf_watchdog_task(struct work_struct *work)
case __IAVF_STARTUP:
iavf_startup(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
@@ -2765,6 +2781,7 @@ static void iavf_watchdog_task(struct work_struct *work)
* as it can loop forever
*/
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
@@ -2773,6 +2790,7 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, (5 * HZ));
return;
@@ -2780,6 +2798,7 @@ static void iavf_watchdog_task(struct work_struct *work)
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
@@ -2792,6 +2811,7 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
@@ -2811,12 +2831,14 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
HZ * 2);
return;
@@ -2847,6 +2869,7 @@ static void iavf_watchdog_task(struct work_struct *work)
case __IAVF_REMOVE:
default:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
@@ -2858,12 +2881,14 @@ static void iavf_watchdog_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, HZ * 2);
return;
}
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
restart_watchdog:
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
@@ -2990,12 +3015,12 @@ static void iavf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- mutex_lock(&netdev->lock);
+ netdev_lock(netdev);
if (!mutex_trylock(&adapter->crit_lock)) {
if (adapter->state != __IAVF_REMOVE)
queue_work(adapter->wq, &adapter->reset_task);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
}
@@ -3043,7 +3068,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -3184,7 +3209,7 @@ continue_reset:
wake_up(&adapter->reset_waitqueue);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
reset_err:
@@ -3195,7 +3220,7 @@ reset_err:
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3667,10 +3692,10 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- mutex_lock(&netdev->lock);
+ netdev_lock(netdev);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return ret;
}
@@ -4340,14 +4365,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
+ netdev_lock(netdev);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
+ netdev_unlock(netdev);
return -EBUSY;
+ }
usleep_range(500, 1000);
}
@@ -4396,6 +4424,7 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
@@ -4408,6 +4437,7 @@ err_setup_tx:
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return err;
}
@@ -4429,10 +4459,12 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
}
@@ -4466,6 +4498,7 @@ static int iavf_close(struct net_device *netdev)
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
@@ -5342,6 +5375,7 @@ static int iavf_suspend(struct device *dev_d)
netif_device_detach(netdev);
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
if (netif_running(netdev)) {
@@ -5353,6 +5387,7 @@ static int iavf_suspend(struct device *dev_d)
iavf_reset_interrupt_capability(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
}
@@ -5451,6 +5486,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5487,6 +5523,7 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&hw->aq.asq_mutex);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
+ netdev_unlock(netdev);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 3307d551f431..9e0d9f710441 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -32,7 +32,8 @@ ice-y := ice_main.o \
ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
- devlink/devlink_port.o \
+ devlink/health.o \
+ devlink/port.o \
ice_sf_eth.o \
ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 415445cefdb2..d116e2b10bce 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,7 +6,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
@@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, ctx->buf);
+ err = devlink_info_version_running_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, ctx->buf);
+ err = devlink_info_version_stored_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
goto out_free_ctx;
@@ -1207,9 +1211,15 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
struct ice_vsi *vsi = ice_get_main_vsi(pf);
int err;
+ err = ice_init_hw(&pf->hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
err = ice_init_dev(pf);
if (err)
- return err;
+ goto unroll_hw_init;
vsi->flags = ICE_VSI_FLAG_INIT;
@@ -1232,6 +1242,8 @@ err_load:
rtnl_unlock();
err_vsi_cfg:
ice_deinit_dev(pf);
+unroll_hw_init:
+ ice_deinit_hw(&pf->hw);
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
new file mode 100644
index 000000000000..ea40f7941259
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+ devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+ enum ice_aqc_health_status code;
+ const char *description;
+ const char *solution;
+ const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate Health Status Codes that are not pertinent to the end-user.
+ * For instance, Health Code 0x1002 is triggered when the command fails.
+ * Such codes should be disregarded by the end-user.
+ * The lookup table below must be kept sorted by code.
+ */
+
+static const char *const ice_common_port_solutions =
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char *const ice_port_number_label = "Port Number";
+static const char *const ice_update_nvm_solution = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+ "Change or replace the module or cable.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM,
+ "Device cannot communicate with the module.",
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+ "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+ "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+ "Change or replace the module or cable. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+ NULL, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+ "Change the module or configure the port option to match the current module speed. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+ "All configured link modes were attempted but failed to establish link. The device will restart the process to establish link.",
+ "Check link partner connection and configuration.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED,
+ "Port speed is limited by PHY capabilities.",
+ "Change the module to align to port option.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD,
+ "Failed to load the firmware image in the external PHY.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.",
+ ice_update_nvm_solution, {"Extended Error"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.",
+ "If issue persists, call customer support.", {"Access Type"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.",
+ "Update to latest base driver and DDP package."},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.",
+ ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB,
+ "Supplied MIB file is invalid. DCB reverted to default configuration.",
+ "Disable FW-LLDP and check DCBx system configuration.",
+ {ice_port_number_label, "MIB ID"}},
+};
+
+static int ice_health_status_lookup_compare(const void *a, const void *b)
+{
+ return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code;
+}
+
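+/* Note: ice_health_status_lookup above must stay sorted by ascending status
+ * code, since the lookup below relies on bsearch().
+ */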
+static const struct ice_health_status *ice_get_health_status(u16 code)
+{
+ struct ice_health_status key = { .code = code };
+
+ return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup),
+ sizeof(struct ice_health_status), ice_health_status_lookup_compare);
+}
+
+static void ice_describe_status_code(struct devlink_fmsg *fmsg,
+ struct ice_aqc_health_status_elem *hse)
+{
+ static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" };
+ const struct ice_health_status *health_code;
+ u32 internal_data[2];
+ u16 status_code;
+
+ status_code = le16_to_cpu(hse->health_status_code);
+
+ devlink_fmsg_put(fmsg, "Syndrome", status_code);
+ if (status_code) {
+ internal_data[0] = le32_to_cpu(hse->internal_data1);
+ internal_data[1] = le32_to_cpu(hse->internal_data2);
+
+ health_code = ice_get_health_status(status_code);
+ if (!health_code)
+ return;
+
+ devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description);
+ if (health_code->solution)
+ devlink_fmsg_string_pair_put(fmsg, "Possible Solution",
+ health_code->solution);
+
+ for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) {
+ if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA)
+ devlink_fmsg_u32_pair_put(fmsg,
+ health_code->data_label[i] ?
+ health_code->data_label[i] :
+ aux_label[i],
+ internal_data[i]);
+ }
+ }
+}
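+
+/* Example (hypothetical) `devlink health diagnose` output produced by the
+ * function above for status code 0x106 (module not present), assuming
+ * ice_port_number_label resolves to "Port Number":
+ *
+ *   Syndrome: 262
+ *   Description: Module is not present.
+ *   Possible Solution: Check that the module is inserted correctly. ...
+ *   Port Number: 0
+ */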
+
+static int
+ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack __always_unused *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static void ice_config_health_events(struct ice_pf *pf, bool enable)
+{
+ u8 enable_bits = 0;
+ int ret;
+
+ if (enable)
+ enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;
+
+ ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
+ str_enable_disable(enable), ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+}
+
+/**
+ * ice_process_health_status_event - Process the health status event from FW
+ * @pf: pointer to the PF structure
+ * @event: event structure containing the Health Status Event opcode
+ *
+ * Decode the Health Status Events and print the associated messages
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ const struct ice_aqc_health_status_elem *health_info;
+ u16 count;
+
+ health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+ count = le16_to_cpu(event->desc.params.get_health_status.health_status_count);
+
+ if (count > (event->buf_len / sizeof(*health_info))) {
+ dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+ return;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ const struct ice_health_status *health_code;
+ u16 status_code;
+
+ status_code = le16_to_cpu(health_info->health_status_code);
+ health_code = ice_get_health_status(status_code);
+
+ if (health_code) {
+ switch (le16_to_cpu(health_info->event_source)) {
+ case ICE_AQC_HEALTH_STATUS_GLOBAL:
+ pf->health_reporters.fw_status = *health_info;
+ devlink_health_report(pf->health_reporters.fw,
+ "FW syndrome reported", NULL);
+ break;
+ case ICE_AQC_HEALTH_STATUS_PF:
+ case ICE_AQC_HEALTH_STATUS_PORT:
+ pf->health_reporters.port_status = *health_info;
+ devlink_health_report(pf->health_reporters.port,
+ "Port syndrome reported", NULL);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+ }
+ } else {
+ u32 data1, data2;
+ u16 source;
+
+ source = le16_to_cpu(health_info->event_source);
+ data1 = le32_to_cpu(health_info->internal_data1);
+ data2 = le32_to_cpu(health_info->internal_data2);
+ dev_dbg(ice_pf_to_dev(pf),
+ "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x",
+ status_code, source, data1, data2);
+ }
+ health_info++;
+ }
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up; a short event name is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ if (!reporter)
+ return;
+
+	/* We do not do auto recovery, so the return value of the function
+	 * below is always 0 and can safely be ignored.
+ */
+ devlink_health_report(reporter, msg, priv_ctx);
+}
+
+struct ice_mdd_event {
+ enum ice_mdd_src src;
+ u16 vf_num;
+ u16 queue;
+ u8 pf_num;
+ u8 event;
+};
+
+static const char *ice_mdd_src_to_str(enum ice_mdd_src src)
+{
+ switch (src) {
+ case ICE_MDD_SRC_TX_PQM:
+ return "tx_pqm";
+ case ICE_MDD_SRC_TX_TCLAN:
+ return "tx_tclan";
+ case ICE_MDD_SRC_TX_TDPU:
+ return "tx_tdpu";
+ case ICE_MDD_SRC_RX:
+ return "rx";
+ default:
+ return "invalid";
+ }
+}
+
+static int
+ice_mdd_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_mdd_event *mdd_event = priv_ctx;
+ const char *src;
+
+ if (!mdd_event)
+ return 0;
+
+ src = ice_mdd_src_to_str(mdd_event->src);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "src", src);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+/**
+ * ice_report_mdd_event - Report an MDD event through devlink health
+ * @pf: the PF device structure
+ * @src: the HW block that was the source of this MDD event
+ * @pf_num: the pf_num on which the MDD event occurred
+ * @vf_num: the vf_num on which the MDD event occurred
+ * @event: the event type of the MDD event
+ * @queue: the queue on which the MDD event occurred
+ *
+ * Report an MDD event that has occurred on this PF.
+ */
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue)
+{
+ struct ice_mdd_event ev = {
+ .src = src,
+ .pf_num = pf_num,
+ .vf_num = vf_num,
+ .event = event,
+ .queue = queue,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev);
+}
+
+/**
+ * ice_fmsg_put_ptr - put hex value of pointer into fmsg
+ *
+ * @fmsg: devlink fmsg under construction
+ * @name: name to pass
+ * @ptr: pointer value to print as hex and put into fmsg
+ */
+static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name,
+ void *ptr)
+{
+ char buf[sizeof(ptr) * 3];
+
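+	/* %p renders at most two hex characters per pointer byte; tripling
+	 * the pointer size leaves ample room for a prefix and the NUL.
+	 */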
+ sprintf(buf, "%p", ptr);
+ devlink_fmsg_put(fmsg, name, buf);
+}
+
+struct ice_tx_hang_event {
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ u16 queue;
+ u16 next_to_clean;
+ u16 next_to_use;
+ struct ice_tx_ring *tx_ring;
+};
+
+static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_tx_hang_event *event = priv_ctx;
+ struct sk_buff *skb;
+
+ if (!event)
+ return 0;
+
+ skb = event->tx_ring->tx_buf->skb;
+ devlink_fmsg_obj_nest_start(fmsg);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use);
+ devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name);
+ ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
+ ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma);
+ ice_fmsg_put_ptr(fmsg, "skb-ptr", skb);
+ devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc,
+ event->tx_ring->count * sizeof(struct ice_tx_desc));
+ devlink_fmsg_dump_skb(fmsg, skb);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
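+/* Tx hang reporting is split in two stages: ice_prep_tx_hang_report() only
+ * stashes the ring details in the pre-allocated tx_hang_buf, so it is safe
+ * to call from a non-sleeping context, while the actual devlink report is
+ * deferred to ice_report_tx_hang(), invoked from the service task.
+ */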
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+ buf->tx_ring = tx_ring;
+ buf->vsi_num = vsi_num;
+ buf->head = head;
+ buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+ struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+ struct ice_tx_hang_event ev = {
+ .head = buf->head,
+ .intr = buf->intr,
+ .vsi_num = buf->vsi_num,
+ .queue = tx_ring->q_index,
+ .next_to_clean = tx_ring->next_to_clean,
+ .next_to_use = tx_ring->next_to_use,
+ .tx_ring = tx_ring,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_health_reporter *rep;
+ const u64 graceful_period = 0;
+
+ rep = devl_health_reporter_create(devlink, ops, graceful_period, pf);
+ if (IS_ERR(rep)) {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_err(dev, "failed to create devlink %s health report er",
+ ops->name);
+ return NULL;
+ }
+ return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+ ._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ }
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+ }
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
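+
+/* For reference, ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose)
+ * expands to:
+ *
+ *	static const struct devlink_health_reporter_ops ice_fw_reporter_ops = {
+ *		.name = "fw",
+ *		.dump = ice_fw_reporter_dump,
+ *		.diagnose = ice_fw_reporter_diagnose,
+ *	};
+ */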
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+ struct ice_health *reps = &pf->health_reporters;
+
+ reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+ reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+ reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+ ice_config_health_events(pf, true);
+ }
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+ ice_deinit_devl_reporter(pf->health_reporters.mdd);
+ ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ ice_deinit_devl_reporter(pf->health_reporters.fw);
+ ice_deinit_devl_reporter(pf->health_reporters.port);
+ ice_config_health_events(pf, false);
+ }
+}
+
+static void
+ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devlink_health_reporter_state_update(reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+ ice_health_assign_healthy_state(pf->health_reporters.mdd);
+ ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file contains everything needed for the devlink health
+ * mechanism of the ice driver.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+ ICE_MDD_SRC_TX_PQM,
+ ICE_MDD_SRC_TX_TCLAN,
+ ICE_MDD_SRC_TX_TDPU,
+ ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanying data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ * non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+ struct devlink_health_reporter *fw;
+ struct devlink_health_reporter *mdd;
+ struct devlink_health_reporter *port;
+ struct devlink_health_reporter *tx_hang;
+ struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+ struct ice_tx_ring *tx_ring;
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ );
+ struct ice_aqc_health_status_elem fw_status;
+ struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index c6779d9dffff..767419a67fef 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_lib.h"
#include "ice_fltr.h"
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f5d6f974185..71e05d30f0fd 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -78,6 +78,7 @@
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
+#include "devlink/health.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -665,6 +666,7 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+ struct ice_health health_reporters;
u8 num_quanta_prof_used;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ef14cff9a333..01536a382e54 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -12,6 +12,13 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
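+
+/* The sized struct typedefs above let the compiler check queue context
+ * buffer sizes at call sites, which bare u8 arrays (decaying to pointers)
+ * cannot.
+ */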
+
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@@ -1665,6 +1672,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
@@ -1807,6 +1815,7 @@ struct ice_aqc_nvm_pass_comp_tbl {
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3
u8 component_response_code; /* Response only */
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
@@ -2084,10 +2093,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
@@ -2510,6 +2519,87 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
+enum ice_aqc_health_status_mask {
+ ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+ ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+ u8 event_source;
+ u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+ ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+ ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+ ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+ ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+ ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+ ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+ ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+ ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+ ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+ ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+ ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+ ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_health_status_scope {
+ ICE_AQC_HEALTH_STATUS_PF = 0x1,
+ ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+ ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
+};
+
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status.
+ */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+ __le32 internal_data1;
+ __le32 internal_data2;
+};
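+
+/* Each element is 12 bytes on the wire: the code and source as LE16,
+ * followed by two LE32 words of code-specific data;
+ * ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA marks an unused word.
+ */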
+
/* Set FW Logging configuration (indirect 0xFF30)
* Register for FW Logging (indirect 0xFF31)
* Query FW Logging (indirect 0xFF32)
@@ -2650,6 +2740,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_set_health_status_cfg set_health_status_cfg;
+ struct ice_aqc_get_health_status get_health_status;
struct ice_aqc_dnl_call_command dnl_call;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
@@ -2852,6 +2944,10 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+ /* System Diagnostic commands */
+ ice_aqc_opc_set_health_status_cfg = 0xFF20,
+ ice_aqc_opc_get_health_status = 0xFF22,
+
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 82a9cd4ec7ae..b2af8e3586f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -454,6 +454,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* Enable descriptor prefetch */
+ rlan_ctx.prefena = 1;
+
/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
* metadata and flags to allow redirecting to PR netdev
*/
@@ -910,8 +913,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 496d86cbd13f..7a2a2e8da8fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,6 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
+#include <linux/packing.h>
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_MAX_NETLIST_SIZE 10
@@ -308,6 +309,42 @@ bool ice_is_e825c(struct ice_hw *hw)
}
/**
+ * ice_is_pf_c827 - check if PF contains C827 PHY
+ * @hw: pointer to the hw struct
+ *
+ * Return: true if the device has a C827 PHY.
+ */
+static bool ice_is_pf_c827(struct ice_hw *hw)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
+ return true;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return false;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
+ return true;
+
+ return false;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -1025,6 +1062,33 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware was not yet loaded, we have to wait more */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -1173,8 +1237,19 @@ int ice_init_hw(struct ice_hw *hw)
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
- return 0;
+	/* Some cards require longer initialization times because they need
+	 * to load FW from an external source. This can take up to half a
+	 * minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ status = ice_wait_for_fw(hw, 30000);
+ if (status) {
+			dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
+ goto err_unroll_fltr_mgmt_struct;
+ }
+ }
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
@@ -1360,39 +1435,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
-
- if (!ice_rxq_ctx)
- return -EINVAL;
-
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
-
/* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)rxq_ctx)[i];
- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
- }
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
- return 0;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+ }
}
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+ PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
+
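+/* For example, ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0) expands to
+ * PACKED_FIELD(12, 0, struct ice_rlan_ctx, head), covering bits 12..0.
+ */
+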
/* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
- /* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+ /* Field Width LSB */
ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1413,35 +1480,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
- { 0 }
};
/**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+ ice_rxq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
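+
+/* QUIRK_LITTLE_ENDIAN and QUIRK_LSW32_IS_FIRST describe the device layout
+ * to the packing library: bytes within each word are little-endian and the
+ * least significant 32-bit word of the context is placed first.
+ */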
+
+/**
+ * ice_write_rxq_ctx - Write Rx queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ ice_rxq_ctx_buf_t buf = {};
- if (!rlan_ctx)
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
- rlan_ctx->prefena = 1;
+ ice_pack_rxq_ctx(rlan_ctx, &buf);
+ ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
- ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ return 0;
}
/* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1470,10 +1552,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
- { 0 }
};
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
/* Sideband Queue command wrappers */
/**
@@ -2547,6 +2641,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+ info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2569,6 +2664,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
info->ts_ll_int_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+ info->ll_phy_tmr_update);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2709,40 +2806,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_is_phy_rclk_in_netlist
* @hw: pointer to the hw struct
*
@@ -4096,6 +4159,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
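+	/* Each lane with a valid active port option backs one logical port;
+	 * the Nth occupied lane belongs to the PF with pf_id N, so return
+	 * the lane index once the running lport count matches our pf_id.
+	 */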
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ kfree(options);
+ return lane;
+ }
+
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -4558,205 +4672,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_pack_ctx_byte - write a byte to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u8 src_byte, dest_byte, mask;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- src_byte = *from;
- src_byte <<= shift_width;
- src_byte &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_byte, dest, sizeof(dest_byte));
-
- dest_byte &= ~mask; /* get the bits not changing */
- dest_byte |= src_byte; /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_byte, sizeof(dest_byte));
-}
-
-/**
- * ice_pack_ctx_word - write a word to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u16 src_word, mask;
- __le16 dest_word;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_word = *(u16 *)from;
- src_word <<= shift_width;
- src_word &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_word, dest, sizeof(dest_word));
-
- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_word, sizeof(dest_word));
-}
-
-/**
- * ice_pack_ctx_dword - write a dword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u32 src_dword, mask;
- __le32 dest_dword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_dword = *(u32 *)from;
- src_dword <<= shift_width;
- src_dword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_dword, dest, sizeof(dest_dword));
-
- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_dword, sizeof(dest_dword));
-}
-
-/**
- * ice_pack_ctx_qword - write a qword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u64 src_qword, mask;
- __le64 dest_qword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_qword = *(u64 *)from;
- src_qword <<= shift_width;
- src_qword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_qword, dest, sizeof(dest_qword));
-
- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_qword, sizeof(dest_qword));
-}
-
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx: pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: List of Rx context elements
- */
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- int f;
-
- for (f = 0; ce_info[f].width; f++) {
- /* We have to deal with each element of the FW response
- * using the correct size so that we are correct regardless
- * of the endianness of the machine.
- */
- if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
- f, ce_info[f].width, ce_info[f].size_of);
- continue;
- }
- switch (ce_info[f].size_of) {
- case sizeof(u8):
- ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u16):
- ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u32):
- ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u64):
- ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
* @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
@@ -6032,6 +5947,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
}
/**
+ * ice_is_fw_health_report_supported - checks if firmware supports health events
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if firmware supports health status reports,
+ * false otherwise
+ */
+bool ice_is_fw_health_report_supported(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
+ ICE_FW_API_HEALTH_REPORT_MIN,
+ ICE_FW_API_HEALTH_REPORT_PATCH);
+}
+
+/**
+ * ice_aq_set_health_status_cfg - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF. The supported event types are: PF-specific, all PFs, and global.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
+{
+ struct ice_aqc_set_health_status_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_health_status_cfg;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
+
+ cmd->event_source = event_source;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..15ba38543738 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
-extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
@@ -113,7 +112,6 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-bool ice_is_pf_c827(struct ice_hw *hw);
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
bool ice_is_cgu_in_netlist(struct ice_hw *hw);
@@ -143,6 +141,8 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+bool ice_is_fw_health_report_supported(struct ice_hw *hw);
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source);
int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
u8 serdes_num, int *output);
int
@@ -193,6 +193,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 38e151c7ea23..8d806d8ad761 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -2053,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct kthread_worker *kworker;
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
- kworker = kthread_create_worker(0, "ice-dplls-%s",
+ kworker = kthread_run_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index ac7db100e2cd..5c7dcf21b222 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,7 +5,7 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 2702a0da5c3e..70c201f569ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -6,6 +6,7 @@
#include <linux/crc32.h>
#include <linux/pldmfw.h>
#include "ice.h"
+#include "ice_lib.h"
#include "ice_fw_update.h"
struct ice_fwu_priv {
@@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
dev_info(dev, "firmware has rejected updating %s\n", component);
break;
+ case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (ice_is_recovery_mode(&pf->hw))
+ return 0;
+ break;
}
switch (code) {
@@ -1004,13 +1009,20 @@ int ice_devlink_flash_update(struct devlink *devlink,
return -EOPNOTSUPP;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
memset(&priv, 0, sizeof(priv));
+ if (params->component && strcmp(params->component, "fw.mgmt") == 0) {
+ priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
+ priv.context.component_identifier = NVM_COMP_ID_NVM;
+ } else if (params->component) {
+ return -EOPNOTSUPP;
+ }
+
/* the E822 device needs a slightly different ops */
if (hw->mac_type == ICE_MAC_GENERIC)
priv.context.ops = &ice_fwu_ops_e822;
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index f02e8ca55375..b2148dbe49b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 611577ebc29d..1479b45738af 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
-/* RLAN Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* RLAN Rx queue context data */
struct ice_rlan_ctx {
u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
- u16 dbuf; /* bigger than needed, see above for reason */
+ u8 dbuf;
#define ICE_RLAN_CTX_HBUF_S 6
- u16 hbuf; /* bigger than needed, see above for reason */
+ u8 hbuf;
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -401,29 +393,15 @@ struct ice_rlan_ctx {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
+ u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 lrxqthresh;
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
-struct ice_ctx_ele {
- u16 offset;
- u16 size_of;
- u16 width;
- u16 lsb;
-};
-
-#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
- .offset = offsetof(struct _struct, _ele), \
- .size_of = sizeof_field(struct _struct, _ele), \
- .width = _width, \
- .lsb = _lsb, \
-}
-
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
@@ -551,18 +529,12 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* Tx queue context data */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
- u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 cgd_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
@@ -573,7 +545,7 @@ struct ice_tlan_ctx {
u8 tsyn_ena;
u8 internal_usage_flag;
u8 alt_vlan;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
@@ -582,7 +554,7 @@ struct ice_tlan_ctx {
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
- u32 qlen; /* bigger than needed, see above for reason */
+ u16 qlen;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
@@ -590,7 +562,6 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a7d45a8ce7ac..38a1c8372180 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1700,6 +1700,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf)
return true;
}
+#define ICE_FW_MODE_REC_M BIT(1)
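+/**
+ * ice_is_recovery_mode - check if the device is in FW recovery mode
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the GL_MNG_FWSM register reports firmware recovery mode.
+ */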
+bool ice_is_recovery_mode(struct ice_hw *hw)
+{
+ return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
+}
+
/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 10d6fc479a32..eabb35834a24 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -90,6 +90,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_recovery_mode(struct ice_hw *hw);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0ab35607e5d5..c3a0fb97c5ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,7 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
@@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -1567,6 +1567,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
+ case ice_aqc_opc_get_health_status:
+ ice_process_health_status_event(pf, &event);
+ break;
default:
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
@@ -1816,6 +1819,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
@@ -1829,6 +1834,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
@@ -1842,6 +1849,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
+ queue);
wr32(hw, GL_MDET_RX, 0xffffffff);
}
@@ -2355,6 +2364,18 @@ static void ice_check_media_subtask(struct ice_pf *pf)
}
}
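+
+/* In recovery mode the service task is reduced to draining the admin queue,
+ * so pending AQ events are still handled, and to re-arming the service
+ * timer every 100 ms.
+ */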
+static void ice_service_task_recovery_mode(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+
+ set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
+ ice_clean_adminq_subtask(pf);
+
+ ice_service_task_complete(pf);
+
+ mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
@@ -2364,9 +2385,11 @@ static void ice_service_task(struct work_struct *work)
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
- /* subtasks */
+ if (pf->health_reporters.tx_hang_buf.tx_ring) {
+ ice_report_tx_hang(pf);
+ pf->health_reporters.tx_hang_buf.tx_ring = NULL;
+ }
- /* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending or rebuild failed */
@@ -4741,55 +4764,12 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-/**
- * ice_wait_for_fw - wait for full FW readiness
- * @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
- */
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
-{
- int fw_loading;
- u32 elapsed = 0;
-
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
-
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int err;
- err = ice_init_hw(hw);
- if (err) {
- dev_err(dev, "ice_init_hw failed: %d\n", err);
- return err;
- }
-
- /* Some cards require longer initialization times
- * due to necessity of loading FW from an external source.
- * This can take even half a minute.
- */
- if (ice_is_pf_c827(hw)) {
- err = ice_wait_for_fw(hw, 30000);
- if (err) {
- dev_err(dev, "ice_wait_for_fw timed out");
- return err;
- }
- }
-
ice_init_feature_support(pf);
err = ice_init_ddp_config(hw, pf);
@@ -4810,7 +4790,7 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf;
+ return err;
}
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
@@ -4834,7 +4814,7 @@ int ice_init_dev(struct ice_pf *pf)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_scheme;
+ goto unroll_pf_init;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4845,17 +4825,15 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_req_irq_msix_misc;
+ goto unroll_irq_scheme_init;
}
return 0;
-err_req_irq_msix_misc:
+unroll_irq_scheme_init:
ice_clear_interrupt_scheme(pf);
-err_init_interrupt_scheme:
+unroll_pf_init:
ice_deinit_pf(pf);
-err_init_pf:
- ice_deinit_hw(hw);
return err;
}
@@ -5087,6 +5065,7 @@ static int ice_init_devlink(struct ice_pf *pf)
return err;
ice_devlink_init_regions(pf);
+ ice_health_init(pf);
ice_devlink_register(pf);
return 0;
@@ -5095,6 +5074,7 @@ static int ice_init_devlink(struct ice_pf *pf)
static void ice_deinit_devlink(struct ice_pf *pf)
{
ice_devlink_unregister(pf);
+ ice_health_deinit(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
}
@@ -5249,6 +5229,36 @@ void ice_unload(struct ice_pf *pf)
ice_decfg_netdev(vsi);
}
+static int ice_probe_recovery_mode(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
+
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+ err = ice_create_all_ctrlq(&pf->hw);
+ if (err)
+ return err;
+
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ err = ice_init_devlink(pf);
+ if (err)
+ return err;
+ }
+
+ ice_service_task_restart(pf);
+
+ return 0;
+}
+
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -5312,13 +5322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
pci_set_master(pdev);
-
- adapter = ice_adapter_get(pdev);
- if (IS_ERR(adapter))
- return PTR_ERR(adapter);
-
pf->pdev = pdev;
- pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5346,29 +5350,47 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->debug_mask = debug;
#endif
+ if (ice_is_recovery_mode(hw))
+ return ice_probe_recovery_mode(pf);
+
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto unroll_hw_init;
+ }
+ pf->adapter = adapter;
+
err = ice_init(pf);
if (err)
- goto err_init;
+ goto unroll_adapter;
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
if (err)
- goto err_load;
+ goto unroll_init;
err = ice_init_devlink(pf);
if (err)
- goto err_init_devlink;
+ goto unroll_load;
devl_unlock(priv_to_devlink(pf));
return 0;
-err_init_devlink:
+unroll_load:
ice_unload(pf);
-err_load:
+unroll_init:
devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
-err_init:
+unroll_adapter:
ice_adapter_put(pdev);
+unroll_hw_init:
+ ice_deinit_hw(hw);
return err;
}
@@ -5448,6 +5470,14 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (ice_is_recovery_mode(&pf->hw)) {
+ ice_service_task_stop(pf);
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ ice_deinit_devlink(pf);
+ }
+ return;
+ }
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -6790,7 +6820,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7260,7 +7290,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
@@ -7793,6 +7823,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+ ice_health_clear(pf);
+
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
@@ -8283,16 +8315,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
struct ice_hw *hw = &pf->hw;
- u32 head, val = 0;
+ u32 head, intr = 0;
head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
- val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use, val);
+ head, tx_ring->next_to_use, intr);
+
+ ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
}
pf->tx_timeout_last_recovery = jiffies;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..e26320ce52ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
- /* name, gpio */
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { TIME_SYNC, { 4, -1 }, { 0, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 11 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 15, 14 }},
+ { SDP1, { 1, 1 }, { 15, 14 }},
+ { SDP2, { 2, 2 }, { 15, 14 }},
+ { SDP3, { 3, 3 }, { 15, 14 }},
+ { TIME_SYNC, { 4, -1 }, { 11, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 9 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 0, 1 }},
+ { SDP1, { 1, 1 }, { 0, 1 }},
+ { SDP2, { 2, 2 }, { 0, 1 }},
+ { SDP3, { 3, 3 }, { 0, 1 }},
+ { ONE_PPS, { -1, 5 }, { 0, 1 }},
};
static const char ice_pin_names_nvm[][64] = {
@@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
- /* name, gpio */
- { GNSS, { 1, -1 }},
- { SMA1, { 1, 0 }},
- { UFL1, { -1, 0 }},
- { SMA2, { 3, 2 }},
- { UFL2, { 3, -1 }},
+ /* name, gpio, delay */
+ { GNSS, { 1, -1 }, { 0, 0 }},
+ { SMA1, { 1, 0 }, { 0, 1 }},
+ { UFL1, { -1, 0 }, { 0, 1 }},
+ { SMA2, { 3, 2 }, { 0, 1 }},
+ { UFL2, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -464,7 +464,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
*/
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct sk_buff *skb;
struct ice_pf *pf;
@@ -473,6 +475,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ params = &pf->hw.ptp.phy.e810;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -489,11 +492,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
/* Write TS index to read to the PF register so the FW can read it */
- wr32(&pf->hw, PF_SB_ATQBAL,
- TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
- TS_LL_READ_TS);
+ wr32(&pf->hw, REG_LL_PROXY_H,
+ REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
+ REG_LL_PROXY_H_EXEC);
tx->last_ll_ts_idx_read = idx;
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
@@ -504,35 +513,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
struct skb_shared_hwtstamps shhwtstamps = {};
u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
u64 raw_tstamp, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
+ unsigned long flags;
+ struct device *dev;
struct ice_pf *pf;
- u32 val;
+ u32 reg_ll_high;
if (!tx->init || tx->last_ll_ts_idx_read < 0)
return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ dev = ice_pf_to_dev(pf);
+ params = &pf->hw.ptp.phy.e810;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- val = rd32(&pf->hw, PF_SB_ATQBAL);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
+ dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
+ __func__);
+
+ /* Read the low 32 bit value */
+ raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
+ /* Read the status together with high TS part */
+ reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
+
+ /* Wake up threads waiting on low latency interface */
+ params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
+ wake_up_locked(&params->atqbal_wq);
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
/* When the bit is cleared, the TS is ready in the register */
- if (val & TS_LL_READ_TS) {
+ if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready\n");
return;
}
/* High 8 bit value of the TS is on the bits 16:23 */
- raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
- raw_tstamp <<= 32;
-
- /* Read the low 32 bit value */
- raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+ raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
/* Devices using this interface always verify the timestamp differs
* relative to the last cached timestamp value.
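For reference, the 40-bit timestamp assembly this hunk switches to can be modeled standalone: the low 32 bits come from REG_LL_PROXY_L, the high 8 bits sit in bits 23:16 of REG_LL_PROXY_H, and a still-set EXEC bit means firmware has not finished the read. A minimal sketch, with mask values transcribed from the ice_ptp_hw.h defines later in this diff:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LL_PROXY_H_EXEC          (1u << 31)  /* REG_LL_PROXY_H_EXEC */
#define LL_PROXY_H_TS_HIGH_SHIFT 16
#define LL_PROXY_H_TS_HIGH_MASK  (0xffu << LL_PROXY_H_TS_HIGH_SHIFT)

/* Same assembly as ice_ptp_complete_tx_single_tstamp(): low word from
 * the L register, high byte from bits 23:16 of the H register.
 */
static bool assemble_tstamp(uint32_t reg_l, uint32_t reg_h, uint64_t *ts)
{
    if (reg_h & LL_PROXY_H_EXEC)
        return false;    /* firmware has not completed the read */

    *ts = (uint64_t)((reg_h & LL_PROXY_H_TS_HIGH_MASK) >>
                     LL_PROXY_H_TS_HIGH_SHIFT) << 32 | reg_l;
    return true;
}

int main(void)
{
    uint64_t ts;

    /* high byte 0xab, low word 0x12345678 -> 0xab12345678 */
    if (assemble_tstamp(0x12345678, 0xabu << 16, &ts))
        printf("raw 40-bit timestamp: 0x%010llx\n",
               (unsigned long long)ts);
    return 0;
}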
@@ -1388,10 +1414,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1424,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -1566,18 +1584,29 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ int pin_desc_idx;
+
/* Check if channel is enabled */
- if (pf->ptp.ext_ts_irq & (1 << chan)) {
- lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
- hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
- event.timestamp = (((u64)hi) << 32) | lo;
- event.type = PTP_CLOCK_EXTTS;
- event.index = chan;
-
- /* Fire event */
- ptp_clock_event(pf->ptp.clock, &event);
- pf->ptp.ext_ts_irq &= ~(1 << chan);
+ if (!(pf->ptp.ext_ts_irq & (1 << chan)))
+ continue;
+
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (u64)hi << 32 | lo;
+
+ /* Add delay compensation */
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx >= 0) {
+ const struct ice_ptp_pin_desc *desc;
+
+ desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
+ event.timestamp -= desc->delay[0];
}
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ ptp_clock_event(pf->ptp.clock, &event);
}
}
@@ -1772,9 +1801,9 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
+ unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
- unsigned int gpio_pin;
int pin_desc_idx;
if (rq->flags & ~PTP_PEROUT_PHASE)
@@ -1785,6 +1814,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return -EIO;
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT
@@ -1816,11 +1846,11 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
* at the next multiple of period, maintaining phase.
*/
clk = ice_ptp_read_src_clk_reg(pf, NULL);
- if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
- start -= ice_prop_delay(hw);
+ start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
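The per-pin delay table added at the top of this file feeds two compensations: EXTTS subtracts the input delay (delay[0]) from the captured timestamp, and perout subtracts the output delay (delay[1]) after the start time has been rounded onto a period boundary. A standalone model of that arithmetic; note the driver only re-rounds the start when a phase is requested or the start has already passed:

#include <stdint.h>
#include <stdio.h>

/* delay[0] is the input (EXTTS) path delay, delay[1] the output
 * (perout) propagation delay, both in nanoseconds.
 */
struct pin_delay {
    unsigned int delay[2];
};

/* EXTTS side, as in ice_ptp_extts_event(). */
static uint64_t extts_compensate(uint64_t captured_ns,
                                 const struct pin_delay *pin)
{
    return captured_ns - pin->delay[0];
}

/* Perout side, mirroring the start-time math in ice_ptp_cfg_perout():
 * round up to the next period boundary (keeping the phase), then pull
 * the start forward by the output propagation delay.
 */
static uint64_t perout_start(uint64_t clk_ns, uint64_t period_ns,
                             uint64_t phase_ns, const struct pin_delay *pin)
{
    uint64_t start = (clk_ns + period_ns - 1) / period_ns * period_ns;

    return start + phase_ns - pin->delay[1];
}

int main(void)
{
    struct pin_delay one_pps = { .delay = { 0, 9 } };     /* E825C ONE_PPS */
    struct pin_delay time_sync = { .delay = { 11, 0 } };  /* E825C TIME_SYNC */

    printf("perout start: %llu ns\n",
           (unsigned long long)perout_start(1000000100ULL, 1000000000ULL,
                                            0, &one_pps));
    printf("extts compensated: %llu ns\n",
           (unsigned long long)extts_compensate(1000, &time_sync));
    return 0;
}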
@@ -3080,7 +3110,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
*/
- kworker = kthread_create_worker(0, "ice-ptp-%s",
+ kworker = kthread_run_worker(0, "ice-ptp-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -3164,10 +3194,17 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
- int err;
+ int lane_num, err;
ptp->state = ICE_PTP_INITIALIZING;
+ lane_num = ice_get_phy_lane_number(hw);
+ if (lane_num < 0) {
+ err = lane_num;
+ goto err_exit;
+ }
+
+ ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,10 +3225,6 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..a1d0e988c084 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -211,6 +211,7 @@ enum ice_ptp_pin_nvm {
* struct ice_ptp_pin_desc - hardware pin description data
* @name_idx: index of the name of pin in ice_pin_names
* @gpio: the associated GPIO input and output pins
+ * @delay: input and output signal delays in nanoseconds
*
* Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
* for the device. Device families have separate sets of available pins with
@@ -219,6 +220,7 @@ enum ice_ptp_pin_nvm {
struct ice_ptp_pin_desc {
int name_idx;
int gpio[2];
+ unsigned int delay[2];
};
/**
@@ -310,7 +312,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -358,7 +360,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index d75f0eddd631..ac46d1183300 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
@@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
@@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
@@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
@@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
830078125, /* 830.78125 MHz */
/* nominal_incval */
0x134679aceULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
@@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 518893f23372..ec91822e9280 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -391,7 +391,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -469,7 +469,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -546,7 +546,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -651,7 +651,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -901,30 +901,45 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ /* On a single complex E825, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+ */
+ if (port >= hw->ptp.ports_per_phy)
+ return eth56g_phy_1;
+ else
+ return eth56g_phy_0;
+}
+
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +950,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to read
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be written
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +988,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base[0] +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
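Worth spelling out what the addressing rework above implies: both PHYs now share base[0], because PHY selection moved into the sideband destination device, and lanes 4..7 fold back onto lanes 0..3 of the second PHY. A small model of the combined mapping; the base and step values below are invented placeholders, the real ones come from the eth56g_phy_res[] table:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_BASE  0x1000u    /* placeholder for eth56g_phy_res[].base[0] */
#define EXAMPLE_STEP  0x100u     /* placeholder for eth56g_phy_res[].step */
#define PORTS_PER_PHY 4          /* hw->ptp.ports_per_phy on E825 */

enum dest_dev { PHY_0, PHY_1 };

/* Mirror of ice_ptp_get_dest_dev_e825(): lanes 0..3 are reached via
 * PHY 0, lanes 4..7 via PHY 1.
 */
static enum dest_dev dest_dev_e825(unsigned int port)
{
    return port >= PORTS_PER_PHY ? PHY_1 : PHY_0;
}

/* Mirror of the reworked ice_phy_res_address_eth56g(): single base,
 * lane folded into the per-PHY range.
 */
static uint32_t phy_res_address(unsigned int port, uint32_t offset)
{
    unsigned int lane = port % PORTS_PER_PHY;

    return EXAMPLE_BASE + lane * EXAMPLE_STEP + offset;
}

int main(void)
{
    for (unsigned int port = 0; port < 8; port++)
        printf("port %u -> PHY %d, addr 0x%x\n", port,
               dest_dev_e825(port), phy_res_address(port, 0x20));
    return 0;
}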
@@ -1008,19 +1020,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +1049,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +1209,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @port: Port number
+ * @offset: PHY register offset
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @port: Port number
+ * @offset: PHY register offset
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1919,7 +1977,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1934,8 +1991,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1946,8 +2003,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1988,50 +2045,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2051,7 +2105,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
/**
@@ -2093,21 +2147,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2667,59 +2722,29 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
+ u32 phy_rev;
+ int err;
ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+ err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G)
+ ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -2738,10 +2763,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2753,12 +2777,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = rmn_0;
}
/**
@@ -4857,33 +4876,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ unsigned long flags;
u32 val;
- u8 i;
+ int err;
+
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
/* Write TS index to read to the PF register so the FW can read it */
- val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
- wr32(hw, PF_SB_ATQBAL, val);
+ val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
/* Read the register repeatedly until the FW provides us the TS */
- for (i = TS_LL_READ_RETRIES; i > 0; i--) {
- val = rd32(hw, PF_SB_ATQBAL);
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10,
+ REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
- /* When the bit is cleared, the TS is ready in the register */
- if (!(FIELD_GET(TS_LL_READ_TS, val))) {
- /* High 8 bit value of the TS is on the bits 16:23 */
- *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val);
- /* Read the low 32 bit value and set the TS valid bit */
- *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
- return 0;
- }
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID;
- udelay(10);
- }
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
- /* FW failed to provide the TS in time */
- ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
- return -EINVAL;
+ return 0;
}
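The atqbal_wq serialization used above follows a common shape: the interrupt path sets ATQBAL_FLAGS_INTR_IN_PROGRESS before handing the proxy registers to firmware, and synchronous users sleep until the flag clears, all under the waitqueue lock. A rough pthread analogy, strictly a userspace model (the driver uses wait_event_interruptible_locked_irq() and wake_up_locked()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool intr_in_progress;    /* ATQBAL_FLAGS_INTR_IN_PROGRESS analogue */

static void *intr_path(void *arg)
{
    pthread_mutex_lock(&lock);
    intr_in_progress = true;     /* request issued, completion IRQ pending */
    pthread_mutex_unlock(&lock);

    usleep(1000);                /* firmware latency */

    pthread_mutex_lock(&lock);
    intr_in_progress = false;    /* completion handler ran */
    pthread_cond_broadcast(&cond);    /* wake_up_locked() analogue */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, intr_path, NULL);

    pthread_mutex_lock(&lock);
    while (intr_in_progress)     /* wait_event_..._locked_irq() analogue */
        pthread_cond_wait(&cond, &lock);
    puts("interface idle: safe to issue a synchronous proxy access");
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    return 0;
}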
/**
@@ -5066,6 +5098,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
}
/**
+ * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Use the low latency firmware interface to program PHY time adjustment to
+ * all PHY ports.
+ *
+ * Return: 0 on success, -ETIMEDOUT when the firmware poll times out, or
+ * another negative error code if the wait is interrupted
+ */
+static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, adj);
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment value to program
@@ -5083,6 +5164,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_adj_ll_e810(hw, adj);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
@@ -5106,6 +5190,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
}
/**
+ * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Use the low latency firmware interface to program PHY time increment value
+ * for all PHY ports.
+ *
+ * Return: 0 on success, -ETIMEDOUT when the firmware poll times out, or
+ * another negative error code if the wait is interrupted
+ */
+static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval));
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) |
+ FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
* @hw: pointer to HW struct
* @incval: The new 40bit increment value to prepare
@@ -5120,6 +5254,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_incval_ll_e810(hw, incval);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
@@ -5404,6 +5541,8 @@ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
ptp->phy_model = ICE_PHY_E810;
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
+
+ init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
}
/* Device agnostic functions
@@ -5478,7 +5617,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
else if (ice_is_e810(hw))
ice_ptp_init_phy_e810(ptp);
else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
+ ice_ptp_init_phy_e825(hw);
else
ptp->phy_model = ICE_PHY_UNSUP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1cee0f1bba2d..6779ce120515 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g {
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
- * @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
- u8 pps_delay;
};
/**
@@ -326,8 +324,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
-#define ICE_E810_OUT_PROP_DELAY_NS 1
-#define ICE_E825C_OUT_PROP_DELAY_NS 11
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -389,11 +385,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
return e82x_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
-{
- return e82x_time_ref[time_ref].pps_delay;
-}
-
/* E822 Vernier calibration functions */
int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
@@ -432,20 +423,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
-static inline u64 ice_prop_delay(const struct ice_hw *hw)
-{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_E825C_OUT_PROP_DELAY_NS;
- case ICE_PHY_E810:
- return ICE_E810_OUT_PROP_DELAY_NS;
- case ICE_PHY_E82X:
- return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
- default:
- return 0;
- }
-}
-
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@@ -689,11 +666,18 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define BYTES_PER_IDX_ADDR_L 4
/* Tx timestamp low latency read definitions */
-#define TS_LL_READ_RETRIES 200
-#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
-#define TS_LL_READ_TS_IDX GENMASK(29, 24)
-#define TS_LL_READ_TS_INTR BIT(30)
-#define TS_LL_READ_TS BIT(31)
+#define REG_LL_PROXY_H_TIMEOUT_US 2000
+#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6)
+#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1
+#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2
+#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16)
+#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24)
+#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24)
+#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30)
+#define REG_LL_PROXY_H_EXEC BIT(31)
+
+#define REG_LL_PROXY_L PF_SB_ATQBAH
+#define REG_LL_PROXY_H PF_SB_ATQBAL
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
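Given that layout, the request word ice_ptp_req_tx_single_tstamp() writes to REG_LL_PROXY_H is easy to model: a 6-bit timestamp index in bits 29:24 plus the interrupt-enable and execute bits. A standalone check of the composition, with the masks transcribed from the defines above:

#include <stdint.h>
#include <stdio.h>

#define TS_IDX_SHIFT 24          /* REG_LL_PROXY_H_TS_IDX = GENMASK(29, 24) */
#define TS_INTR_ENA  (1u << 30)  /* REG_LL_PROXY_H_TS_INTR_ENA */
#define EXEC         (1u << 31)  /* REG_LL_PROXY_H_EXEC */

/* Compose a one-shot Tx timestamp request with interrupt completion. */
static uint32_t ll_proxy_ts_request(unsigned int idx)
{
    return EXEC | TS_INTR_ENA | ((uint32_t)(idx & 0x3f) << TS_IDX_SHIFT);
}

int main(void)
{
    /* idx 5 -> 0xc5000000: EXEC | INTR_ENA | (5 << 24) */
    printf("0x%08x\n", ll_proxy_ts_request(5));
    return 0;
}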
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 970a99a52bf1..fb7a1b9a4313 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 75d7147e1c01..1a2c94375ca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -5,8 +5,8 @@
#include "ice_txrx.h"
#include "ice_fltr.h"
#include "ice_sf_eth.h"
-#include "devlink/devlink_port.h"
#include "devlink/devlink.h"
+#include "devlink/port.h"
static const struct net_device_ops ice_sf_netdev_ops = {
.ndo_open = ice_open,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 0e740342e294..4a91e0aaf0a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4784,7 +4784,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
*/
if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
- recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2 &&
+ recp[i].priority == rinfo->priority)
return i; /* Return the recipe ID */
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..33a1a5934c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -18,6 +18,7 @@
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
+#include <linux/wait.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -368,6 +369,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
+#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30)
struct ice_ts_dev_info {
/* Device specific info */
@@ -382,6 +384,7 @@ struct ice_ts_dev_info {
u8 tmr1_ena;
u8 ts_ll_read;
u8 ts_ll_int_read;
+ u8 ll_phy_tmr_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -848,15 +851,23 @@ struct ice_mbx_data {
#define ICE_PORTS_PER_QUAD 4
#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0)
+
+struct ice_e810_params {
+ /* The wait queue lock also protects the low latency interface */
+ wait_queue_head_t atqbal_wq;
+ unsigned int atqbal_flags;
+};
+
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
};
union ice_phy_params {
+ struct ice_e810_params e810;
struct ice_eth56g_params eth56g;
};
@@ -881,7 +892,6 @@ struct ice_ptp_hw {
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -1216,4 +1226,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for Health Status support */
+#define ICE_FW_API_HEALTH_REPORT_MAJ 1
+#define ICE_FW_API_HEALTH_REPORT_MIN 7
+#define ICE_FW_API_HEALTH_REPORT_PATCH 6
+
#endif /* _ICE_TYPE_H_ */
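The gate implied by the ICE_FW_API_HEALTH_REPORT_* defines is a plain lexicographic version comparison. How the driver wires it up is outside this hunk, so the helper below is only an assumed sketch of the check:

#include <stdbool.h>
#include <stdio.h>

#define HEALTH_MAJ   1
#define HEALTH_MIN   7
#define HEALTH_PATCH 6

/* True if the reported AQ API version is at least 1.7.6. */
static bool fw_supports_health_report(unsigned int maj, unsigned int min,
                                      unsigned int patch)
{
    if (maj != HEALTH_MAJ)
        return maj > HEALTH_MAJ;
    if (min != HEALTH_MIN)
        return min > HEALTH_MIN;
    return patch >= HEALTH_PATCH;
}

int main(void)
{
    printf("1.7.5 -> %d\n", fw_supports_health_report(1, 7, 5));    /* 0 */
    printf("1.7.6 -> %d\n", fw_supports_health_report(1, 7, 6));    /* 1 */
    printf("2.0.0 -> %d\n", fw_supports_health_report(2, 0, 0));    /* 1 */
    return 0;
}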
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 334ae945d640..8975d2971bc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -801,35 +801,6 @@ out_failure:
return result;
}
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -895,7 +866,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
if (!first) {
first = xdp;
- } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+ xsk_buff_free(first);
break;
}
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 463c0d26b9d4..6c1b702fd992 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 3c2dc7bdebb5..02f340280d20 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -18,8 +18,10 @@
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <linux/lockdep.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
struct igb_adapter;
@@ -86,6 +88,7 @@ struct igb_adapter;
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -255,6 +258,7 @@ enum igb_tx_flags {
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
+ IGB_TYPE_XSK
};
/* wrapper around a pointer to a socket buffer,
@@ -320,6 +324,7 @@ struct igb_ring {
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
+ struct xdp_buff **rx_buffer_info_zc;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
@@ -357,6 +362,7 @@ struct igb_ring {
};
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -384,7 +390,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG
+ IGB_RING_FLAG_TX_DETECT_HANG,
+ IGB_RING_FLAG_TX_DISABLED
};
#define ring_uses_large_buffer(ring) \
@@ -731,12 +738,21 @@ int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
+void igb_clean_tx_ring(struct igb_ring *tx_ring);
+void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -797,6 +813,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+/* This function assumes __netif_tx_lock is held by the caller. */
+static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
+{
+ return !!READ_ONCE(adapter->xdp_prog);
+}
+
int igb_add_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
@@ -807,4 +850,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
#endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 288a4bb2683a..d368b753a467 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,7 +33,6 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
-#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -116,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
-static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
@@ -475,12 +472,17 @@ rx_ring_summary:
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
- struct igb_rx_buffer *buffer_info;
- buffer_info = &rx_ring->rx_buffer_info[i];
+ dma_addr_t dma = (dma_addr_t)0;
+ struct igb_rx_buffer *buffer_info = NULL;
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (!rx_ring->xsk_pool) {
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ dma = buffer_info->dma;
+ }
+
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
@@ -500,11 +502,11 @@ rx_ring_summary:
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
+ (u64)dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->page) {
+ buffer_info && dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
@@ -1990,7 +1992,11 @@ static void igb_configure(struct igb_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ if (ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(ring, ring->xsk_pool,
+ igb_desc_unused(ring));
+ else
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -2911,37 +2917,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return igb_xsk_pool_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
-/* This function assumes __netif_tx_lock is held by the caller. */
-static void igb_xdp_ring_update_tail(struct igb_ring *ring)
-{
- lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
-
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
-}
-
-static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
-{
- unsigned int r_idx = smp_processor_id();
-
- if (r_idx >= adapter->num_tx_queues)
- r_idx = r_idx % adapter->num_tx_queues;
-
- return adapter->tx_ring[r_idx];
-}
-
-static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
@@ -2955,7 +2944,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return IGB_XDP_CONSUMED;
@@ -2988,10 +2978,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
/* During program transitions it's possible adapter->xdp_prog is assigned
* but the ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return -ENXIO;
+
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
@@ -3042,6 +3036,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_setup_tc = igb_setup_tc,
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
+ .ndo_xsk_wakeup = igb_xsk_wakeup,
};
/**
@@ -3338,7 +3333,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -4364,6 +4360,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4424,7 +4422,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0);
+ rx_ring->queue_index,
+ rx_ring->q_vector->napi.napi_id);
if (res < 0) {
dev_err(dev, "Failed to register xdp_rxq index %u\n",
rx_ring->queue_index);
@@ -4720,12 +4719,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
struct e1000_hw *hw = &adapter->hw;
int reg_idx = ring->reg_idx;
u32 srrctl = 0;
+ u32 buf_size;
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGB_RXBUFFER_3072;
else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGB_RXBUFFER_2048;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -4757,8 +4761,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
u32 rxdctl = 0;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4785,9 +4798,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- /* initialize rx_buffer_info */
- memset(ring->rx_buffer_info, 0,
- sizeof(struct igb_rx_buffer) * ring->count);
+ if (ring->xsk_pool)
+ memset(ring->rx_buffer_info_zc, 0,
+ sizeof(*ring->rx_buffer_info_zc) * ring->count);
+ else
+ memset(ring->rx_buffer_info, 0,
+ sizeof(*ring->rx_buffer_info) * ring->count);
/* initialize Rx descriptor 0 */
rx_desc = IGB_RX_DESC(ring, 0);
@@ -4888,19 +4904,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
-static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs or xdp frames */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
dev_kfree_skb_any(tx_buffer->skb);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -4931,6 +4952,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+skip_for_xsk:
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
@@ -4945,6 +4967,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -4975,8 +5000,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = NULL;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ if (rx_ring->xsk_pool) {
+ vfree(rx_ring->rx_buffer_info_zc);
+ rx_ring->rx_buffer_info_zc = NULL;
+ } else {
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ }
/* if not set, then don't free */
if (!rx_ring->desc)
@@ -5007,13 +5037,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
* igb_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
+ if (rx_ring->xsk_pool) {
+ igb_clean_rx_ring_zc(rx_ring);
+ goto skip_for_xsk;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
@@ -5041,6 +5076,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
i = 0;
}
+skip_for_xsk:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -6467,6 +6503,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return NETDEV_TX_BUSY;
+
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGB_TYPE_SKB;
@@ -6622,7 +6661,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
- if (adapter->xdp_prog) {
+ if (igb_xdp_is_enabled(adapter)) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -8195,6 +8234,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
+ struct xsk_buff_pool *xsk_pool;
bool clean_complete = true;
int work_done = 0;
@@ -8206,7 +8246,12 @@ static int igb_poll(struct napi_struct *napi, int budget)
clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
- int cleaned = igb_clean_rx_irq(q_vector, budget);
+ int cleaned;
+
+ xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool);
+ cleaned = xsk_pool ?
+ igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) :
+ igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -8235,13 +8280,18 @@ static int igb_poll(struct napi_struct *napi, int budget)
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
- struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx.ring;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
unsigned int budget = q_vector->tx.work_limit;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
unsigned int i = tx_ring->next_to_clean;
+ union e1000_adv_tx_desc *tx_desc;
+ struct igb_tx_buffer *tx_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ int cpu = smp_processor_id();
+ bool xsk_xmit_done = true;
+ struct netdev_queue *nq;
+ u32 xsk_frames = 0;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
@@ -8272,10 +8322,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
napi_consume_skb(tx_buffer->skb, napi_budget);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -8307,6 +8361,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
+skip_for_xsk:
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -8335,6 +8390,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ xsk_pool = READ_ONCE(tx_ring->xsk_pool);
+ if (xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+ xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool);
+ __netif_tx_unlock(nq);
+ }
+
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
@@ -8397,7 +8467,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
- return !!budget;
+ return !!budget && xsk_xmit_done;
}
/**
@@ -8588,9 +8658,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
-static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
- struct igb_ring *rx_ring,
- struct xdp_buff *xdp)
+static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IGB_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -8630,7 +8699,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
@@ -8756,10 +8825,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8786,9 +8851,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void igb_process_skb_fields(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
@@ -8870,6 +8935,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
rx_buffer->page = NULL;
}
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+
+ if (status & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (status & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+}
+
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes)
+{
+ struct igb_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
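igb_update_rx_stats() is the writer side of the ring's u64_stats seqcount. For reference, a minimal sketch of the matching reader side (as a stats query would use it), assuming the standard u64_stats fetch API:

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&ring->rx_syncp);
		packets = ring->rx_stats.packets;
		bytes = ring->rx_stats.bytes;
	} while (u64_stats_fetch_retry(&ring->rx_syncp, start));

The retry loop guarantees a consistent packets/bytes pair even when the writer updates the counters concurrently on another CPU.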
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -8877,12 +8974,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
struct igb_ring *rx_ring = q_vector->rx.ring;
u16 cleaned_count = igb_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
- int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
- struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -8940,12 +9036,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
- skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = igb_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
xdp_xmit |= xdp_res;
igb_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -8964,7 +9058,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
&xdp, timestamp);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -8978,7 +9072,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -9001,24 +9095,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush();
-
- if (xdp_xmit & IGB_XDP_TX) {
- struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
-
- nq = txring_txq(tx_ring);
- __netif_tx_lock(nq, cpu);
- igb_xdp_ring_update_tail(tx_ring);
- __netif_tx_unlock(nq);
- }
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
new file mode 100644
index 000000000000..157d43787fa0
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "e1000_hw.h"
+#include "igb.h"
+
+static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
+{
+ int size = pool_present ?
+ sizeof(*ring->rx_buffer_info_zc) * ring->count :
+ sizeof(*ring->rx_buffer_info) * ring->count;
+ void *buff_info = vmalloc(size);
+
+ if (!buff_info)
+ return -ENOMEM;
+
+ if (pool_present) {
+ vfree(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+ ring->rx_buffer_info_zc = buff_info;
+ } else {
+ vfree(ring->rx_buffer_info_zc);
+ ring->rx_buffer_info_zc = NULL;
+ ring->rx_buffer_info = buff_info;
+ }
+
+ return 0;
+}
+
+static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+ struct e1000_hw *hw = &adapter->hw;
+
+ set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
+ wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);
+
+ synchronize_net();
+
+ /* Rx/Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ igb_clean_tx_ring(tx_ring);
+ igb_clean_rx_ring(rx_ring);
+
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+
+ igb_configure_tx_ring(adapter, tx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ synchronize_net();
+
+ clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ if (rx_ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool,
+ igb_desc_unused(rx_ring));
+ else
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ /* Rx/Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+}
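The refill calls above size each batch with igb_desc_unused(). For reference, a minimal sketch of that helper, assuming the usual next_to_use/next_to_clean accounting in struct igb_ring:

	static inline u16 igb_desc_unused(struct igb_ring *ring)
	{
		/* One descriptor is always left unused so that
		 * next_to_use == next_to_clean unambiguously means "empty".
		 */
		if (ring->next_to_clean > ring->next_to_use)
			return ring->next_to_clean - ring->next_to_use - 1;
		return ring->count + ring->next_to_clean - ring->next_to_use - 1;
	}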
+
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ int qid = ring->queue_index;
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+
+ if (!igb_xdp_is_enabled(adapter))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+static int igb_xsk_pool_enable(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, true);
+ if (!err) {
+ igb_txrx_ring_enable(adapter, qid);
+ /* Kick start the NAPI context so that receiving will start */
+ err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
+ }
+
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct xsk_buff_pool *pool;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, false);
+ if (err)
+ return err;
+
+ igb_txrx_ring_enable(adapter, qid);
+ }
+
+ return 0;
+}
+
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
+ igb_xsk_pool_disable(adapter, qid);
+}
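igb_xsk_pool_setup() is reached from the XDP_SETUP_XSK_POOL case of the driver's ndo_bpf handler shown earlier. A hypothetical user-space sketch of what triggers it; the helper name and the omission of error handling are illustrative only:

	#include <linux/if_xdp.h>
	#include <string.h>
	#include <sys/socket.h>

	/* bind_xsk is a hypothetical helper; ifindex is assumed to be
	 * resolved beforehand.
	 */
	static int bind_xsk(int ifindex, unsigned int queue_id)
	{
		struct sockaddr_xdp sxdp;
		int fd = socket(AF_XDP, SOCK_RAW, 0);

		memset(&sxdp, 0, sizeof(sxdp));
		sxdp.sxdp_family = AF_XDP;
		sxdp.sxdp_ifindex = ifindex;
		sxdp.sxdp_queue_id = queue_id; /* becomes qid in igb_xsk_pool_setup() */

		/* UMEM registration and ring setup via setsockopt() are omitted;
		 * bind() is the call that makes the core invoke ndo_bpf with
		 * XDP_SETUP_XSK_POOL for this queue.
		 */
		return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	}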
+
+static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union e1000_adv_rx_desc *rx_desc, u16 count)
+{
+ dma_addr_t dma;
+ u16 buffs;
+ int i;
+
+ /* nothing to do */
+ if (!count)
+ return 0;
+
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.upper.length = 0;
+
+ rx_desc++;
+ xdp++;
+ }
+
+ return buffs;
+}
+
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union e1000_adv_rx_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ xdp = &rx_ring->rx_buffer_info_zc[ntu];
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->rx_buffer_info_zc;
+ ntu = 0;
+ count -= nb_buffs_extra;
+ }
+
+ nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+ ntu = 0;
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ rx_desc->wb.upper.length = 0;
+
+exit:
+ if (rx_ring->next_to_use != ntu) {
+ rx_ring->next_to_use = ntu;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(ntu, rx_ring->tail);
+ }
+
+ return total_count == (nb_buffs + nb_buffs_extra);
+}
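A worked example of the wrap handling above: with ring->count = 256, next_to_use = 250 and count = 16, the first igb_fill_rx_descs() call covers the 6 descriptors up to the end of the ring; the cursor then wraps to 0 and the second call fills the remaining 10, after which the length field of the new next_to_use descriptor is cleared so the hardware stops there.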
+
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring)
+{
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc];
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
+ }
+}
+
+static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+ return skb;
+}
+
+static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = IGB_XDP_PASS;
+ u32 act;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ return IGB_XDP_REDIR;
+
+ if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
+ result = IGB_XDP_EXIT;
+ else
+ result = IGB_XDP_CONSUMED;
+ goto out_failure;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+
+ return result;
+}
+
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ u32 ntc = rx_ring->next_to_clean;
+ struct bpf_prog *xdp_prog;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
+ u16 entries_to_alloc;
+ struct sk_buff *skb;
+
+ /* xdp_prog cannot be NULL in the ZC path */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union e1000_adv_rx_desc *rx_desc;
+ ktime_t timestamp = 0;
+ struct xdp_buff *xdp;
+ unsigned int size;
+ int xdp_res = 0;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ xdp = rx_ring->rx_buffer_info_zc[ntc];
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ xdp->data,
+ &timestamp);
+
+ xdp->data += ts_hdr_len;
+ xdp->data_meta += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
+
+ xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool,
+ xdp_prog);
+
+ if (xdp_res) {
+ if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IGB_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IGB_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ }
+
+ total_packets++;
+ total_bytes += size;
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+ continue;
+ }
+
+ skb = igb_construct_skb_zc(rx_ring, xdp, timestamp);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ break;
+ }
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ rx_ring->next_to_clean = ntc;
+
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
+
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ entries_to_alloc = igb_desc_unused(rx_ring);
+ if (entries_to_alloc >= IGB_RX_BUFFER_WRITE)
+ failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
+
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xsk_pool);
+
+ return (int)total_packets;
+ }
+ return failure ? budget : (int)total_packets;
+}
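When this function sets the Rx need_wakeup flag (a refill failure or a fully drained ring), the application is expected to kick the kernel itself. A sketch of the conventional user-space reaction, assuming xsk_fd is the AF_XDP socket and the flag has been observed on the fill ring (e.g. via libxdp's xsk_ring_prod__needs_wakeup()):

	recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);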
+
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+{
+ unsigned int budget = igb_desc_unused(tx_ring);
+ u32 cmd_type, olinfo_status, nb_pkts, i = 0;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igb_tx_buffer *tx_buffer_info;
+ unsigned int total_bytes = 0;
+ dma_addr_t dma;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))
+ return true;
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ while (nb_pkts-- > 0) {
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer_info->bytecount = descs[i].len;
+ tx_buffer_info->type = IGB_TYPE_XSK;
+ tx_buffer_info->xdpf = NULL;
+ tx_buffer_info->gso_segs = 1;
+ tx_buffer_info->time_stamp = jiffies;
+
+ tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* FIXME: This sets the Report Status (RS) bit for every
+ * descriptor. One nice-to-have optimization would be to set it
+ * only for the last descriptor in the whole batch. See the Intel
+ * ice driver for an example of how to do it.
+ */
+ cmd_type |= descs[i].len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ total_bytes += descs[i].len;
+
+ i++;
+ tx_ring->next_to_use++;
+ tx_buffer_info->next_to_watch = tx_desc;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ }
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return nb_pkts < budget;
+}
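The FIXME above can be addressed by requesting a writeback only once per batch. A hypothetical variant of the cmd_type setup inside the loop, assuming E1000_ADVTXD_DCMD_EOP and E1000_ADVTXD_DCMD_RS are the EOP and RS components of IGB_TXD_DCMD; note the Tx cleanup path would also have to complete all packets up to the descriptor whose DD bit lands:

	cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
		   E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_EOP |
		   descs[i].len;
	/* nb_pkts was post-decremented by the loop condition, so it is
	 * zero exactly on the final iteration.
	 */
	if (!nb_pkts)
		cmd_type |= E1000_ADVTXD_DCMD_RS;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);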
+
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
+ u32 eics = 0;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igb_xdp_is_enabled(adapter))
+ return -EINVAL;
+
+ if (qid >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[qid];
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ /* Cause software interrupt */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ eics |= ring->q_vector->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+ }
+
+ return 0;
+}
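On the Tx side, this handler is also what an empty sendto() on the AF_XDP socket reaches through ndo_xsk_wakeup. A sketch of the conventional user-space kick, assuming xsk_fd is the AF_XDP socket and the need_wakeup flag has been observed on the Tx ring:

	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);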
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index eac0f966e0e4..b8111ad9a9a8 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -337,6 +337,8 @@ struct igc_adapter {
struct igc_led_classdev *leds;
};
+void igc_set_queue_napi(struct igc_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_open(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index d9d1a1a11daf..be8a49a86d09 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -279,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-
#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 27872bdea9bd..56a35d58e7a6 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2123,10 +2123,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -2515,8 +2511,7 @@ out_failure:
}
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
@@ -2530,7 +2525,7 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
res = __igc_xdp_run_prog(adapter, prog, xdp);
out:
- return ERR_PTR(-res);
+ return res;
}
/* This function assumes __netif_tx_lock is held by the caller. */
@@ -2585,6 +2580,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
+ int xdp_res = 0;
while (likely(total_packets < budget)) {
struct igc_xdp_buff ctx = { .rx_ts = NULL };
@@ -2630,12 +2626,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_buff_clear_frags_flag(&ctx.xdp);
ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &ctx.xdp);
+ xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
switch (xdp_res) {
case IGC_XDP_CONSUMED:
rx_buffer->pagecnt_bias++;
@@ -2657,7 +2651,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
@@ -2672,7 +2666,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -4948,6 +4942,22 @@ static int igc_sw_init(struct igc_adapter *adapter)
return 0;
}
+void igc_set_queue_napi(struct igc_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -4955,6 +4965,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i = 0;
/* hardware has been reset, we need to reload some things */
@@ -4962,8 +4973,11 @@ void igc_up(struct igc_adapter *adapter)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
if (adapter->msix_entries)
igc_configure_msix(adapter);
@@ -5192,6 +5206,7 @@ void igc_down(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igc_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
@@ -5576,6 +5591,9 @@ static int igc_request_msix(struct igc_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igc_configure_msix(adapter);
@@ -6018,6 +6036,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err = 0;
int i = 0;
@@ -6053,8 +6072,11 @@ static int __igc_open(struct net_device *netdev, bool resuming)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(IGC_ICR);
@@ -6779,45 +6801,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_get_tstamp = igc_get_tstamp,
};
-/* PCIe configuration access */
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, value);
-}
-
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_write_config_word(adapter->pdev, reg, *value);
-}
-
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_read_word(adapter->pdev, reg, value);
-
- return IGC_SUCCESS;
-}
-
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_write_word(adapter->pdev, reg, *value);
-
- return IGC_SUCCESS;
-}
-
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
@@ -7338,7 +7321,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int igc_resume(struct device *dev)
+static int __igc_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7381,7 +7364,11 @@ static int igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
if (netif_running(netdev)) {
+ if (!rpm)
+ rtnl_lock();
err = __igc_open(netdev, true);
+ if (!rpm)
+ rtnl_unlock();
if (!err)
netif_device_attach(netdev);
}
@@ -7389,9 +7376,14 @@ static int igc_resume(struct device *dev)
return err;
}
+static int igc_resume(struct device *dev)
+{
+ return __igc_resume(dev, false);
+}
+
static int igc_runtime_resume(struct device *dev)
{
- return igc_resume(dev);
+ return __igc_resume(dev, true);
}
static int igc_suspend(struct device *dev)
@@ -7436,14 +7428,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7454,7 +7450,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igc_resume routine.
+ * resembles the first-half of the __igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
@@ -7493,7 +7489,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igc_resume routine.
+ * second-half of the __igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index 58f81aba0144..efd121c03967 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
}
/**
- * igc_acquire_nvm - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -IGC_ERR_NVM (-1).
- */
-s32 igc_acquire_nvm(struct igc_hw *hw)
-{
- s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
- u32 eecd = rd32(IGC_EECD);
- s32 ret_val = 0;
-
- wr32(IGC_EECD, eecd | IGC_EECD_REQ);
- eecd = rd32(IGC_EECD);
-
- while (timeout) {
- if (eecd & IGC_EECD_GNT)
- break;
- udelay(5);
- eecd = rd32(IGC_EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
- hw_dbg("Could not acquire NVM grant\n");
- ret_val = -IGC_ERR_NVM;
- }
-
- return ret_val;
-}
-
-/**
- * igc_release_nvm - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- */
-void igc_release_nvm(struct igc_hw *hw)
-{
- u32 eecd;
-
- eecd = rd32(IGC_EECD);
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
-}
-
-/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
index f9fc2e9cfb03..ab78d0c64547 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.h
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -4,8 +4,6 @@
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
-s32 igc_acquire_nvm(struct igc_hw *hw);
-void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index e27af72aada8..13bbd3346e01 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -13,6 +13,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
+ bool need_update;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -22,7 +23,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
- if (if_running)
+ need_update = !!adapter->xdp_prog != !!prog;
+ if (if_running && need_update)
igc_close(dev);
old_prog = xchg(&adapter->xdp_prog, prog);
@@ -34,7 +36,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running)
+ if (if_running && need_update)
igc_open(dev);
return 0;
@@ -84,6 +86,7 @@ static int igc_xdp_enable_pool(struct igc_adapter *adapter,
napi_disable(napi);
}
+ igc_set_queue_napi(adapter, queue_id, NULL);
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
@@ -133,6 +136,7 @@ static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+ igc_set_queue_napi(adapter, queue_id, napi);
if (needs_reset) {
napi_enable(napi);
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 965e5ce1b326..b456d102655a 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2018 Intel Corporation.
+# Copyright(c) 1999 - 2024 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
@@ -9,7 +9,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ ixgbe_xsk.o ixgbe_e610.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 559b443c409f..e6a380d4929b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -20,6 +20,7 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
+#include "ixgbe_e610.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
@@ -173,6 +174,7 @@ enum ixgbe_tx_flags {
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define IXGBE_E610_VF_DEVICE_ID 0x57AD
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
{ \
@@ -654,6 +656,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
+#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
@@ -661,6 +664,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
+#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
+#define IXGBE_FLAG2_NO_MEDIA BIT(21)
+#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
/* Tx fast path data */
int num_tx_queues;
@@ -793,6 +799,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
+ u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
@@ -849,6 +856,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -874,6 +882,7 @@ enum ixgbe_state_t {
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
__IXGBE_RESET_REQUESTED,
+ __IXGBE_PHY_INIT_COMPLETE,
};
struct ixgbe_cb {
@@ -896,6 +905,7 @@ enum ixgbe_boards {
board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
+ board_e610,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -906,6 +916,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
+extern const struct ixgbe_info ixgbe_e610_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index cdaf087b4e85..964988b4d58b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1615,6 +1615,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3be1bfb16498..7beaf6ea57f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -469,9 +472,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_e610) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
@@ -660,7 +668,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
+ if (hw->mac.type == ixgbe_mac_e610)
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
+ else
+ link_status = ixgbe_read_pci_cfg_word(hw,
+ IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
@@ -2918,6 +2930,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_e610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return 1;
}
@@ -3366,7 +3382,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
- if ((hw->mac.type >= ixgbe_mac_X550) &&
+ if ((hw->mac.type >= ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_e610) &&
(links_reg & IXGBE_LINKS_SPEED_NON_STD))
*speed = IXGBE_LINK_SPEED_5GB_FULL;
else
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f2709b10c2e5..19d6b6fa8fb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
@@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..683c668672d6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -0,0 +1,2658 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_type.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_mbx.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if the ACI command should be sent again depending on the provided
+ * opcode. A resend may be needed when the CSR is busy, for example during
+ * link state changes.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * The Admin Command is sent via CSR by writing the descriptor and buffer
+ * into dedicated registers.
+ *
+ * Return: the exit code of the operation.
+ * * - 0 - success.
+ * * - -EIO - CSR mechanism is not enabled.
+ * * - -EBUSY - CSR mechanism is busy.
+ * * - -EINVAL - buf_size is too big, or buf and buf_size are
+ * inconsistent with each other.
+ * * - -ETIME - Admin Command timed out.
+ * * - -EIO - invalid state of the HICR register, a bad opcode was
+ * returned, or the Admin Command failed with an error.
+ */
+static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
+ struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode, buf_tail_size = buf_size % 4;
+ u32 *raw_desc = (u32 *)desc;
+ u32 hicr, i, buf_tail = 0;
+ bool valid_buf = false;
+
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* It's necessary to check if the mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
+
+ if (!(hicr & IXGBE_PF_HICR_EN))
+ return -EIO;
+
+ if (hicr & IXGBE_PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ return -EBUSY;
+ }
+
+ opcode = le16_to_cpu(desc->opcode);
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (buf)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && !buf_size) || (!buf && buf_size))
+ return -EINVAL;
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_tail_size)
+ memcpy(&buf_tail, buf + buf_size - buf_tail_size,
+ buf_tail_size);
+
+ if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = cpu_to_le16(buf_size);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < buf_size / 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
+ if (buf_tail_size)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
+
+ /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+ * PF_HICR_EV
+ */
+ hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
+ ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
+
+#define MAX_SLEEP_RESP_US 1000
+#define MAX_TMOUT_RESP_SYNC_US 100000000
+
+ /* Wait for sync Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_SV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+#define MAX_TMOUT_RESP_ASYNC_US 150000000
+
+ /* Wait for async Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_EV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+ /* Read sync Admin Command response */
+ if (hicr & IXGBE_PF_HICR_SV) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & IXGBE_PF_HICR_C)
+ return -ETIME;
+
+ if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
+ return -EIO;
+
+ /* For every command other than 0x0014 treat an opcode mismatch
+ * as an error. The response to the 0x0014 command read from HIDA_2
+ * is an event descriptor, which is expected to carry a different
+ * opcode than the command.
+ */
+ if (desc->opcode != cpu_to_le16(opcode) &&
+ opcode != ixgbe_aci_opc_get_fw_event)
+ return -EIO;
+
+ if (desc->retval) {
+ hw->aci.last_status = (enum ixgbe_aci_err)
+ le16_to_cpu(desc->retval);
+ return -EIO;
+ }
+
+ /* Write response values back to the caller's buffer */
+ if (valid_buf) {
+ for (i = 0; i < buf_size / 4; i++)
+ ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ if (buf_tail_size) {
+ buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ memcpy(buf + buf_size - buf_tail_size, &buf_tail,
+ buf_tail_size);
+ }
+ }
+
+ return 0;
+}
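A worked example of the tail handling above: an indirect command with buf_size = 10 gives buf_tail_size = 10 % 4 = 2, so the loop moves two full dwords through PF_HIBA and the remaining two bytes travel in buf_tail, zero-padded to a dword. The same split is applied in reverse when the response is copied back to the caller's buffer.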
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode = le16_to_cpu(desc->opcode);
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ u8 idx = 0, *buf_cpy = NULL;
+ bool is_cmd_for_retry;
+ unsigned long timeout;
+ int err;
+
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return -ENOMEM;
+ memcpy(buf_cpy, buf, buf_size);
+ }
+ desc_cpy = *desc;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
+ do {
+ mutex_lock(&hw->aci.lock);
+ err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ mutex_unlock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || !err ||
+ last_status != IXGBE_ACI_RC_EBUSY)
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ *desc = desc_cpy;
+
+ msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
+ time_before(jiffies, timeout));
+
+ kfree(buf_cpy);
+
+ return err;
+}
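+
+/* Usage sketch (illustrative only, not part of the driver): sending a
+ * direct (buffer-less) command through the retry wrapper above. The
+ * opcode value and the caller are hypothetical.
+ *
+ *	static int example_send_direct_cmd(struct ixgbe_hw *hw, u16 opcode)
+ *	{
+ *		struct ixgbe_aci_desc desc;
+ *
+ *		ixgbe_fill_dflt_direct_cmd_desc(&desc, opcode);
+ *		return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ *	}
+ */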
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+ u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+
+	return !!(fwsts & ep_bit_mask);
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!e || (!e->msg_buf && e->buf_len))
+ return -EINVAL;
+
+ mutex_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (err)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending)
+ *pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ mutex_unlock(&hw->aci.lock);
+
+ return err;
+}
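+
+/* Usage sketch (illustrative only, not part of the driver): draining all
+ * pending FW events. example_handle_event() is a hypothetical consumer;
+ * everything else is taken from this file.
+ *
+ *	static void example_drain_events(struct ixgbe_hw *hw)
+ *	{
+ *		struct ixgbe_aci_event e = { .buf_len = IXGBE_ACI_MAX_BUFFER_SIZE };
+ *		bool pending = true;
+ *
+ *		e.msg_buf = kzalloc(e.buf_len, GFP_KERNEL);
+ *		if (!e.msg_buf)
+ *			return;
+ *
+ *		while (pending && !ixgbe_aci_get_event(hw, &e, &pending))
+ *			example_handle_event(hw, &e);
+ *
+ *		kfree(e.msg_buf);
+ *	}
+ */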
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* Zero out the desc. */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy status is returned and the timeout field indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access,
+ u8 sdp_number, u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = cpu_to_le16(res);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_ids res, u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = cpu_to_le16(res);
+ cmd->res_number = cpu_to_le32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * the ixgbe_aci_req_res to utilize ACI.
+ * If some other driver has previously acquired the resource and
+ * performed any necessary updates, -EALREADY is returned and the
+ * caller does not obtain the resource, as there is no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout;
+ int err;
+
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of -EALREADY means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (err == -EALREADY)
+ return err;
+
+	/* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (err && retry_timeout && res_timeout) {
+ msleep(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* Success - lock acquired.
+ * -EALREADY - lock free, no work to do.
+ */
+ if (!err || err == -EALREADY)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ int err;
+
+ err = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while (err == -ETIME &&
+ total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
+ usleep_range(1000, 1500);
+ err = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
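+
+/* Usage sketch (illustrative only, not part of the driver): the expected
+ * acquire/release pattern around a critical section, shown here with the
+ * NVM resource ID and timeout used elsewhere in this file. -EALREADY is
+ * not a failure: another function already did the work.
+ *
+ *	err = ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, IXGBE_RES_READ,
+ *				IXGBE_NVM_TIMEOUT);
+ *	if (err == -EALREADY)
+ *		return 0;
+ *	if (err)
+ *		return err;
+ *
+ *	... access the shared resource ...
+ *
+ *	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+ */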
+
+/**
+ * ixgbe_parse_e610_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ default:
+ /* Not one of the recognized common capabilities */
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
+ "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->num_allocd_vfs = le32_to_cpu(cap->number);
+ func_p->vf_base_id = le32_to_cpu(cap->logical_id);
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+#define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0)
+ u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ return funcs ? (max / funcs) : 0;
+}
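+
+/* Worked example (illustrative, with hypothetical numbers): with
+ * valid_functions = 0x0F (four PFs) and max = 128, each PF is granted
+ * 128 / 4 = 32 resources.
+ */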
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return -ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of -ENOMEM means the buffer size is too small.
+ */
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = le32_to_cpu(cmd->count);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!err)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!err)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (err)
+ return err;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
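+
+/* Usage sketch (illustrative only, not part of the driver): capability
+ * discovery is meant to run once at init time, after which the parsed
+ * limits are available from the hw context, e.g.:
+ *
+ *	err = ixgbe_get_caps(hw);
+ *	if (err)
+ *		return err;
+ *	num_msix = hw->func_caps.common_cap.num_msix_vectors;
+ */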
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= cpu_to_le16(report_mode);
+ err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+ if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!cfg)
+ return -EINVAL;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.params.set_phy.lport_num = hw->bus.func;
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ if (!err)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return err;
+}
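+
+/* Usage sketch (illustrative only, not part of the driver): the intended
+ * read-modify-write flow for PHY configuration, as also used by
+ * ixgbe_setup_fc_e610() below: read the active config, convert it,
+ * tweak only the bits of interest and write the result back.
+ *
+ *	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ *	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ *
+ *	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ *				     &pcaps);
+ *	if (err)
+ *		return err;
+ *
+ *	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+ *	cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+ *	err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ */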
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = hw->bus.func;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
+
+ /* Set node type. */
+ cmd->addr.topo_params.node_type_ctx |=
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+	/* Node type cage can be used to determine if the cage is present. If
+	 * the ACI command returns an error (e.g. -ENOENT), then no cage is
+	 * present and the connection type is backplane or BASE-T.
+	 */
+	return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the PHY type:
+ * phy_type_low is checked first, then phy_type_high.
+ * If more than one media type is set, or none can be identified,
+ * ixgbe_media_type_unknown is returned.
+ *
+ * Return: type of a media based on phy type in form of enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+		 * in a backplane. Try to detect this case and handle it.
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
+ if (ixgbe_is_media_cage_present(hw))
+ return ixgbe_media_type_aui;
+ fallthrough;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li = &hw->link.link_info;
+
+ err = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (err)
+ return err;
+
+ if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ return 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (!err)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ kfree(pcaps);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is true if the link is up, false if it is down;
+ * its value is invalid if the return status is non-zero. As a side
+ * effect of this call, link status event reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ if (!hw || !link_up)
+ return -EINVAL;
+
+ if (hw->link.get_link_info) {
+ int err = ixgbe_update_link_info(hw);
+
+ if (err)
+ return err;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = {};
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+	cmd_flags = ena_lse ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cpu_to_le16(cmd_flags);
+ resp->lport_num = hw->bus.func;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (err)
+ return err;
+
+ /* Save off old link status information. */
+ *li_old = *li;
+
+ /* Update current link status information. */
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* Update fc info. */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
+ IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* Save link status information. */
+ if (link)
+ *link = *li;
+
+	/* Flag cleared so calling functions don't send the ACI command again. */
+ hw->link.get_link_info = false;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true for enable lse, false otherwise
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ int err;
+
+ err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (err)
+ return err;
+
+	/* Enable or disable link status event generation by FW. */
+ return ixgbe_aci_get_link_info(hw, activate, NULL);
+}
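+
+/* Usage sketch (illustrative only, not part of the driver): because a set
+ * mask bit deactivates the corresponding event, a zero mask subscribes to
+ * every link status event:
+ *
+ *	err = ixgbe_configure_lse(hw, true, 0);
+ */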
+
+/**
+ * ixgbe_get_media_type_e610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * To get the media type, the function retrieves the PHY
+ * capabilities and uses them to identify the PHY type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ int rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+	/* If there is no link but a PHY (dongle) is available, SW should use
+	 * the Get PHY Caps admin command instead of Get Link Status, find the
+	 * most significant bit set in the PHY types reported by the command,
+	 * and use it to discover the media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ int highest_bit;
+
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_high =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_low = 0;
+ } else {
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
+ if (highest_bit)
+ hw->link.link_info.phy_type_low =
+ BIT_ULL(highest_bit - 1);
+ }
+ }
+
+ /* Based on link status or search above try to discover media type. */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
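+
+/* Worked example (illustrative, with a hypothetical value): if FW reports
+ * pcaps.phy_type_low = 0x88 (bits 3 and 7 set), then fls64(0x88) = 8 and
+ * BIT_ULL(8 - 1) = 0x80, so only the most significant PHY type bit is kept
+ * for media detection.
+ */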
+
+/**
+ * ixgbe_setup_link_e610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_e610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ int err;
+ u32 i;
+
+ if (!speed || !link_up)
+ return -EINVAL;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command.
+ */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && !(*link_up)) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msleep(100);
+ hw->link.get_link_info = true;
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW.
+ */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
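+
+/* Usage sketch (illustrative only, not part of the driver): a typical
+ * watchdog-style query that does not wait for link-up:
+ *
+ *	ixgbe_link_speed speed;
+ *	bool link_up;
+ *
+ *	err = ixgbe_check_link_e610(hw, &speed, &link_up, false);
+ */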
+
+/**
+ * ixgbe_get_link_capabilities_e610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return -EINVAL;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return -EINVAL;
+
+ switch (req_mode) {
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the old pause settings. */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* Set the new capabilities. */
+ cfg->caps |= pause_mask;
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_fc_e610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ int err;
+
+ /* Get the current PHY config */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (err)
+ return err;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_fc_autoneg_e610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+	/* Get current link status.
+ * Current FC mode will be stored in the hw context.
+ */
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ goto no_autoneg;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+ goto no_autoneg;
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
+ goto no_autoneg;
+
+ hw->fc.fc_was_autonegged = true;
+ return;
+
+no_autoneg:
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_disable_rx_e610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C).
+ */
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ u32 pfdtxgswc;
+ int err;
+
+ if (!(rxctrl & IXGBE_RXCTRL_RXEN))
+ return;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ err = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (err) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+}
+
+/**
+ * ixgbe_init_phy_ops_e610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
+ else
+ phy->ops.set_phy_power = NULL;
+
+ /* Identify the PHY */
+ return phy->ops.identify(hw);
+}
+
+/**
+ * ixgbe_identify_phy_e610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type_low, phy_type_high;
+ int err;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ if (err)
+ return err;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (err)
+ return err;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+ phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+ phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+
+ if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in case of E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_identify_module_e610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ int err;
+
+ err = ixgbe_update_link_info(hw);
+ if (err)
+ return err;
+
+ media_available =
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by
+ * ixgbe_update_link_info()
+ */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ u64 phy_type_low = 0, phy_type_high = 0;
+ int err;
+
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ return err;
+
+ /* If media is not available get default config. */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (err)
+ return err;
+
+ sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+ sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+
+ /* Get Active configuration to avoid unintended changes. */
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types. */
+ phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_low = cpu_to_le64(phy_type_low);
+ phy_type_high &= sup_phy_type_high;
+ pcfg.phy_type_high = cpu_to_le64(phy_type_high);
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_phy_power_e610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on)
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ else
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+
+ /* PHY is already in requested power mode. */
+ if (phy_caps.caps == phy_cfg.caps)
+ return 0;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_enter_lplu_e610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
+ * struct in order to set up EEPROM access.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type != ixgbe_eeprom_uninitialized)
+ return 0;
+
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2). */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
+ eeprom->word_size);
+
+ return 0;
+}
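+
+/* Worked example (illustrative): assuming IXGBE_SR_WORDS_IN_1KB is 512
+ * (16-bit words per KB), sr_size = 5 gives BIT(5) = 32 KB of Shadow RAM,
+ * i.e. word_size = 32 * 512 = 16384 words.
+ */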
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return -EOPNOTSUPP;
+
+ if (node_handle)
+ *node_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return 0;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return -EINVAL;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
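
Note how the 24-bit flat offset is split across two descriptor fields. A
hedged standalone illustration of that packing (hypothetical snippet, not
driver code):

    #include <stdint.h>

    /* Pack a 24-bit NVM offset the way the descriptor above expects:
     * low 16 bits in offset_low, next 8 bits in offset_high. Offsets
     * above IXGBE_ACI_NVM_MAX_OFFSET are rejected before this point.
     */
    static void pack_nvm_offset(uint32_t offset, uint16_t *lo, uint8_t *hi)
    {
            *lo = offset & 0xFFFF;
            *hi = (offset >> 16) & 0xFF;
    }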
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!err && cmd->checksum !=
+ cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Invalid Shadow RAM checksum\n");
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ u16 data_local;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (err)
+ return err;
+
+ *data = data_local;
+ return 0;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns an error code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ int err;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u)))
+ return -EINVAL;
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = min_t(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (err)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return err;
+}
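
The chunking arithmetic above guarantees that no single request crosses a
4 KB sector boundary: each iteration reads at most up to the next boundary.
A standalone sketch of just that loop math (hypothetical names, assumed
4096-byte limit as in IXGBE_ACI_MAX_BUFFER_SIZE):

    #include <stdint.h>
    #include <stdio.h>

    #define ACI_MAX_BUF 4096u

    /* Print the chunk boundaries the flat-read loop above would use. */
    static void show_chunks(uint32_t offset, uint32_t len)
    {
            uint32_t done = 0;

            while (done < len) {
                    uint32_t sec_off = offset % ACI_MAX_BUF;
                    uint32_t rd = ACI_MAX_BUF - sec_off;

                    if (rd > len - done)
                            rd = len - done;
                    printf("read %u bytes at 0x%x\n",
                           (unsigned)rd, (unsigned)offset);
                    offset += rd;
                    done += rd;
            }
    }

    /* show_chunks(0x0ff0, 0x30): 16 bytes at 0xff0, then 32 at 0x1000. */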
+
+/**
+ * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM
+ * ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+ if (err)
+ return err;
+
+ *words = bytes / 2;
+
+ for (int i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
+ * @hw: pointer to hardware structure
+ * @offset: offset of words in the EEPROM to read
+ * @words: number of words to read
+ * @data: words to read from the EEPROM
+ *
+ * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
+ * prior to the read. Acquire/release the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_nvm_validate_checksum(hw);
+ if (err)
+ return err;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!err)
+ *checksum_val = tmp_checksum;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_reset_hw_e610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int err;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ err = hw->mac.ops.stop_adapter(hw);
+ if (err)
+ goto reset_hw_out;
+
+ /* Flush pending Tx transactions. */
+ ixgbe_clear_tx_pending(hw);
+
+ hw->phy.ops.init(hw);
+
+mac_reset_top:
+ err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (err)
+ return -EBUSY;
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Reset polling failed to complete.\n");
+ }
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ msleep(100);
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Maximum number of Receive Address Registers. */
+#define IXGBE_MAX_NUM_RAR 128
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to the
+ * maximum number of Receive Address Registers, since we modify this
+ * value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+reset_hw_out:
+ return err;
+}
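
The reset path relies on a bounded poll for a self-clearing bit rather than
an open-ended wait. The same pattern in isolation (hypothetical register
callback; the driver pairs IXGBE_READ_REG with udelay(1) instead):

    #include <stdbool.h>
    #include <stdint.h>

    /* Poll until 'mask' clears in the polled register, giving up after
     * 'tries' reads. The caller maps a false return to -EIO, as above.
     */
    static bool poll_bit_clear(uint32_t (*read_reg)(void), uint32_t mask,
                               int tries)
    {
            while (tries--) {
                    if (!(read_reg() & mask))
                            return true;
            }
            return false;
    }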
+
+static const struct ixgbe_mac_operations mac_ops_e610 = {
+ .init_hw = ixgbe_init_hw_generic,
+ .start_hw = ixgbe_start_hw_X540,
+ .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
+ .enable_rx_dma = ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = ixgbe_get_mac_addr_generic,
+ .get_device_caps = ixgbe_get_device_caps_generic,
+ .stop_adapter = ixgbe_stop_adapter_generic,
+ .set_lan_id = ixgbe_set_lan_id_multi_port_pcie,
+ .set_rxpba = ixgbe_set_rxpba_generic,
+ .check_link = ixgbe_check_link_e610,
+ .blink_led_start = ixgbe_blink_led_start_X540,
+ .blink_led_stop = ixgbe_blink_led_stop_X540,
+ .set_rar = ixgbe_set_rar_generic,
+ .clear_rar = ixgbe_clear_rar_generic,
+ .set_vmdq = ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic,
+ .clear_vmdq = ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = ixgbe_update_mc_addr_list_generic,
+ .enable_mc = ixgbe_enable_mc_generic,
+ .disable_mc = ixgbe_disable_mc_generic,
+ .clear_vfta = ixgbe_clear_vfta_generic,
+ .set_vfta = ixgbe_set_vfta_generic,
+ .fc_enable = ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550,
+ .init_uta_tables = ixgbe_init_uta_tables_generic,
+ .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing,
+ .set_source_address_pruning =
+ ixgbe_set_source_address_pruning_x550,
+ .set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_x550,
+ .disable_rx_buff = ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = ixgbe_enable_rx_buff_generic,
+ .enable_rx = ixgbe_enable_rx_generic,
+ .disable_rx = ixgbe_disable_rx_e610,
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_e610,
+ .get_media_type = ixgbe_get_media_type_e610,
+ .setup_link = ixgbe_setup_link_e610,
+ .get_link_capabilities = ixgbe_get_link_capabilities_e610,
+ .get_bus_info = ixgbe_get_bus_info_generic,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_e610,
+ .fc_autoneg = ixgbe_fc_autoneg_e610,
+};
+
+static const struct ixgbe_phy_operations phy_ops_e610 = {
+ .init = ixgbe_init_phy_ops_e610,
+ .identify = ixgbe_identify_phy_e610,
+ .identify_sfp = ixgbe_identify_module_e610,
+ .setup_link_speed = ixgbe_setup_phy_link_speed_generic,
+ .setup_link = ixgbe_setup_phy_link_e610,
+ .enter_lplu = ixgbe_enter_lplu_e610,
+};
+
+static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
+ .read = ixgbe_read_ee_aci_e610,
+ .read_buffer = ixgbe_read_ee_aci_buffer_e610,
+ .validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+};
+
+const struct ixgbe_info ixgbe_e610_info = {
+ .mac = ixgbe_mac_e610,
+ .get_invariants = ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_e610,
+ .eeprom_ops = &eeprom_ops_e610,
+ .phy_ops = &phy_ops_e610,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..ba8c06b73810
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps);
+int ixgbe_get_caps(struct ixgbe_hw *hw);
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+int ixgbe_update_link_info(struct ixgbe_hw *hw);
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw);
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw);
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on);
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw);
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data);
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_E610_H_ */
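
Taken together, the NVM helpers declared here follow an acquire/do/release
discipline. A hedged sketch of a caller (hypothetical function; assumes the
declarations above and kernel types) reading one Shadow RAM word:

    /* Hypothetical caller: read one Shadow RAM word under NVM ownership. */
    static int example_read_sr_word(struct ixgbe_hw *hw, u16 offset, u16 *val)
    {
            int err;

            err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
            if (err)
                    return err;

            err = ixgbe_read_sr_word_aci(hw, offset, val);
            ixgbe_release_nvm(hw);  /* always release, even on read failure */

            return err;
    }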
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9482e0cca8b7..da91c582d439 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/* ethtool support for ixgbe */
@@ -690,6 +690,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -1613,6 +1614,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1874,6 +1876,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1935,6 +1938,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 16fa621ce0ff..336d47ffb95a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
@@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2e38e8f6fac1..7236f20c9a30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -42,6 +42,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
@@ -73,6 +74,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
+ [board_e610] = &ixgbe_e610_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -131,6 +133,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610},
/* required last entry */
{0, }
};
@@ -173,6 +180,8 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *);
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *);
static const struct net_device_ops ixgbe_netdev_ops;
@@ -236,8 +245,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
* bandwidth details should be gathered from the parent bus instead of from the
* device. Used to ensure that various locations all have the correct device ID
* checks.
+ *
+ * Return: true if information should be collected from the parent bus, false
+ * otherwise
*/
-static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_SFP_SF_QP:
@@ -876,6 +888,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -915,6 +928,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1025,7 +1039,7 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
return ((head <= tail) ? tail : tail + ring->count) - head;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
@@ -1909,10 +1923,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* Verify netdev is present, and that packet does not have any
* errors that would be unacceptable to the netdev.
*/
@@ -2220,9 +2230,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -2272,7 +2282,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
@@ -2330,6 +2340,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -2375,12 +2386,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -2400,7 +2409,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -2414,7 +2423,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
continue;
/* probably a little skewed due to removing CRC */
@@ -2515,6 +2524,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2528,6 +2538,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_EIMS_MAILBOX |
IXGBE_EIMS_LSC);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mask &= ~IXGBE_EIMS_FW_EVENT;
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -2744,6 +2757,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2966,6 +2980,218 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_check_phy_fw_load - check if PHY FW load failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check if external PHY FW load failed and print an error message if it did.
+ */
+static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ return;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ }
+}
+
+/**
+ * ixgbe_check_module_power - check module power level
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check module power level returned by a previous call to aci_get_link_info
+ * and print error messages if module power level is not supported.
+ */
+static void ixgbe_check_module_power(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ /* If module power level is supported, clear the flag. */
+ if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT |
+ IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) {
+ adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ return;
+ }
+
+ /* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do.
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) {
+ netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ } else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) {
+ netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_check_link_cfg_err - check if link configuration failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print if any link configuration failure happens due to the value in the
+ * link_cfg_err parameter in the link info structure.
+ */
+static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ ixgbe_check_module_power(adapter, link_cfg_err);
+ ixgbe_check_phy_fw_load(adapter, link_cfg_err);
+}
+
+/**
+ * ixgbe_process_link_status_event - process the link event
+ * @adapter: pointer to adapter structure
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+static int
+ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up,
+ u16 link_speed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int status;
+
+ /* Update the link info structures and re-enable link events,
+ * don't bail on failure since other bookkeeping is still needed.
+ */
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ e_dev_err("Failed to update link status, err %d aq_err %d\n",
+ status, hw->aci.last_status);
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)
+ link_up = true;
+
+ /* Turn off PHY if media was removed. */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) &&
+ !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA;
+
+ if (link_up == adapter->link_up &&
+ link_up == netif_carrier_ok(adapter->netdev) &&
+ link_speed == adapter->link_speed)
+ return 0;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ ixgbe_watchdog_update_link(adapter);
+
+ if (link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_link_status_event - handle link status event via ACI
+ * @adapter: pointer to adapter structure
+ * @e: event structure containing link status info
+ */
+static void
+ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_aci_event *e)
+{
+ struct ixgbe_aci_cmd_get_link_status_data *link_data;
+ u16 link_speed;
+ bool link_up;
+
+ link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf;
+
+ link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP);
+ link_speed = le16_to_cpu(link_data->link_speed);
+
+ if (ixgbe_process_link_status_event(adapter, link_up, link_speed))
+ e_dev_warn("Could not process link status event");
+}
+
+/**
+ * ixgbe_schedule_fw_event - schedule Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * If the adapter is not in a down, removing, or resetting state,
+ * schedule the service task to handle the event.
+ */
+static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
+ !test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_aci_event_cleanup - release msg_buf memory
+ * @event: pointer to the event holding msg_buf to be released
+ *
+ * Free the memory allocated for the event's msg_buf. Used with __cleanup()
+ * for automatic scope-based cleanup.
+ */
+static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event)
+{
+ kfree(event->msg_buf);
+}
+
+/**
+ * ixgbe_handle_fw_event - handle Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * Obtain an event from the ACI and process it according to the type of the
+ * event and the opcode.
+ */
+static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool pending = false;
+ int err;
+
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT;
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
+ do {
+ err = ixgbe_aci_get_event(hw, &event, &pending);
+ if (err)
+ break;
+
+ switch (le16_to_cpu(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ ixgbe_handle_link_status_event(adapter, &event);
+ break;
+ default:
+ e_warn(hw, "unknown FW async event captured\n");
+ break;
+ }
+ } while (pending);
+}
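
The __cleanup() annotation on 'event' above frees event.msg_buf on every
exit path, including the early break on error. A minimal userspace analogue
of the same compiler feature (GCC/Clang cleanup attribute; names invented
for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    static void free_bufp(char **p)
    {
            free(*p);       /* runs automatically when 'buf' leaves scope */
    }

    static int demo(void)
    {
            char *buf __attribute__((cleanup(free_bufp))) = malloc(64);

            if (!buf)
                    return -1;
            snprintf(buf, 64, "no explicit free needed");
            puts(buf);
            return 0;       /* compiler inserts free_bufp(&buf) here */
    }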
+
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
@@ -2982,6 +3208,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -3035,6 +3262,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ fallthrough;
case ixgbe_mac_x550em_a:
if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
@@ -3091,12 +3321,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -3334,6 +3568,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
@@ -3342,6 +3579,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
@@ -3442,6 +3680,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -4359,6 +4598,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
fallthrough;
@@ -4526,6 +4766,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4564,6 +4805,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -5148,6 +5390,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -5208,6 +5451,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -5510,6 +5754,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_enable_link_status_events - enable link status events
+ * @adapter: pointer to the adapter structure
+ * @mask: event mask to be set
+ *
+ * Enables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter,
+ u16 mask)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, true, mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = mask;
+ return 0;
+}
+
+/**
+ * ixgbe_disable_link_status_events - disable link status events
+ * @adapter: pointer to the adapter structure
+ *
+ * Disables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = 0;
+ return 0;
+}
+
+/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
**/
@@ -5532,13 +5818,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* ixgbe_non_sfp_link_config - set up non-SFP+ link
* @hw: pointer to private hardware struct
*
- * Returns 0 on success, negative on failure
+ * Configure non-SFP link.
+ *
+ * Return: 0 on success, negative on failure
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 speed;
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
bool autoneg, link_up = false;
int ret = -EIO;
+ u32 speed;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5561,13 +5855,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (ret)
return ret;
- if (hw->mac.ops.setup_link)
+ if (hw->mac.ops.setup_link) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ret = ixgbe_enable_link_status_events(adapter, mask);
+ if (ret)
+ return ret;
+ }
ret = hw->mac.ops.setup_link(hw, speed, link_up);
+ }
return ret;
}
/**
+ * ixgbe_check_media_subtask - check for media
+ * @adapter: pointer to adapter structure
+ *
+ * If media is available then initialize PHY user configuration. Configure the
+ * PHY if the interface is up.
+ */
+static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* No need to check for media if it's already present */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA))
+ return;
+
+ /* Refresh link info and check if media is present */
+ if (ixgbe_update_link_info(hw))
+ return;
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (!(ixgbe_non_sfp_link_config(&adapter->hw)))
+ adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA;
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
* @adapter: board private structure
*
@@ -5630,6 +5964,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5980,6 +6315,7 @@ dma_engine_disable:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -6224,6 +6560,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
}
/**
@@ -6279,6 +6617,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -6342,6 +6681,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
/* get_invariants needs the device IDs */
ii->get_invariants(hw);
@@ -6909,6 +7250,19 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
udp_tunnel_nic_reset_ntf(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ int err = ixgbe_update_link_info(&adapter->hw);
+
+ if (err)
+ e_dev_err("Failed to update link info, err %d.\n", err);
+
+ ixgbe_check_link_cfg_err(adapter,
+ adapter->hw.link.link_info.link_cfg_err);
+
+ err = ixgbe_non_sfp_link_config(&adapter->hw);
+ if (err)
+ e_dev_err("Link setup failed, err %d.\n", err);
+ }
return 0;
@@ -7062,6 +7416,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -7209,6 +7564,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -7221,11 +7577,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < 16; i++) {
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540) ||
- (hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x) ||
- (hw->mac.type == ixgbe_mac_x550em_a)) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_x550em_a ||
+ hw->mac.type == ixgbe_mac_e610) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -7251,6 +7608,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -7551,6 +7909,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -8052,6 +8411,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_check_media_subtask(adapter);
+ }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -10771,6 +11135,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version
+ * string to display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
+}
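
For a concrete feel of the eeprom_id format above, a tiny standalone sketch
with invented version values (purely illustrative, not taken from any
device):

    #include <stdio.h>

    int main(void)
    {
            unsigned int major = 0x3, minor = 0x10, eetrack = 0x800049ab;
            int o_major = 1, o_build = 25, o_patch = 0;
            char id[64];

            snprintf(id, sizeof(id), "%x.%02x 0x%x %d.%d.%d",
                     major, minor, eetrack, o_major, o_build, o_patch);
            puts(id);       /* prints: 3.10 0x800049ab 1.25.0 */
            return 0;
    }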
+
+/**
* ixgbe_set_fw_version - Set FW version
* @adapter: the adapter private structure
*
@@ -10782,6 +11164,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_nvm_version nvm_ver;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_set_fw_version_e610(adapter);
+ return;
+ }
+
ixgbe_get_oem_prod_version(hw, &nvm_ver);
if (nvm_ver.oem_valid) {
snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
@@ -10868,6 +11255,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else
indices = IXGBE_MAX_RSS_INDICES;
#endif
+ } else if (ii->mac == ixgbe_mac_e610) {
+ indices = IXGBE_MAX_RSS_INDICES_X550;
}
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
@@ -10933,12 +11322,19 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ err = ixgbe_get_caps(&adapter->hw);
+ if (err)
+ dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+ }
+
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
break;
case ixgbe_mac_x550em_a:
@@ -10959,6 +11355,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -11130,6 +11527,8 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
@@ -11275,6 +11674,8 @@ err_netdev:
err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
@@ -11321,6 +11722,11 @@ static void ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_disable_link_status_events(adapter);
+ mutex_destroy(&adapter->hw.aci.lock);
+ }
+
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -11452,6 +11858,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_x550em_a:
device_id = IXGBE_DEV_ID_X550EM_A_VF;
break;
+ case ixgbe_mac_e610:
+ device_id = IXGBE_DEV_ID_E610_VF;
+ break;
default:
device_id = 0;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d67d77e5dacc..788b5af07c70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_x550em_a &&
+ hw->mac.type != ixgbe_mac_e610 &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 07eaa3c3f4d3..0a03a8bb5f88 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN, &autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) {
/* Set or unset auto-negotiation 5G advertisement */
autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
@@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
switch (hw->mac.type) {
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9baccacd02a1..5fdf32d79d82 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include "ixgbe_type_e610.h"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
@@ -71,12 +72,19 @@
#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_E610_VF 0x57AD
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -1600,7 +1608,7 @@ enum {
#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -1636,6 +1644,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -1654,6 +1663,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1673,6 +1683,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -2068,6 +2079,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2168,6 +2180,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2288,6 +2301,7 @@ enum {
#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena*/
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
@@ -2351,6 +2365,7 @@ enum {
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
@@ -2970,6 +2985,29 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
+/* Physical layer type */
+typedef u64 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
+
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
@@ -3145,6 +3183,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
ixgbe_mac_x550em_a,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
@@ -3224,7 +3264,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui,
};
/* Flow Control Settings */
@@ -3233,7 +3275,8 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
- ixgbe_fc_default
+ ixgbe_fc_default,
+ ixgbe_fc_pfc,
};
/* Smart Speed Settings */
@@ -3533,6 +3576,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -3575,6 +3621,7 @@ struct ixgbe_mac_info {
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
+ u32 max_link_up_time;
u8 led_link_act;
};
@@ -3599,6 +3646,10 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 curr_user_speed_req;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
struct ixgbe_mbx_stats {
@@ -3643,6 +3694,19 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..8d06ade3c7cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,1074 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+#define BYTES_PER_DWORD 4
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Shadow RAM related */
+#define IXGBE_SR_WORDS_IN_1KB 512
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+
+/* Flash Access Register */
+#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define IXGBE_GLNVM_FLA_LOCKED_S 6
+#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Admin Command Interface (ACI) registers */
+#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define IXGBE_PF_HICR 0x00082048
+
+#define IXGBE_PF_HICR_EN BIT(0)
+#define IXGBE_PF_HICR_C BIT(1)
+#define IXGBE_PF_HICR_SV BIT(2)
+#define IXGBE_PF_HICR_EV BIT(3)
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+#define IXGBE_ACI_SEND_TIMEOUT_MS \
+ (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS)
+/* [ms] timeout while waiting for a sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout while waiting for an async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout while waiting for a resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
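A hedged illustration of how these retry constants are meant to combine: the sketch below polls the HICR command bit at IXGBE_ACI_SEND_DELAY_TIME_MS intervals, up to IXGBE_ACI_SEND_MAX_EXECUTE attempts (i.e. IXGBE_ACI_SEND_TIMEOUT_MS overall). The helper name and the assumption that FW clears IXGBE_PF_HICR_C on completion are illustrative, not taken from this patch.

static bool example_aci_wait_done(struct ixgbe_hw *hw)
{
	unsigned int i;

	for (i = 0; i < IXGBE_ACI_SEND_MAX_EXECUTE; i++) {
		/* assumed semantics: FW clears the command bit when done */
		if (!(IXGBE_READ_REG(hw, IXGBE_PF_HICR) & IXGBE_PF_HICR_C))
			return true;
		msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
	}

	return false;	/* gave up after IXGBE_ACI_SEND_TIMEOUT_MS */
}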
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+#define IXGBE_ACI_FLAG_DD BIT(0) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(1) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(2) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(3) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(9) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(10) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(11) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(12) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(13) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(14) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(15) /* 0x8000 */
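A minimal sketch of composing these flags for an indirect command that attaches a data buffer; the helper is hypothetical and relies on struct ixgbe_aci_desc, defined further down in this file.

static void example_aci_set_buf_flags(struct ixgbe_aci_desc *desc,
				      u16 buf_len, bool host_to_fw)
{
	desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
	if (buf_len > IXGBE_ACI_LG_BUF)
		desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
	if (host_to_fw)		/* buffer carries data for FW to read */
		desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
	desc->datalen = cpu_to_le16(buf_len);
}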
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_get_link_topo_pin = 0x06E1,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+};
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+ __le16 access_type;
+
+ /* Upon successful completion, FW writes this value and the driver is
+ * expected to release the resource before the timeout. This value is
+ * provided in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
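A sketch of the intended ownership flow under the assumption that a generic submission helper, here called ixgbe_aci_send_cmd(), exists: request the resource, honor the FW-provided hold time, then release it via opcode 0x0009 before the timeout expires.

static int example_acquire_nvm(struct ixgbe_hw *hw,
			       enum ixgbe_aci_res_access_type access)
{
	struct ixgbe_aci_desc desc = {};
	struct ixgbe_aci_cmd_req_res *res = &desc.params.res_owner;

	desc.opcode = cpu_to_le16(ixgbe_aci_opc_req_res);
	res->res_id = cpu_to_le16(IXGBE_NVM_RES_ID);
	res->access_type = cpu_to_le16(access);

	/* on success, FW fills res->timeout (ms); release before expiry */
	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
}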
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_1588 0x0046
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_PENDING_NVM_VER 0x0049
+#define IXGBE_ACI_CAPS_OROM_VER 0x004A
+#define IXGBE_ACI_CAPS_PENDING_OROM_VER 0x004B
+#define IXGBE_ACI_CAPS_PENDING_NET_VER 0x004D
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
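For illustration: the report-mode values above already sit inside IXGBE_ACI_REPORT_MODE_M, so selecting a report is a mask-and-OR on param0. The fragment below is a sketch, with 'cmd' assumed to point at a struct ixgbe_aci_cmd_get_phy_caps.

	/* sketch: request the active PHY configuration report */
	cmd->param0 &= ~cpu_to_le16(IXGBE_ACI_REPORT_MODE_M);
	cmd->param0 |= cpu_to_le16(IXGBE_ACI_REPORT_ACTIVE_CFG);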
+
+/* PHY type defines (extended):
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK 0xdf
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define IXGBE_ACI_LSE_M GENMASK(1, 0)
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
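A hedged fragment showing how the LSE field would be driven to arm or disarm Link Status Events; the helper name is illustrative only.

static void example_set_lse(struct ixgbe_aci_cmd_get_link_status *cmd,
			    bool ena)
{
	u16 flags = ena ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;

	cmd->cmd_flags = cpu_to_le16(flags);
}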
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK GENMASK(2, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0)
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0)
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3;
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+} __packed;
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ixgbe_aci_cmd_get_link_topo_pin {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 input_io_params;
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0)
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
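Sketch of encoding i2c_bus_addr for a 7-bit addressed module; 0x50 (the conventional SFP A0h address) is used as an assumption, and FIELD_PREP() comes from <linux/bitfield.h>.

	/* illustrative: 7-bit address 0x50, transaction flagged as write */
	cmd->i2c_bus_addr = cpu_to_le16(FIELD_PREP(IXGBE_ACI_SFF_I2CBUS_7BIT_M,
						   0x50) |
					IXGBE_ACI_SFF_IS_WRITE);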
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
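Following the flags2 note above, a sketch of splitting the software-level 16-bit Write Activate flags across cmd_flags and offset_high; the helper name is illustrative.

static void example_nvm_write_activate_flags(struct ixgbe_aci_cmd_nvm *cmd,
					     u16 flags)
{
	cmd->cmd_flags = (u8)(flags & 0xFF);
	/* upper byte (e.g. IXGBE_ACI_NVM_ACTIV_REQ_EMPR) lands in flags2 */
	cmd->offset_high = (u8)(flags >> 8);
}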
+
+/* NVM Module_Type ID, plus the offset and read_len needed for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_START_POINT 0
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+/**
+ * struct ixgbe_aci_desc - Admin Command (AC) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the
+ * Admin Command Interface (ACI).
+ * The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Command Interface (ACI) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ } params;
+};
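A minimal sketch of preparing a direct (bufferless) descriptor per the format documented above; zero-initializing and requesting synchronous completion via the SI flag are assumptions rather than requirements of this patch.

static void example_fill_get_ver_desc(struct ixgbe_aci_desc *desc)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(ixgbe_aci_opc_get_ver);
	desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);	/* synchronous */
}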
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to ixgbe_aci_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 topo_media_conflict;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to #define from module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]
+ * of ixgbe_aci_get_phy_caps structure
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0)
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4)
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8)
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 ieee_1588;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define IXGBE_NVM_PENDING_OROM BIT(1)
+#define IXGBE_NVM_PENDING_NETLIST BIT(2)
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+} __packed;
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ struct ixgbe_hw_caps common_cap;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u8 *msg_buf;
+ u16 msg_len;
+ u16 buf_len;
+};
+
+struct ixgbe_aci_info {
+ struct mutex lock; /* admin command interface lock */
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+} __packed;
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+} __packed;
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Ptr to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ u32 flash_size; /* Available flash size in bytes */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 81e1df83f136..1fc821fb351a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -66,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
- **/
+ *
+ * Return: 0 on success or negative value on failure
+ */
int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -133,10 +135,14 @@ mac_reset_top:
hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
hw->mac.ops.init_rx_addrs(hw);
+ /* The following is not supported by E610. */
+ if (hw->mac.type == ixgbe_mac_e610)
+ return status;
+
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
- /* Add the SAN MAC address to the RAR only if it's a valid address */
+ /* Add the SAN MAC address to RAR if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index b69a680d3ab5..6ed360c5b605 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
#include "ixgbe_type.h"
@@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_X540_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index d9a8cf018d3b..277ceaf8a793 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe_x540.h"
+#include "ixgbe_x550.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"
@@ -2770,9 +2771,9 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
 * semaphore, -EIO when command fails or -EINVAL when incorrect
* params passed.
**/
-static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub, u16 len,
- const char *driver_ver)
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
int ret_val;
@@ -3505,14 +3506,14 @@ mac_reset_top:
return status;
}
-/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype
* anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/
-static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
- bool enable, int vf)
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf)
{
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -3527,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
-/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning
+/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
* @pool: Rx pool to set source address pruning for
**/
-static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
- bool enable,
- unsigned int pool)
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool)
{
u64 pfflp;
@@ -3831,9 +3832,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
.set_source_address_pruning = \
- &ixgbe_set_source_address_pruning_X550, \
+ &ixgbe_set_source_address_pruning_x550, \
.set_ethertype_anti_spoofing = \
- &ixgbe_set_ethertype_anti_spoofing_X550, \
+ &ixgbe_set_ethertype_anti_spoofing_x550, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -4047,7 +4048,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_a)
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
new file mode 100644
index 000000000000..3e4092f8da3e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT];
+
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver);
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+
+#endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5f08779c0e4e..a9bc96f6399d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 130cb868774c..4384e892f967 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -418,6 +418,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
+ board_e610_vf,
+ board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,12 +436,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
-extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149911e3002a..6442f115a262 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
@@ -732,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -1044,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_TX;
}
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int result = IXGBEVF_XDP_PASS;
struct ixgbevf_ring *xdp_ring;
@@ -1080,7 +1081,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
@@ -1122,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -1165,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ if (xdp_res) {
+ if (xdp_res == IXGBEVF_XDP_TX) {
xdp_xmit = true;
ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
size);
@@ -1189,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -1203,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -4693,6 +4695,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index a55dd978f7ca..24d0237e7a99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
};
-
-/* Mailbox operations when running on Hyper-V.
- * On Hyper-V, PF/VF communication is not through the
- * hardware mailbox; this communication is through
- * a software mediated path.
- * Most mail box operations are noop while running on
- * Hyper-V.
- */
-const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
-};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 1641d00d8ed3..da7a72ecce7a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -1076,3 +1076,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_e610_vf_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index b4eef5b6c172..2d791bc26ae4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -54,6 +54,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 660dff5426e7..83ce3bfefa5c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -90,7 +90,6 @@ struct ltq_etop_priv {
struct net_device *netdev;
struct platform_device *pdev;
struct ltq_eth_data *pldata;
- struct resource *res;
struct mii_bus *mii_bus;
@@ -643,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct ltq_etop_priv *priv;
- struct resource *res;
int err;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get etop resource\n");
- err = -ENOENT;
- goto err_out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request etop resource\n");
- err = -EBUSY;
- goto err_out;
- }
-
- ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_etop_membase) {
+ ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ltq_etop_membase)) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
- err = -ENOMEM;
+ err = PTR_ERR(ltq_etop_membase);
goto err_out;
}
@@ -679,7 +661,6 @@ ltq_etop_probe(struct platform_device *pdev)
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
- priv->res = res;
priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 1fb285fa0bdb..82f4333fb426 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -284,8 +284,12 @@
MVNETA_TXQ_BUCKET_REFILL_PERIOD))
#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_0_TS (0xff << 8)
#define MVNETA_LPI_CTRL_1 0x2cc4
-#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_FORCE BIT(1)
+#define MVNETA_LPI_CTRL_1_MANUAL_MODE BIT(2)
+#define MVNETA_LPI_CTRL_1_TW (0xfff << 4)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc
@@ -541,10 +545,6 @@ struct mvneta_port {
struct mvneta_bm_pool *pool_short;
int bm_win_id;
- bool eee_enabled;
- bool eee_active;
- bool tx_lpi_enabled;
-
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -3960,23 +3960,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvneta_port, phylink_pcs);
}
-static int mvneta_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
- * When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* QSGMII, SGMII and RGMII can be configured to use inband
+ * signalling of the AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
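
For context, a hedged sketch of how a caller could consume the capability mask returned above (illustrative only, not the actual phylink core policy; the helper name is hypothetical):

static bool pcs_requires_inband(struct phylink_pcs *pcs,
				phy_interface_t interface)
{
	unsigned int caps = pcs->ops->pcs_inband_caps(pcs, interface);

	/* Only LINK_INBAND_ENABLE set: inband AN is mandatory (802.3z).
	 * Both bits set: either mode works (SGMII/QSGMII/RGMII).
	 * Only LINK_INBAND_DISABLE: inband is not supported.
	 */
	return caps == LINK_INBAND_ENABLE;
}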
-static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
@@ -4071,7 +4078,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
- .pcs_validate = mvneta_pcs_validate,
+ .pcs_inband_caps = mvneta_pcs_inband_caps,
.pcs_get_state = mvneta_pcs_get_state,
.pcs_config = mvneta_pcs_config,
.pcs_an_restart = mvneta_pcs_an_restart,
@@ -4206,18 +4213,6 @@ static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
return 0;
}
-static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
-{
- u32 lpi_ctl1;
-
- lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
- if (enable)
- lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
- else
- lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
- mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
-}
-
static void mvneta_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -4233,9 +4228,6 @@ static void mvneta_mac_link_down(struct phylink_config *config,
val |= MVNETA_GMAC_FORCE_LINK_DOWN;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
-
- pp->eee_active = false;
- mvneta_set_eee(pp, false);
}
static void mvneta_mac_link_up(struct phylink_config *config,
@@ -4284,11 +4276,56 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
mvneta_port_up(pp);
+}
+
+static void mvneta_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 lpi1;
+
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE |
+ MVNETA_LPI_CTRL_1_REQUEST_FORCE |
+ MVNETA_LPI_CTRL_1_MANUAL_MODE);
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+}
+
+static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 ts, tw, lpi0, lpi1, status;
- if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, false) >= 0;
- mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ status = mvreg_read(pp, MVNETA_GMAC_STATUS);
+ if (status & MVNETA_GMAC_SPEED_1000) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
}
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS);
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0);
+
+ /* Configure tw and enable LPI generation */
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW);
+ lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+
+ return 0;
}
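
A distilled sketch of the ts/tw scaling performed above, assuming the hardware counts LPI times in 1us units at 1G and 10us units at 100M (helper name hypothetical):

static void eee_lpi_units(u32 timer_us, bool gige, u32 *ts, u32 *tw)
{
	if (gige) {
		*tw = 17;			/* 802.3 tw = 16.5us, rounded up */
		*ts = timer_us;			/* already in 1us units */
	} else {
		*tw = 3;			/* 802.3 tw = 30us, in 10us units */
		*ts = DIV_ROUND_UP(timer_us, 10);
	}

	if (*ts > 255)				/* TS is an 8-bit field */
		*ts = 255;
}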
static const struct phylink_mac_ops mvneta_phylink_ops = {
@@ -4298,6 +4335,8 @@ static const struct phylink_mac_ops mvneta_phylink_ops = {
.mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
+ .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi,
};
static int mvneta_mdio_probe(struct mvneta_port *pp)
@@ -4385,6 +4424,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
if (pp->neta_armada3700)
return 0;
+ netdev_lock(port->napi.dev);
spin_lock(&pp->lock);
/*
* Configuring the driver for a new CPU while the driver is
@@ -4411,7 +4451,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
/* Mask all ethernet port interrupts */
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- napi_enable(&port->napi);
+ napi_enable_locked(&port->napi);
/*
* Enable per-CPU interrupts on the CPU that is
@@ -4432,6 +4472,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
+
return 0;
}
@@ -5099,14 +5141,6 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
-
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
-
- eee->eee_enabled = pp->eee_enabled;
- eee->eee_active = pp->eee_active;
- eee->tx_lpi_enabled = pp->tx_lpi_enabled;
- eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
return phylink_ethtool_get_eee(pp->phylink, eee);
}
@@ -5115,7 +5149,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register.
@@ -5123,16 +5156,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
- lpi_ctl0 &= ~(0xff << 8);
- lpi_ctl0 |= eee->tx_lpi_timer << 8;
- mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
-
- pp->eee_enabled = eee->eee_enabled;
- pp->tx_lpi_enabled = eee->tx_lpi_enabled;
-
- mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
-
return phylink_ethtool_set_eee(pp->phylink, eee);
}
@@ -5446,6 +5469,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
+ /* Ensure LPI is disabled */
+ mvneta_mac_disable_tx_lpi(&pp->phylink_config);
+
return 0;
}
@@ -5537,6 +5563,13 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
MAC_100 | MAC_1000FD | MAC_2500FD;
+ /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces);
+ pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ pp->phylink_config.lpi_timer_default = 250;
+ pp->phylink_config.eee_enabled_default = true;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 9e02e4367bec..44fe9b68d1c2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -481,6 +481,11 @@
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
+#define MVPP2_GMAC_LPI_CTRL0 0xc0
+#define MVPP2_GMAC_LPI_CTRL0_TS_MASK GENMASK(15, 8)
+#define MVPP2_GMAC_LPI_CTRL1 0xc4
+#define MVPP2_GMAC_LPI_CTRL1_REQ_EN BIT(0)
+#define MVPP2_GMAC_LPI_CTRL1_TW_MASK GENMASK(15, 4)
/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 571631a30320..dd76c1b7ed3a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5757,6 +5757,28 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
}
+static int mvpp2_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_get_eee(port->phylink, eee);
+}
+
+static int mvpp2_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_set_eee(port->phylink, eee);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5802,6 +5824,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.create_rxfh_context = mvpp2_create_rxfh_context,
.modify_rxfh_context = mvpp2_modify_rxfh_context,
.remove_rxfh_context = mvpp2_remove_rxfh_context,
+ .get_eee = mvpp2_ethtool_get_eee,
+ .set_eee = mvpp2_ethtool_set_eee,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -6188,6 +6212,7 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
}
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
@@ -6224,22 +6249,30 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
.pcs_config = mvpp2_xlg_pcs_config,
};
-static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* SGMII and RGMII can be configured to use inband signalling of the
+ * AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
@@ -6343,7 +6376,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
- .pcs_validate = mvpp2_gmac_pcs_validate,
+ .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
@@ -6665,6 +6698,55 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
mvpp2_port_disable(port);
}
+static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1,
+ MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0);
+}
+
+static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 ts, tw, lpi1, status;
+
+ status = readl(port->base + MVPP2_GMAC_STATUS0);
+ if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
+ }
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0,
+ MVPP2_GMAC_LPI_CTRL0_TS_MASK,
+ FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts));
+
+ lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ /* Configure tw */
+ lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK);
+
+ /* Enable LPI generation */
+ writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN,
+ port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ return 0;
+}
+
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
@@ -6672,6 +6754,8 @@ static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
+ .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi,
+ .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi,
};
/* Work-around for ACPI */
@@ -6950,6 +7034,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.mac_capabilities =
MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.lpi_interfaces);
+
+ port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD;
+
+ /* Setup EEE. Choose 250us idle. */
+ port->phylink_config.lpi_timer_default = 250;
+ port->phylink_config.eee_enabled_default = true;
+
if (port->priv->global_tx_fc)
port->phylink_config.mac_capabilities |=
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
@@ -7024,6 +7117,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_port_pcpu;
}
port->phylink = phylink;
+
+ mvpp2_mac_disable_tx_lpi(&port->phylink_config);
} else {
dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
port->phylink = NULL;
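
The LPI hunks above lean on mvpp2_modify() for register updates; a generic sketch of that read-modify-write shape (assuming the usual readl()/writel() MMIO accessors):

static u32 reg_modify(void __iomem *reg, u32 mask, u32 set)
{
	u32 val = readl(reg);

	val &= ~mask;		/* clear the field */
	val |= set;		/* install the new value */
	writel(val, reg);

	return val;
}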
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index 4f4d58189118..a88c006ea65b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -150,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev,
iface_rx_stats,
iface_tx_stats);
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- tx_busy_errors += iq->stats.tx_busy;
-
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_packets;
@@ -198,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->err_pkts;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 549436efc204..0a679e95196f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq)
if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
return 0;
}
@@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
wmb();
/* Ring Doorbell to notify the NIC of new packets */
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
@@ -991,37 +991,24 @@ dma_map_err:
static void octep_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
struct octep_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
int q;
- if (netif_running(netdev))
- octep_ctrl_net_get_if_stats(oct,
- OCTEP_CTRL_NET_INVALID_VFID,
- &oct->iface_rx_stats,
- &oct->iface_tx_stats);
-
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->collisions = oct->iface_tx_stats.xscol;
- stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
}
/**
@@ -1137,6 +1124,43 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static int octep_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
+ ivi->spoofchk = true;
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ ivi->trusted = false;
+
+ return 0;
+}
+
+static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+ return -EADDRNOTAVAIL;
+ }
+
+ dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
+ ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
+ oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
+ if (err)
+ dev_err(&oct->pdev->dev,
+ "Set VF%d MAC address failed via host control Mbox\n",
+ vf);
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1146,6 +1170,8 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
.ndo_set_features = octep_set_features,
+ .ndo_get_vf_config = octep_get_vf_config,
+ .ndo_set_vf_mac = octep_set_vf_mac
};
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index fee59e0e0138..81ac4267811c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -220,6 +220,7 @@ struct octep_iface_link_info {
/* The Octeon VF device specific info data structure.*/
struct octep_pfvf_info {
u8 mac_addr[ETH_ALEN];
+ u32 flags;
u32 mbox_version;
};
@@ -257,11 +258,17 @@ struct octep_device {
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
+ /* Per iq stats */
+ struct octep_iq_stats stats_iq[OCTEP_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_oq *oq[OCTEP_MAX_OQ];
+ /* Per oq stats */
+ struct octep_oq_stats stats_oq[OCTEP_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index e6eb98d70f3c..ebecdd29f3bd 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_err(&oct->pdev->dev,
+ "VF%d attempted to override administrative set MAC address\n",
+ vf_id);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+
err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -171,10 +182,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id);
+ ether_addr_copy(rsp->s_set_mac.mac_addr,
+ oct->vf_info[vf_id].mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ return;
+ }
err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
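
Taken together, the octep changes give a PF-administered MAC precedence over VF requests: ndo_set_vf_mac() records the address and sets OCTEON_PFVF_FLAG_MAC_SET_BY_PF, after which VF SET_MAC requests are NACKed and GET_MAC answers from the cached vf_info. A condensed sketch of the rule (helper name hypothetical):

static bool vf_may_change_mac(const struct octep_pfvf_info *vf)
{
	/* Once the PF pins a MAC, VF-initiated changes are refused. */
	return !(vf->flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF);
}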
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
index 0dc6eead292a..386a095a99bc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -8,8 +8,6 @@
#ifndef _OCTEP_PFVF_MBOX_H_
#define _OCTEP_PFVF_MBOX_H_
-/* VF flags */
-#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
#define OCTEON_SDP_16K_HW_FRS 16380UL
#define OCTEON_SDP_64K_HW_FRS 65531UL
@@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version {
#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+/* VF flags */
+/* PF has set VF MAC address */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0)
+
enum octep_pfvf_mbox_opcode {
OCTEP_PFVF_MBOX_CMD_VERSION,
OCTEP_PFVF_MBOX_CMD_SET_MTU,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 8af75cb37c3e..82b6b19e76b4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
if (!skb) {
octep_oq_drop_rx(oq, buff_info,
&read_idx, &desc_used);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
continue;
}
skb_reserve(skb, data_offset);
@@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 3b08e2d560dc..b4696c93d0e6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -186,8 +186,8 @@ struct octep_oq {
*/
u8 __iomem *pkts_sent_reg;
- /* Statistics for this OQ. */
- struct octep_oq_stats stats;
+ /* Pointer to statistics for this OQ. */
+ struct octep_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 06851b78aa28..08ee90013fef 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
@@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 875a2c34091f..58fb39dda977 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -170,8 +170,8 @@ struct octep_iq {
*/
u16 flush_index;
- /* Statistics for this input queue. */
- struct octep_iq_stats stats;
+ /* Pointer to statistics for this input queue. */
+ struct octep_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
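
The octep stats hunks above (and the octep_vf ones below) all follow one pattern: per-queue counters move out of the queue structures into fixed-size arrays embedded in the device structure, so the counters survive queue teardown and re-setup, and stats readers can walk the arrays without dereferencing freed queues. A condensed sketch of the shape, with hypothetical names:

#define MAX_QUEUES 64			/* stand-in for OCTEP_MAX_IQ/OQ */

struct q_stats {
	u64 packets;
	u64 bytes;
};

struct queue {
	int q_no;
	struct q_stats *stats;		/* points into dev_priv stats_q[] */
};

struct dev_priv {
	struct queue *q[MAX_QUEUES];	/* allocated/freed with the rings */
	struct q_stats stats_q[MAX_QUEUES];	/* lives as long as the device */
};

static void setup_queue(struct dev_priv *priv, struct queue *q, int q_no)
{
	q->q_no = q_no;
	q->stats = &priv->stats_q[q_no];	/* queue writes a device-owned slot */
}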
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index 7b21439a315f..d60441928ba9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
iface_tx_stats = &oct->iface_tx_stats;
iface_rx_stats = &oct->iface_rx_stats;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_busy_errors += iq->stats.tx_busy;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_alloc_errors;
@@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->dropped_octets_fifo_full;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 7e6771c9cdbb..18c922dd5fc6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -574,7 +574,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
* caused queues to get re-enabled after
* being stopped
*/
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
fallthrough;
case 1: /* Queue left enabled, since IQ is not yet full*/
return 0;
@@ -731,7 +731,7 @@ ring_dbell:
/* Flush the hw descriptors before writing to doorbell */
smp_wmb();
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
}
@@ -786,27 +786,16 @@ static void octep_vf_get_stats64(struct net_device *netdev,
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- if (!octep_vf_get_if_stats(oct)) {
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
- oct->iface_rx_stats.err_pkts;
- stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
- stats->tx_dropped = oct->iface_tx_stats.dropped;
- }
}
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index 5769f62545cd..1a352f41f823 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -246,11 +246,17 @@ struct octep_vf_device {
/* Pointers to Octeon Tx queues */
struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+ /* Per iq stats */
+ struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+ /* Per oq stats */
+ struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index 82821bc28634..d70c8be3cfc4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
index fe46838b5200..9e296b7d7e34 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -187,7 +187,7 @@ struct octep_vf_oq {
u8 __iomem *pkts_sent_reg;
/* Statistics for this OQ. */
- struct octep_vf_oq_stats stats;
+ struct octep_vf_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
index 47a5c054fdb6..8180e5ce3d7e 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
index f338b975103c..1cede90e3a5f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -129,7 +129,7 @@ struct octep_vf_iq {
u16 flush_index;
/* Statistics for this input queue. */
- struct octep_vf_iq_stats stats;
+ struct octep_vf_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_vf_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 62c07407eb94..005ca8a056c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -313,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5d1e2bddd58..613655fcd34f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -569,9 +569,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static u16 nix_get_channel(u16 chan, bool cpt_link)
+{
+ /* The CPT channel for a given link channel is always
+ * assumed to be the link channel with BIT(11) set.
+ */
+ return cpt_link ? chan | BIT(11) : chan;
+}
+
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
@@ -579,6 +587,7 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
struct nix_bp *bp;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -589,6 +598,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -597,8 +609,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ chan_v = nix_get_channel(chan, cpt_link);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
@@ -617,6 +630,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
@@ -696,15 +723,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -717,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
type != NIX_INTF_TYPE_SDP)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -730,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ chan_v = nix_get_channel(chan, cpt_link);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
cfg &= ~GENMASK_ULL(8, 0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
@@ -750,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
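
A worked example of the channel mapping introduced above (values chosen for illustration):

static u16 nix_chan_example(void)
{
	/* The CPT companion channel is the link channel with bit 11 set:
	 * 0x02a | BIT(11) = 0x82a. With cpt_link == false the channel is
	 * returned unchanged.
	 */
	return nix_get_channel(0x2a, true);	/* 0x82a */
}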
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index dbc971266865..cb6513ab35e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -15,5 +15,6 @@ rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..09a5b5268205
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
+{
+ enum cn10k_cpt_hw_state_e state;
+
+ while (true) {
+ state = atomic_cmpxchg(&pf->ipsec.cpt_state,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE);
+ if (state == CN10K_CPT_HW_AVAILABLE)
+ return true;
+ if (state == CN10K_CPT_HW_UNAVAILABLE)
+ return false;
+
+ mdelay(1);
+ }
+}
+
+static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
+}
+
+static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
+}
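
The three helpers above form a small ownership state machine (AVAILABLE -> IN_USE -> AVAILABLE, with UNAVAILABLE as the torn-down state). The usage pattern, mirrored later in cn10k_outb_write_sa() and cn10k_outb_cpt_clean(), looks roughly like this (sketch, hypothetical function name):

static int cpt_do_work(struct otx2_nic *pf)
{
	/* Spins while another context holds the LF; fails fast once the
	 * device has been torn down.
	 */
	if (!cn10k_cpt_device_set_inuse(pf))
		return -ENODEV;

	/* ... enqueue CPT instruction, wait for the result ... */

	cn10k_cpt_device_set_available(pf);
	return 0;
}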
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach)
+ goto unlock;
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach)
+ goto unlock;
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Set iqueue's enqueuing */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for the instruction queue to become empty, i.e. until
+ * the CPT_LF_INPROG.INFLIGHT count reads zero.
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable execution in the LF's queue; the queue should be
+ * empty at this point.
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Wait for instruction queue to become empty */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+ nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 40 x 64 Bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Attach a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ return ret;
+
+ /* Allocate a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set ipsec offload enabled for this device */
+ pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ cn10k_cpt_device_set_available(pf);
+ return 0;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int ret;
+
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ netdev_err(pf->netdev, "CPT LF device unavailable\n");
+ return -ENODEV;
+ }
+
+ /* Set ipsec offload disabled for this device */
+ pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ ret = cn10k_outb_cptlf_detach(pf);
+ if (ret)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return ret;
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
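
A worked example of the LMTST encoding above, for the single 64-byte cpt_inst_s this driver submits: 64 bytes is four 128-bit words, encoded as 4 - 1 = 3 in bits [6:4] of the target address, while val carries just the LMT_ID for a burst of one. Sketch (hypothetical helper):

static u64 encode_lmtst_tar(u64 io_addr, u64 size)
{
	/* size must be a multiple of 16 bytes (one 128-bit word) */
	return io_addr | ((((size / 16) - 1) & 0x7) << 4);
}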
+
+static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u64 *completion_ptr = (u64 *)res;
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
+ CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
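
The completion wait above is a bounded busy-poll on a DMA-coherent result word that the hardware updates in place; the general shape (sketch, hypothetical helper):

static int poll_completion(const u64 *done, u64 mask, u64 not_done,
			   unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while ((READ_ONCE(*done) & mask) == not_done) {
		if (time_after(jiffies, timeout))
			return -EBUSY;
		cpu_relax();
	}

	return 0;
}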
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = sa_info->iova;
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_info->base;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* Check if CPT-LF available */
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+ ret = cn10k_wait_for_cpt_respose(pf, res);
+ if (ret)
+ goto set_available;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+set_available:
+ cn10k_cpt_device_set_available(pf);
+free_mem:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+ /* Offset of the hardware context, in 8-byte words */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+ /* Context push size in 8-byte units, rounded up */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Context size, in 128-byte units */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+ /* Outbound ESP in transport/tunnel mode, AES-GCM cipher */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
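
The key handling in cn10k_outb_prepare_sa() follows the RFC 4106 convention: the xfrm AEAD key blob is the AES key followed by a 4-byte salt, and the hardware consumes the key as big-endian 64-bit words. A standalone sketch of that split and conversion, assuming a little-endian host and made-up key bytes:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h> /* htonl() for the salt */

    int main(void)
    {
        /* 16-byte AES-128 key + 4-byte salt, as xfrm hands it over */
        uint8_t alg_key[20] = {
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
            0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
            0xde, 0xad, 0xbe, 0xef, /* salt */
        };
        int key_len = sizeof(alg_key) - 4; /* strip the trailing salt */
        uint64_t key_words[2];
        uint32_t salt;
        int i;

        memcpy(key_words, alg_key, key_len);
        /* hardware wants big-endian words; bswap stands in for
         * cpu_to_be64() on a little-endian host
         */
        for (i = 0; i < key_len / 8; i++)
            key_words[i] = __builtin_bswap64(key_words[i]);

        memcpy(&salt, alg_key + key_len, 4);
        salt = htonl(salt);

        printf("key word0=%016llx salt=%08x\n",
               (unsigned long long)key_words[0], salt);
        return 0; /* key word0=0001020304050607 salt=deadbeef */
    }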
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload authenticated xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only AES-GCM-ICV16 xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload compressed xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only IPv4/v6 xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload other than crypto-mode");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only tunnel/transport xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ESP xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulated xfrm state may not be offloaded");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without aead");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD ICV length other than 128bit");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with tfc padding");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without geniv");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x, extack);
+ if (err)
+ return err;
+
+ pf = netdev_priv(netdev);
+
+ err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
+ if (err)
+ return err;
+
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
+ qmem_free(pf->dev, sa_info);
+ return err;
+ }
+
+ x->xso.offload_handle = (unsigned long)sa_info;
+ /* Enable the static branch when the first SA is set up */
+ if (!pf->ipsec.outb_sa_count)
+ static_branch_enable(&cn10k_ipsec_sa_enabled);
+ pf->ipsec.outb_sa_count++;
+ return 0;
+}
+
+static int cn10k_ipsec_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x, extack);
+ else
+ return cn10k_ipsec_outb_add_state(x, extack);
+}
+
+static void cn10k_ipsec_del_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err)
+ netdev_err(netdev, "Error (%d) deleting SA\n", err);
+
+ x->xso.offload_handle = 0;
+ qmem_free(pf->dev, sa_info);
+
+ /* If no SAs remain, update netdev features for a potential change
+ * in NETIF_F_HW_ESP.
+ */
+ if (!--pf->ipsec.outb_sa_count)
+ queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
+}
+
+static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+ return true;
+}
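
The ihl test works because the IPv4 header-length field counts 32-bit words: 5 words is the optionless 20-byte header, so anything larger means options are present. A minimal sketch of the same check over a raw header byte:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* First byte of an IPv4 header: version in the high nibble, header
     * length (IHL) in 32-bit words in the low nibble.
     */
    static bool ipv4_has_options(const uint8_t *hdr)
    {
        return (hdr[0] & 0x0f) > 5;
    }

    int main(void)
    {
        uint8_t plain[1] = { 0x45 }; /* IHL 5: no options */
        uint8_t opts[1] = { 0x46 };  /* IHL 6: 4 bytes of options */

        printf("%d %d\n", ipv4_has_options(plain), ipv4_has_options(opts));
        return 0; /* prints 0 1 */
    }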
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+ .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
+};
+
+static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ sa_work);
+ struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
+
+ /* Disable static branch when no more SA enabled */
+ static_branch_disable(&cn10k_ipsec_sa_enabled);
+ rtnl_lock();
+ netdev_update_features(pf->netdev);
+ rtnl_unlock();
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* IPsec offload is supported only on cn10k */
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return -EOPNOTSUPP;
+
+ /* Initialize CPT for outbound ipsec offload */
+ if (enable)
+ return cn10k_outb_cpt_init(netdev);
+
+ /* Don't do CPT cleanup if SA installed */
+ if (pf->ipsec.outb_sa_count) {
+ netdev_err(pf->netdev, "SA installed on this device\n");
+ return -EBUSY;
+ }
+
+ return cn10k_outb_cpt_clean(pf);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return 0;
+
+ /* Each SA entry is rounded up to a multiple of 128 bytes */
+ sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ?
+ (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) *
+ OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s);
+ pf->ipsec.sa_size = sa_size;
+
+ INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+ if (!pf->ipsec.sa_workq) {
+ netdev_err(pf->netdev, "SA alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ /* Set xfrm device ops */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
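
The sa_size expression in cn10k_ipsec_init() is plain round-up-to-128 arithmetic. A quick sketch, assuming OTX2_ALIGN is 128 and using 296 bytes as the size the cn10k_tx_sa_s layout works out to:

    #include <stdio.h>

    #define OTX2_ALIGN 128 /* assumption: matches the driver's constant */

    static unsigned int sa_round(unsigned int sz)
    {
        return sz % OTX2_ALIGN ? (sz / OTX2_ALIGN + 1) * OTX2_ALIGN : sz;
    }

    int main(void)
    {
        unsigned int sizes[] = { 128, 296, 384 };
        unsigned int i;

        for (i = 0; i < 3; i++)
            printf("%u -> %u\n", sizes[i], sa_round(sizes[i]));
        return 0; /* 128 -> 128, 296 -> 384, 384 -> 384 */
    }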
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return;
+
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ return;
+
+ if (pf->ipsec.sa_workq) {
+ destroy_workqueue(pf->ipsec.sa_workq);
+ pf->ipsec.sa_workq = NULL;
+ }
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
+
+/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
+ * The NIX and CPT SG structures are the same size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+ /* The next subdescriptor always starts at a 16-byte boundary,
+ * so a group holding 2 or 3 segments reserves all three IOVA slots.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
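
The offset bookkeeping above keeps every subdescriptor 16-byte aligned: a group with two or more remaining segments reserves the 8-byte SG header plus all three 8-byte IOVA slots (32 bytes), while a trailing single-segment group takes the header plus one IOVA (16 bytes). A standalone sketch of the resulting sizes:

    #include <stdio.h>

    #define MAX_SEGS_PER_SG 3

    /* Mirrors the size accounting in otx2_sqe_add_sg_ipsec(): 8-byte SG
     * header plus 8-byte IOVAs, padded to keep 16-byte alignment.
     */
    static int sg_subdesc_bytes(int num_segs)
    {
        int off = 0, seg;

        for (seg = 0; seg < num_segs; seg++) {
            if (seg % MAX_SEGS_PER_SG)
                continue;
            if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
                off += 8 + 3 * 8; /* header + 3 IOVA slots */
            else
                off += 8 + 8;     /* header + 1 IOVA, 16B total */
        }
        return off;
    }

    int main(void)
    {
        int n;

        for (n = 1; n <= 7; n++)
            printf("segs=%d subdesc=%dB\n", n, sg_subdesc_bytes(n));
        return 0;
    }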
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Crypto mode: disable L3/L4 checksum offload */
+ param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
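
param1 packs the checksum-disable flags into the low byte and the IV offset into the high byte. A sketch of the packing for transport-mode IPv4, where the IV sits after the 20-byte IP header and 8-byte ESP header:

    #include <stdio.h>
    #include <stdint.h>

    #define PARAM1_DIS_L4_CSUM  0x1
    #define PARAM1_DIS_L3_CSUM  0x2
    #define PARAM1_IV_OFF_SHIFT 8

    int main(void)
    {
        uint8_t iv_offset = 20 + 8; /* IPv4 hdr + ESP hdr */
        uint16_t param1 = PARAM1_DIS_L4_CSUM | PARAM1_DIS_L3_CSUM |
                          ((uint16_t)iv_offset << PARAM1_IV_OFF_SHIFT);

        printf("param1=0x%04x\n", param1); /* 0x1c03 */
        return 0;
    }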
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ struct qmem *sa_info;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+ /* Check whether IPsec offload is enabled */
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ goto drop;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len))
+ goto drop;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ goto drop;
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL)
+ goto drop;
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0 && netif_msg_tx_err(pf)) {
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ if (!sa_info)
+ goto drop;
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ /* inst.dlen should not include ICV length */
+ inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = num_segs / MAX_SEGS_PER_SG;
+ gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally, flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
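
Word 5 above folds the gather-list size, ceil(num_segs / MAX_SEGS_PER_SG) clamped to 4 bits, into the top nibble of the DPTR IOVA. A sketch of that composition with a made-up ring address:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_SEGS_PER_SG 3

    int main(void)
    {
        uint64_t dptr_iova = 0x1234000ULL; /* hypothetical ring address */
        int num_segs = 4;
        uint8_t gthr = (num_segs + MAX_SEGS_PER_SG - 1) / MAX_SEGS_PER_SG;

        gthr &= 0xF; /* 4-bit field in DPTR bits 63:60 */
        printf("dptr=0x%016llx\n",
               (unsigned long long)(dptr_iova | ((uint64_t)gthr << 60)));
        return 0; /* 0x2000000001234000 */
    }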
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..9965df0faa3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
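
Plugging the constants in: 8200 instructions divided by 40 gives a DIV40 value of 205, and the byte sizes follow directly. A quick sketch of the derived queue sizes:

    #include <stdio.h>

    int main(void)
    {
        int inst_size = 64, qlen = 8200, extra = 320;
        int div40 = qlen / 40;                       /* 205 */
        long qlen_bytes = (long)div40 * 40 * inst_size +
                          (long)extra * inst_size;   /* 545280 */
        int grp_bytes = (div40 + extra / 40) * 16;   /* 3408 */

        printf("div40=%d qlen=%ldB grp=%dB\n", div40, qlen_bytes, grp_bytes);
        return 0;
    }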
+
+/* Default CPT engine group for ipsec offload */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06,
+ CN10K_CPT_COMP_E_MASK = 0x3F
+};
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+enum cn10k_cpt_hw_state_e {
+ CN10K_CPT_HW_UNAVAILABLE,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE
+};
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ atomic_t cpt_state;
+ struct cn10k_cpt_inst_queue iq;
+
+ /* SA info */
+ u32 sa_size;
+ u32 outb_sa_count;
+ struct work_struct sa_work;
+ struct workqueue_struct *sa_workq;
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+/* IV Source */
+#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
+#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_19 : 9;
+ u64 iv_src : 2;
+ u64 rsvd_w2_22_31 : 10;
+ u64 rsvd_w2_32_63 : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
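
The bit widths above are meant to tile exactly into 64-bit words W0..W36, i.e. 37 words or 296 bytes. A compile-time sketch of that invariant, counting the words per the layout comments:

    #include <assert.h>
    #include <stdio.h>

    /* W0 + W1 + W2 + W3 + 32B key (4) + IV word (1) + reserved (22)
     * + hw_ctx (6)
     */
    #define SA_WORDS (1 + 1 + 1 + 1 + 4 + 1 + 22 + 6)

    int main(void)
    {
        static_assert(SA_WORDS == 37, "SA must span words W0..W36");
        printf("SA size = %d bytes\n", SA_WORDS * 8); /* 296 */
        return 0;
    }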
+
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
+#endif
+#endif // CN10K_IPSEC_H
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 6cc7a78968fc..f3b9daffaec3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
return "SA";
default:
return "Unknown";
- };
-
- return "Unknown";
+ }
}
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 523ecb798a7a..2b49bfec7869 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -10,12 +10,19 @@
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
+#include <linux/dcbnl.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
+{
+ return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
+}
+
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
@@ -964,6 +971,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+ /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG.
+ * The NIX and CPT SG structures are the same size. Allocate CPT SG
+ * memory equal to the NIX SQE size to keep the base addresses aligned.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@@ -1722,18 +1752,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
-#ifdef CONFIG_DCB
- req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
- req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
-#else
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
-#endif
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1947,3 +2002,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* Crypto hardware needs write permission for IPsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
+ int seg;
+
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], dir);
+ }
+ sg->num_segs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 566848663fea..65814e3dc93f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -30,6 +30,7 @@
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
+#include "cn10k_ipsec.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -40,6 +41,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
@@ -55,6 +57,9 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -448,6 +453,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@@ -499,9 +505,9 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
+#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -522,6 +528,9 @@ struct otx2_nic {
u16 rep_pf_map[RVU_MAX_REP];
u16 esw_mode;
#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -572,6 +581,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -621,6 +639,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
@@ -985,6 +1006,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
@@ -1149,4 +1171,8 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 294fba58b670..f110dfa42360 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -435,6 +435,9 @@ process_pfc:
return err;
}
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pfvf, false);
+
/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e310f99b1736..e1dde93e8af8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,7 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1484,6 +1485,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
@@ -1551,6 +1554,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pf, false);
+
/* Enable backpressure for CGX mapped PF/VFs */
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
@@ -2273,6 +2279,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -3162,10 +3172,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3206,6 +3220,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3403,6 +3419,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 04bc06a80e23..224cef938927 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -26,12 +27,25 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
+DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
bool *need_xdp_flush);
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
+
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
{
@@ -80,38 +94,6 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
struct nix_cqe_tx_s *cqe)
@@ -625,7 +607,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -1161,6 +1142,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_nic *pfvf = dev;
+ bool ret;
/* Check if there is enough room between producer
* and consumer index.
@@ -1177,6 +1159,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
+
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1196,6 +1179,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
return true;
}
+ /* Set sqe base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1208,7 +1194,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1217,11 +1209,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index e1db5f961877..d23810963fdb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -101,6 +101,9 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for Inline IPSEC */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
} ____cacheline_aligned_in_smp;
enum cq_type {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 839fc77c11b2..e926c6ce96cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -693,10 +694,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->bus->number, n);
}
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
err = otx2_vf_wq_init(vf);
@@ -730,6 +735,8 @@ err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -782,6 +789,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 22ca6ee9665e..440a4c42b405 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config,
}
static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct prestera_port *port = container_of(pcs, struct prestera_port,
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 6c683a12d5aa..415d784de741 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -15,6 +15,7 @@
#include <linux/u64_stats_sync.h>
#include <net/dsa.h>
#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
#define AIROHA_MAX_NUM_GDM_PORTS 1
@@ -23,8 +24,12 @@
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
#define AIROHA_NUM_TX_RING 32
#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
#define AIROHA_FE_MC_MAX_VLAN_PORT 16
#define AIROHA_NUM_TX_IRQ 2
@@ -40,6 +45,9 @@
#define PSE_RSV_PAGES 128
#define PSE_QUEUE_RSV_PAGES 64
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
/* FE */
#define PSE_BASE 0x0100
#define CSR_IFC_BASE 0x0200
@@ -541,9 +549,24 @@
#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
#define REG_LMGR_INIT_CFG 0x1000
#define LMGR_INIT_START BIT(31)
#define LMGR_SRAM_MODE_MASK BIT(30)
@@ -565,13 +588,34 @@
#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
#define REG_TXWRR_MODE_CFG 0x1020
#define TWRR_WEIGHT_SCALE_MASK BIT(31)
#define TWRR_WEIGHT_BASE_MASK BIT(3)
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
#define REG_PSE_BUF_USAGE_CFG 0x1028
#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
#define REG_GLB_TRTCM_CFG 0x1080
#define GLB_TRTCM_EN_MASK BIT(31)
#define GLB_TRTCM_MODE_MASK BIT(30)
@@ -720,6 +764,40 @@ enum {
FE_PSE_PORT_DROP = 0xf,
};
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
struct airoha_queue_entry {
union {
void *buf;
@@ -810,6 +888,12 @@ struct airoha_gdm_port {
int id;
struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
};
struct airoha_eth {
@@ -1789,6 +1873,10 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
+ /* Default drop setting for the xmit ring */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
@@ -1955,6 +2043,27 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+ /* Tx-cpu transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ /* Tx-fwd transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_SRC_MASK, 1) |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ }
+}
+
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
@@ -2005,6 +2114,7 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+ airoha_qdma_init_qos_stats(qdma);
return 0;
}
@@ -2138,17 +2248,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&qdma->q_rx[i].napi);
netif_napi_del(&qdma->q_rx[i].napi);
airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
if (qdma->q_rx[i].page_pool)
page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- napi_disable(&qdma->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
netif_napi_del(&qdma->q_tx_irq[i].napi);
- }
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
if (!qdma->q_tx[i].ndesc)
@@ -2173,6 +2280,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
}
}
+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_disable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&qdma->q_rx[i].napi);
+ }
+}
+
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
struct airoha_eth *eth = port->qdma->eth;
@@ -2413,21 +2535,44 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+ /* For DSA devices, select the QoS channel from the DSA user port
+ * index; otherwise fall back to the port id. Select the QoS queue
+ * based on the skb priority.
+ */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
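
So a packet leaving a non-DSA port 1 with skb->priority 3 maps to channel 1, queue 2, i.e. netdev queue 1 * 8 + 2 = 10. A standalone sketch of the mapping (note that priority 0 wraps to queue 7 under the unsigned arithmetic):

    #include <stdio.h>

    #define NUM_QOS_CHANNELS 4
    #define NUM_QOS_QUEUES   8

    static unsigned int select_queue(unsigned int port_id,
                                     unsigned int skb_priority,
                                     unsigned int num_tx_queues)
    {
        unsigned int channel = port_id % NUM_QOS_CHANNELS;
        unsigned int queue = (skb_priority - 1) % NUM_QOS_QUEUES;

        queue = channel * NUM_QOS_QUEUES + queue;
        return queue < num_tx_queues ? queue : 0;
    }

    int main(void)
    {
        printf("%u\n", select_queue(1, 3, 36)); /* prints 10 */
        return 0;
    }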
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0 = 0, msg1, len = skb_headlen(skb);
- int i, qid = skb_get_queue_mapping(skb);
+ u32 msg0, msg1, len = skb_headlen(skb);
struct airoha_qdma *qdma = port->qdma;
u32 nr_frags = 1 + sinfo->nr_frags;
struct netdev_queue *txq;
struct airoha_queue *q;
void *data = skb->data;
+ int i, qid;
u16 index;
u8 fport;
+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2597,13 +2742,399 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+ int channel, enum tx_sched_mode mode,
+ const u16 *weights, u8 n_weights)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+ airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+ TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
+ for (i = 0; i < n_weights; i++) {
+ u32 status;
+ int err;
+
+ airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+ TWRR_RW_CMD_MASK |
+ FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+ FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+ FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+ err = read_poll_timeout(airoha_qdma_rr, status,
+ status & TWRR_RW_CMD_DONE,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+ true, port->qdma,
+ REG_TXWRR_WEIGHT_CFG);
+ if (err)
+ return err;
+ }
+
+ airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+ CHAN_QOS_MODE_MASK(channel),
+ mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+ return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+ int channel)
+{
+ static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+ int i, nstrict = 0, nwrr, qidx;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < p->bands; i++) {
+ if (!p->quanta[i])
+ nstrict++;
+ }
+
+ /* This configuration is not supported by the hardware */
+ if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+ return -EINVAL;
+
+ /* The EN7581 SoC supports a fixed QoS band priority scheme in which
+ * WRR queues have lower priority than SP ones,
+ * e.g.: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+ */
+ nwrr = p->bands - nstrict;
+ qidx = nstrict && nwrr ? nstrict : 0;
+ for (i = 1; i <= p->bands; i++) {
+ if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
+ return -EINVAL;
+
+ qidx = i == nwrr ? 0 : qidx + 1;
+ }
+
+ for (i = 0; i < nwrr; i++)
+ w[i] = p->weights[nstrict + i];
+
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
+ else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+ mode = nstrict + 1;
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL(channel << 1));
+ u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL((channel << 1) + 1));
+ u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+ (fwd_tx_packets - port->fwd_tx_packets);
+ _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+ port->cpu_tx_packets = cpu_tx_packets;
+ port->fwd_tx_packets = fwd_tx_packets;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+ struct tc_ets_qopt_offload *opt)
+{
+ int channel;
+
+ if (opt->parent == TC_H_ROOT)
+ return -EINVAL;
+
+ channel = TC_H_MAJ(opt->handle) >> 16;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+
+ switch (opt->command) {
+ case TC_ETS_REPLACE:
+ return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+ case TC_ETS_DESTROY:
+ /* PRIO is the default qdisc scheduler */
+ return airoha_qdma_set_tx_prio_sched(port, channel);
+ case TC_ETS_STATS:
+ return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode, u32 val)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 config = TRTCM_PARAM_RW_MASK |
+ FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_mode_type mode,
+ bool enable, u32 enable_mask)
+{
+ u32 val;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &val, NULL))
+ return -EINVAL;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
+ int channel, u32 addr,
+ enum trtcm_mode_type mode,
+ u32 rate_val, u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &config, NULL))
+ return -EINVAL;
+
+ val = airoha_qdma_rr(qdma, addr);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_TOKEN_RATE_MODE, mode, rate);
+ if (err)
+ return err;
+
+ val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ mode, val);
+}
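
The token rate is encoded as an integer part in bits 23:6 and a 6-bit fraction of `unit` in bits 5:0 (FIELD_PREP with TRTCM_TOKEN_RATE_MASK shifts the remainder left by 6 before the division). A sketch with a hypothetical unit of 25:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t unit = 25, rate_val = 1023; /* hypothetical tick unit */
        uint32_t rate = rate_val / unit;
        /* fraction = floor(remainder * 64 / unit), i.e. 6 fractional bits */
        uint32_t frac = ((rate_val % unit) << 6) / unit;
        uint32_t encoded = (rate << 6) | frac;

        printf("rate=%u frac=%u encoded=0x%x\n", rate, frac, encoded);
        return 0; /* rate=40 frac=58 encoded=0xa3a */
    }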
+
+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
+ int channel, u32 rate,
+ u32 bucket_size)
+{
+ int i, err;
+
+ for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
+ err = airoha_qdma_set_trtcm_config(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG, i,
+ !!rate, TRTCM_METER_MODE);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG,
+ i, rate, bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+ u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
+ struct net_device *dev = port->dev;
+ int num_tx_queues = dev->real_num_tx_queues;
+ int err;
+
+ if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
+ return -EINVAL;
+ }
+
+ err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed configuring htb offload");
+ return err;
+ }
+
+ if (opt->command == TC_HTB_NODE_MODIFY)
+ return 0;
+
+ err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
+ if (err) {
+ airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed setting real_num_tx_queues");
+ return err;
+ }
+
+ set_bit(channel, port->qos_sq_bmap);
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
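
opt->rate arrives from the HTB offload in bytes per second; dividing by 1000 and multiplying by 8 yields the kbit/s value the meter expects. A sketch with a 125 MB/s (1 Gbit/s) class:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t rate_Bps = 125000000ULL;            /* 125 MB/s */
        uint32_t rate_kbps = (rate_Bps / 1000) << 3; /* x8: bytes -> bits */

        printf("%u kbit/s\n", rate_kbps); /* prints 1000000 kbit/s */
        return 0;
    }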
+
+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
+{
+ struct net_device *dev = port->dev;
+
+ netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
+ airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
+ clear_bit(queue, port->qos_sq_bmap);
+}
+
+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ airoha_tc_remove_htb_queue(port, channel);
+
+ return 0;
+}
+
+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
+{
+ int q;
+
+ for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
+ airoha_tc_remove_htb_queue(port, q);
+
+ return 0;
+}
+
+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ opt->qid = channel;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_HTB_CREATE:
+ break;
+ case TC_HTB_DESTROY:
+ return airoha_tc_htb_destroy(port);
+ case TC_HTB_NODE_MODIFY:
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ return airoha_tc_htb_alloc_leaf_queue(port, opt);
+ case TC_HTB_LEAF_DEL:
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return airoha_tc_htb_delete_leaf_queue(port, opt);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ return airoha_tc_get_htb_get_leaf_queue(port, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_ETS:
+ return airoha_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return airoha_tc_setup_qdisc_htb(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
.ndo_set_mac_address = airoha_dev_set_macaddr,
+ .ndo_setup_tc = airoha_dev_tc_setup,
};
static const struct ethtool_ops airoha_ethtool_ops = {
@@ -2640,7 +3171,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
- AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
+ AIROHA_NUM_NETDEV_TX_RINGS,
+ AIROHA_NUM_RX_RING);
if (!dev) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -2653,12 +3185,18 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
dev->watchdog_timeo = 5 * HZ;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
dev->features |= dev->hw_features;
dev->dev.of_node = np;
dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
+ /* reserve hw queues for HTB offloading */
+ err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
+ if (err)
+ return err;
+
err = of_get_ethdev_address(np, dev);
if (err) {
if (err == -EPROBE_DEFER)
@@ -2738,7 +3276,7 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_hw_init(pdev, eth);
if (err)
- goto error;
+ goto error_hw_cleanup;
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
@@ -2753,13 +3291,16 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_alloc_gdm_port(eth, np);
if (err) {
of_node_put(np);
- goto error;
+ goto error_napi_stop;
}
}
return 0;
-error:
+error_napi_stop:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_hw_cleanup(&eth->qdma[i]);
@@ -2780,8 +3321,10 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ airoha_qdma_stop_napi(&eth->qdma[i]);
airoha_hw_cleanup(&eth->qdma[i]);
+ }
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index be3d0876c521..568bbe5f83f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
#
# Netdev basic
@@ -60,6 +60,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += en/tc/ct_fs_hmfs.o
endif
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -123,6 +124,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
steering/sws/dr_ste_v0.o \
steering/sws/dr_ste_v1.o \
steering/sws/dr_ste_v2.o \
+ steering/sws/dr_ste_v3.o \
steering/sws/dr_cmd.o \
steering/sws/dr_fw.o \
steering/sws/dr_action.o \
@@ -150,8 +152,9 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
steering/hws/bwc.o \
steering/hws/debug.o \
steering/hws/vport.o \
- steering/hws/bwc_complex.o
-
+ steering/hws/bwc_complex.o \
+ steering/hws/fs_hws_pools.o \
+ steering/hws/fs_hws.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 9aed29fa4900..d6e736c1fb24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
if (rule->dest_attr.type &
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter_id;
+ mlx5_fc_id(rule->dest_attr.counter);
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index 62b3f7ff5562..e5b30801314b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -48,4 +48,14 @@ mlx5_ct_fs_smfs_ops_get(void)
}
#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+#if IS_ENABLED(CONFIG_MLX5_HW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_HW_STEERING) */
+
#endif /* __MLX5_EN_TC_CT_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
new file mode 100644
index 000000000000..a4263137fef5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc_priv.h"
+#include "en/tc/ct_fs.h"
+#include "fs_core.h"
+#include "steering/hws/fs_hws_pools.h"
+#include "steering/hws/mlx5hws.h"
+#include "steering/hws/table.h"
+
+struct mlx5_ct_fs_hmfs_matcher {
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ refcount_t ref;
+};
+
+/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */
+#define NUM_MATCHERS (2 * 3)
+
+struct mlx5_ct_fs_hmfs {
+ struct mlx5hws_table *ct_tbl;
+ struct mlx5hws_table *ct_nat_tbl;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5hws_action *fwd_action;
+ struct mlx5hws_action *last_action;
+ struct mlx5hws_context *ctx;
+ struct mutex lock; /* Guards matchers */
+ struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS];
+ struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS];
+};
+
+struct mlx5_ct_fs_hmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5hws_bwc_rule *hws_bwc_rule;
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_fc *counter;
+};
+
+static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre)
+{
+ return ipv4 * 3 + tcp * 2 + gre;
+}
+
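For reference, tcp and gre are mutually exclusive, so get_matcher_idx() above packs the three flags into six distinct values, matching NUM_MATCHERS:

	/* ipv4=0: udp -> 0, gre -> 1, tcp -> 2
	 * ipv4=1: udp -> 3, gre -> 4, tcp -> 5
	 */
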
+static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ ct_tbl = ct->fs_hws_table.hws_table;
+ ct_nat_tbl = ct_nat->fs_hws_table.hws_table;
+ post_ct_tbl = post_ct->fs_hws_table.hws_table;
+ fs_hmfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables");
+ return -EOPNOTSUPP;
+ }
+
+ netdev_dbg(fs->netdev, "using hmfs steering\n");
+
+ fs_hmfs->ct_tbl = ct_tbl;
+ fs_hmfs->ct_nat_tbl = ct_nat_tbl;
+ fs_hmfs->ctx = ct_tbl->ctx;
+ mutex_init(&fs_hmfs->lock);
+
+ fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags);
+ if (!fs_hmfs->fwd_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n");
+ return -EINVAL;
+ }
+ fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags);
+ if (!fs_hmfs->last_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n");
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ mlx5hws_action_destroy(fs_hmfs->last_action);
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+}
+
+static struct mlx5hws_bwc_matcher *
+mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl,
+ struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre)
+{
+ u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+ struct mlx5hws_match_parameters mask = {
+ .match_buf = spec->match_criteria,
+ .match_sz = sizeof(spec->match_criteria),
+ };
+ u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+
+ hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask);
+ if (!hws_bwc_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return hws_bwc_matcher;
+}
+
+static struct mlx5_ct_fs_hmfs_matcher *
+mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ bool nat, bool ipv4, bool tcp, bool gre)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre);
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ struct mlx5hws_table *tbl;
+
+ hmfs_matcher = nat ?
+ (fs_hmfs->matchers_nat + matcher_idx) :
+ (fs_hmfs->matchers + matcher_idx);
+
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ return hmfs_matcher;
+
+ mutex_lock(&fs_hmfs->lock);
+
+ /* Retry under the lock, as the matcher might already have been created by another CPU. */
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ goto out_unlock;
+
+ tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl;
+
+ hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
+ if (IS_ERR(hws_bwc_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+ nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher));
+
+ hmfs_matcher = ERR_CAST(hws_bwc_matcher);
+ goto out_unlock;
+ }
+
+ hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher;
+ refcount_set(&hmfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_hmfs->lock);
+ return hmfs_matcher;
+}
+
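The get path above is the usual lockless-fastpath / double-checked idiom: refcount_inc_not_zero() only succeeds on a live matcher, and the test is repeated under the mutex so that two CPUs racing on a not-yet-created matcher build it exactly once. A minimal generic sketch of the same idiom (error handling elided):

	if (refcount_inc_not_zero(&obj->ref))
		return obj;			/* fast path, no lock */

	mutex_lock(&lock);
	if (refcount_inc_not_zero(&obj->ref))
		goto out_unlock;		/* lost the race, reuse */

	obj->res = create_resource();		/* first user creates */
	refcount_set(&obj->ref, 1);
out_unlock:
	mutex_unlock(&lock);
	return obj;
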
+static void
+mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock))
+ return;
+
+ mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher);
+ mutex_unlock(&fs_hmfs->lock);
+}
+
+#define NUM_CT_HMFS_RULES 4
+
+static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
+ struct mlx5_flow_attr *attr,
+ struct mlx5hws_rule_action *rule_actions)
+{
+ struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action;
+
+ memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
+ rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ /* Modify header is special: it may require extra arguments outside the action itself. */
+ if (mh_action->mh_data) {
+ rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+ rule_actions[1].modify_header.data = mh_action->mh_data->data;
+ }
+ rule_actions[1].action = mh_action->hws_action;
+ rule_actions[2].action = fs_hmfs->fwd_action;
+ rule_actions[3].action = fs_hmfs->last_action;
+}
+
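The four rule action slots filled above are positional and fixed by NUM_CT_HMFS_RULES; as laid out here:

	/* rule_actions[0]: flow counter (per-rule statistics)
	 * rule_actions[1]: modify header rewriting the CT metadata; offset/data
	 *                  are only needed when the pattern takes arguments
	 * rule_actions[2]: forward to the post-CT table
	 * rule_actions[3]: last/terminator action
	 */
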
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ struct mlx5hws_match_parameters match_params = {
+ .match_buf = spec->match_value,
+ .match_sz = ARRAY_SIZE(spec->match_value),
+ };
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule;
+ bool nat, tcp, ipv4, gre;
+ int err;
+
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL);
+ if (!hmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ nat = (attr->ft == fs_hmfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
+
+ hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre);
+ if (IS_ERR(hmfs_matcher)) {
+ err = PTR_ERR(hmfs_matcher);
+ goto err_free_rule;
+ }
+ hmfs_rule->hmfs_matcher = hmfs_matcher;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+ hmfs_rule->counter = attr->counter;
+
+ hmfs_rule->hws_bwc_rule =
+ mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params,
+ spec->flow_context.flow_source, rule_actions);
+ if (!hmfs_rule->hws_bwc_rule) {
+ err = -EINVAL;
+ goto err_put_matcher;
+ }
+
+ return &hmfs_rule->fs_rule;
+
+err_put_matcher:
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher);
+err_free_rule:
+ kfree(hmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule);
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher);
+ kfree(hmfs_rule);
+}
+
+static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ int err;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+
+ err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions);
+ if (err) {
+ mlx5_fc_put_hws_action(attr->counter);
+ return err;
+ }
+
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ hmfs_rule->counter = attr->counter;
+
+ return 0;
+}
+
+static struct mlx5_ct_fs_ops hmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update,
+
+ .init = mlx5_ct_fs_hmfs_init,
+ .destroy = mlx5_ct_fs_hmfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_hmfs),
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return &hmfs_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 45737d039252..0c97c5899904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -13,7 +13,6 @@
#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
-#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
struct mlx5_ct_fs_smfs_matcher {
struct mlx5dr_matcher *dr_matcher;
@@ -220,78 +219,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}
-static inline bool
-mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
-{
-#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
- const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
- DISS_BIT(META);
- const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
- const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
-
- return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
-}
-
-static bool
-mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
-{
- struct flow_match_ipv4_addrs ipv4_addrs;
- struct flow_match_ipv6_addrs ipv6_addrs;
- struct flow_match_control control;
- struct flow_match_basic basic;
- struct flow_match_ports ports;
- struct flow_match_tcp tcp;
-
- if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
- ct_dbg("rule uses unexpected dissectors (0x%016llx)",
- flow_rule->match.dissector->used_keys);
- return false;
- }
-
- flow_rule_match_basic(flow_rule, &basic);
- flow_rule_match_control(flow_rule, &control);
- flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
- flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- if (basic.key->ip_proto != IPPROTO_GRE)
- flow_rule_match_ports(flow_rule, &ports);
- if (basic.key->ip_proto == IPPROTO_TCP)
- flow_rule_match_tcp(flow_rule, &tcp);
-
- if (basic.mask->n_proto != htons(0xFFFF) ||
- (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
- basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
- basic.key->ip_proto != IPPROTO_GRE)) {
- ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
- ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
- basic.key->ip_proto, basic.mask->ip_proto);
- return false;
- }
-
- if (basic.key->ip_proto != IPPROTO_GRE &&
- (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
- ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
- ports.mask->src, ports.mask->dst);
- return false;
- }
-
- if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
- ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
- return false;
- }
-
- return true;
-}
-
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
@@ -304,7 +231,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
int num_actions = 0, err;
bool nat, tcp, ipv4, gre;
- if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a84ebac2f011..a065e8fafb1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2065,10 +2065,19 @@ mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
int err;
- if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
- ct_dbg("Using SMFS ct flow steering provider");
- fs_ops = mlx5_ct_fs_smfs_ops_get();
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_HMFS) {
+ ct_dbg("Using HMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_hmfs_ops_get();
+ } else if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ if (!fs_ops) {
+ ct_dbg("Requested flow steering mode is not enabled.");
+ return -EOPNOTSUPP;
+ }
}
ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
@@ -2421,3 +2430,74 @@ out_inc_drop:
atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
return false;
}
+
+static bool mlx5e_tc_ct_valid_used_dissector_keys(const u64 used_keys)
+{
+#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
+ const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
+ DISS_BIT(META);
+ const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
+}
+
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5e_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected dissectors (0x%016llx)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
+ netdev_dbg(dev, "ct_debug: rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected tcp match (flags 0x%02x)",
+ tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
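The MLX5_CT_TCP_FLAGS_MASK compared against above (moved to tc_ct.h later in this patch) is worth unpacking: TCP_FLAG_RST and TCP_FLAG_FIN are __be32 constants whose flag bits land in the upper 16 bits once swapped to CPU order, so the expression reduces step by step:

	/* TCP_FLAG_RST | TCP_FLAG_FIN == htonl(0x00040000 | 0x00010000)
	 * be32_to_cpu(...)           == 0x00050000
	 * ...           >> 16        == 0x0005
	 * cpu_to_be16(0x0005)        == RST|FIN in the wire-order 16-bit
	 *                               tcp flags field
	 */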
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index b66c5f98067f..5e9dbdd4a5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -128,6 +128,9 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -202,5 +205,12 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return false;
}
+static inline bool
+mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev,
+ struct flow_rule *flow_rule)
+{
+ return false;
+}
+
#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
#endif /* __MLX5_EN_TC_CT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ca92e518be76..501709ac310f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -94,25 +94,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 esn, esn_msb;
u8 overlap;
- switch (x->xso.type) {
- case XFRM_DEV_OFFLOAD_PACKET:
- switch (x->xso.dir) {
- case XFRM_DEV_OFFLOAD_IN:
- esn = x->replay_esn->seq;
- esn_msb = x->replay_esn->seq_hi;
- break;
- case XFRM_DEV_OFFLOAD_OUT:
- esn = x->replay_esn->oseq;
- esn_msb = x->replay_esn->oseq_hi;
- break;
- default:
- WARN_ON(true);
- return false;
- }
- break;
- case XFRM_DEV_OFFLOAD_CRYPTO:
- /* Already parsed by XFRM core */
+ switch (x->xso.dir) {
+ case XFRM_DEV_OFFLOAD_IN:
esn = x->replay_esn->seq;
+ esn_msb = x->replay_esn->seq_hi;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ esn = x->replay_esn->oseq;
+ esn_msb = x->replay_esn->oseq_hi;
break;
default:
WARN_ON(true);
@@ -121,11 +110,15 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
overlap = sa_entry->esn_state.overlap;
- if (esn >= x->replay_esn->replay_window)
- seq_bottom = esn - x->replay_esn->replay_window + 1;
+ if (!x->replay_esn->replay_window) {
+ seq_bottom = esn;
+ } else {
+ if (esn >= x->replay_esn->replay_window)
+ seq_bottom = esn - x->replay_esn->replay_window + 1;
- if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
- esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ }
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
@@ -724,6 +717,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
@@ -768,9 +767,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
MLX5_IPSEC_RESCHED);
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ x->props.mode == XFRM_MODE_TUNNEL) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
@@ -797,7 +799,6 @@ err_xfrm:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -806,12 +807,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..e7b64679f121 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -194,7 +194,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -223,7 +223,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -275,7 +275,7 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -348,7 +348,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -686,7 +686,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
rx->ft.status = ft;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+ dest[1].counter = rx->fc->cnt;
err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@@ -873,7 +873,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ dest.counter = tx->fc->cnt;
fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
@@ -1649,7 +1649,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1718,23 +1718,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
@@ -1762,7 +1760,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1835,7 +1833,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dest[dstn].counter = tx->fc->drop;
dstn++;
break;
default:
@@ -1913,7 +1911,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dest[dstn].counter = rx->fc->drop;
dstn++;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 53cfa39188cb..820debf3fbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
@@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0ec17c276bdd..bd41b75d246e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3946,6 +3946,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6b3b1afe8312..9ba99609999f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1282,7 +1282,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest[dest_ix].counter = attr->counter;
dest_ix++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 6b4c9ffad95b..7dd1dc3f77c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ drop_ctr_dst.counter = drop_counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 093ed86a0acd..1c37098e09ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index c5ea1d1d2b03..5f647358a05c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -570,7 +570,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge,
struct mlx5_eswitch *esw)
{
@@ -628,7 +629,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[0].ft = bridge->egress_ft;
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dests[1].counter_id = counter_id;
+ dests[1].counter = counter;
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
ARRAY_SIZE(dests));
@@ -639,17 +640,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
- return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, bridge->br_offloads->esw);
}
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
@@ -671,7 +674,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
goto out;
}
- handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, peer_esw);
out:
@@ -1385,10 +1388,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
handle = peer ?
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
- addr, vlan, mlx5_fc_id(counter),
- bridge) :
+ addr, vlan, counter, bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge);
+ counter, bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 06076dd9ec64..20cc01ceee8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -721,7 +721,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter_id = mlx5_fc_id(attr->counter);
+ dest[i].counter = attr->counter;
i++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 676005854dad..ae20c061e0fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
- underlay_qpn == 0)
+ underlay_qpn == 0 &&
+ (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
return 0;
if (ft->type == FS_FT_FDB &&
@@ -718,7 +719,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter_id);
+ mlx5_fc_id(dst->dest_attr.counter));
in_dests += dst_cnt_size;
list_size++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2eabfcc247c6..22dc23d991d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -658,6 +658,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ mlx5_fc_local_destroy(rule->dest_attr.counter);
goto out;
}
@@ -820,11 +821,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return index;
fte->index = index + fg->start_index;
+retry_insert:
ret = rhashtable_insert_fast(&fg->ftes_hash,
&fte->hash,
rhash_fte);
- if (ret)
+ if (ret) {
+ if (ret == -EBUSY) {
+ cond_resched();
+ goto retry_insert;
+ }
goto err_ida_remove;
+ }
tree_add_node(&fte->node, &fg->node);
list_add_tail(&fte->node.list, &fg->node.children);
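A note on the retry loop added above: rhashtable_insert_fast() can transiently return -EBUSY while the table is being resized or rehashed; that condition clears on its own, so yielding via cond_resched() and retrying suffices, while any other error still unwinds through err_ida_remove. The goto form in the patch is equivalent to this loop shape:

	int ret;

	do {
		ret = rhashtable_insert_fast(&ht, &node, params);
		if (ret == -EBUSY)
			cond_resched();	/* rehash in progress, try again */
	} while (ret == -EBUSY);
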
@@ -2709,6 +2716,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
@@ -3528,35 +3536,42 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
char *value = val.vstr;
- int err = 0;
+ u8 eswitch_mode;
- if (!strcmp(value, "dmfs")) {
+ if (!strcmp(value, "dmfs"))
return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
+ if (!strcmp(value, "smfs")) {
+ bool smfs_cap = mlx5_fs_dr_is_supported(dev);
if (!smfs_cap) {
- err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack,
"Software managed steering is not supported by current device");
+ return -EOPNOTSUPP;
}
+ } else if (!strcmp(value, "hmfs")) {
+ bool hmfs_cap = mlx5_fs_hws_is_supported(dev);
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ if (!hmfs_cap) {
NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
+ "Hardware steering is not supported by current device");
+ return -EOPNOTSUPP;
}
} else {
NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\", \"hmfs\"]");
+ return -EINVAL;
}
- return err;
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Moving to %s is not supported when eswitch offloads enabled.",
+ value);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
@@ -3568,6 +3583,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
if (!strcmp(ctx->val.vstr, "smfs"))
mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (!strcmp(ctx->val.vstr, "hmfs"))
+ mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
mode = MLX5_FLOW_STEERING_MODE_DMFS;
dev->priv.steering->mode = mode;
@@ -3580,10 +3597,17 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ switch (dev->priv.steering->mode) {
+ case MLX5_FLOW_STEERING_MODE_SMFS:
strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
- else
+ break;
+ case MLX5_FLOW_STEERING_MODE_HMFS:
+ strscpy(ctx->val.vstr, "hmfs", sizeof(ctx->val.vstr));
+ break;
+ default:
strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
+ }
+
return 0;
}
@@ -3658,8 +3682,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
- MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
@@ -4003,6 +4026,8 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
cmds = mlx5_fs_cmd_get_dr_cmds();
+ else if (mode == MLX5_FLOW_STEERING_MODE_HMFS)
+ cmds = mlx5_fs_cmd_get_hws_cmds();
else
cmds = mlx5_fs_cmd_get_fw_cmds();
if (!cmds)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index bad2df0715ec..20837e526679 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -38,6 +38,7 @@
#include <linux/rhashtable.h>
#include <linux/llist.h>
#include <steering/sws/fs_dr.h>
+#include <steering/hws/fs_hws.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -64,6 +65,7 @@ struct mlx5_modify_hdr {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -74,6 +76,7 @@ struct mlx5_pkt_reformat {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -126,7 +129,8 @@ enum fs_fte_status {
enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_DMFS,
- MLX5_FLOW_STEERING_MODE_SMFS
+ MLX5_FLOW_STEERING_MODE_SMFS,
+ MLX5_FLOW_STEERING_MODE_HMFS,
};
enum mlx5_flow_steering_capabilty {
@@ -190,7 +194,10 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
- struct mlx5_fs_dr_table fs_dr_table;
+ union {
+ struct mlx5_fs_dr_table fs_dr_table;
+ struct mlx5_fs_hws_table fs_hws_table;
+ };
u32 id;
u16 vport;
unsigned int max_fte;
@@ -247,7 +254,10 @@ struct fs_fte_dup {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
- struct mlx5_fs_dr_rule fs_dr_rule;
+ union {
+ struct mlx5_fs_dr_rule fs_dr_rule;
+ struct mlx5_fs_hws_rule fs_hws_rule;
+ };
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
struct fs_fte_action act_dests;
struct fs_fte_dup *dup;
@@ -280,7 +290,10 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
- struct mlx5_fs_dr_matcher fs_dr_matcher;
+ union {
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
+ struct mlx5_fs_hws_matcher fs_hws_matcher;
+ };
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -293,7 +306,10 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
enum mlx5_flow_steering_mode mode;
- struct mlx5_fs_dr_domain fs_dr_domain;
+ union {
+ struct mlx5_fs_dr_domain fs_dr_domain;
+ struct mlx5_fs_hws_context fs_hws_context;
+ };
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -303,6 +319,42 @@ struct mlx5_flow_root_namespace {
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ u32 id;
+ bool aging;
+ enum mlx5_fc_type type;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
+ u64 lastpackets;
+ u64 lastbytes;
+};
+
+struct mlx5_fc_bulk_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fc_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ u32 base_id;
+ struct mlx5_fc_bulk_hws_data hws_data;
+ struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 62d0c689796b..492775d3d193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
+#include "fs_pool.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
@@ -43,33 +44,6 @@
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- u32 id;
- bool aging;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_cache cache;
- /* last{packets,bytes} are used for calculating deltas since last reading. */
- u64 lastpackets;
- u64 lastbytes;
-};
-
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
@@ -80,13 +54,13 @@ struct mlx5_fc_stats {
int bulk_query_len;
bool bulk_query_alloc_failed;
unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
+ struct mlx5_fs_pool fc_pool;
};
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
-static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
-static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
@@ -186,6 +160,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
+ return;
+
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
@@ -435,15 +412,7 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
- struct list_head pool_list;
- u32 base_id;
- int bulk_len;
- unsigned long *bitmask;
- struct mlx5_fc fcs[] __counted_by(bulk_len);
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
@@ -452,16 +421,16 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
- return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+ return counter->bulk->base_id;
}
-static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
+ void *pool_ctx)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
- struct mlx5_fc_bulk *bulk;
- int err = -ENOMEM;
+ struct mlx5_fc_bulk *fc_bulk;
int bulk_len;
u32 base_id;
int i;
@@ -469,207 +438,141 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
- if (!bulk)
- goto err_alloc_bulk;
-
- bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
- if (!bulk->bitmask)
- goto err_alloc_bitmask;
+ fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
+ if (!fc_bulk)
+ return NULL;
- err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
- if (err)
- goto err_mlx5_cmd_bulk_alloc;
+ if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ goto fc_bulk_free;
- bulk->base_id = base_id;
- bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++) {
- mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
- set_bit(i, bulk->bitmask);
- }
+ if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
+ goto fs_bulk_cleanup;
+ fc_bulk->base_id = base_id;
+ for (i = 0; i < bulk_len; i++)
+ mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- return bulk;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ return &fc_bulk->fs_bulk;
-err_mlx5_cmd_bulk_alloc:
- kvfree(bulk->bitmask);
-err_alloc_bitmask:
- kvfree(bulk);
-err_alloc_bulk:
- return ERR_PTR(err);
+fs_bulk_cleanup:
+ mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
+fc_bulk_free:
+ kvfree(fc_bulk);
+ return NULL;
}
static int
-mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
- if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
+ struct mlx5_fc_bulk,
+ fs_bulk);
+
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
- mlx5_cmd_fc_free(dev, bulk->base_id);
- kvfree(bulk->bitmask);
- kvfree(bulk);
+ mlx5_cmd_fc_free(dev, fc_bulk->base_id);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(fc_bulk);
return 0;
}
-static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
- int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
-
- if (free_fc_index >= bulk->bulk_len)
- return ERR_PTR(-ENOSPC);
-
- clear_bit(free_fc_index, bulk->bitmask);
- return &bulk->fcs[free_fc_index];
-}
-
-static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
-{
- int fc_index = fc->id - bulk->base_id;
-
- if (test_bit(fc_index, bulk->bitmask))
- return -EINVAL;
-
- set_bit(fc_index, bulk->bitmask);
- return 0;
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}
/* Flow counters pool API */
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
-{
- fc_pool->dev = dev;
- mutex_init(&fc_pool->pool_lock);
- INIT_LIST_HEAD(&fc_pool->fully_used);
- INIT_LIST_HEAD(&fc_pool->partially_used);
- INIT_LIST_HEAD(&fc_pool->unused);
- fc_pool->available_fcs = 0;
- fc_pool->used_fcs = 0;
- fc_pool->threshold = 0;
-}
+static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
+ .bulk_destroy = mlx5_fc_bulk_destroy,
+ .bulk_create = mlx5_fc_bulk_create,
+ .update_threshold = mlx5_fc_pool_update_threshold,
+};
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+static void
+mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_bulk *tmp;
-
- list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}
-static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
- fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
- fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+ mlx5_fs_pool_cleanup(fc_pool);
}
-static struct mlx5_fc_bulk *
-mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fc_bulk *fc_bulk;
+ int err;
- new_bulk = mlx5_fc_bulk_create(dev);
- if (!IS_ERR(new_bulk))
- fc_pool->available_fcs += new_bulk->bulk_len;
- mlx5_fc_pool_update_threshold(fc_pool);
- return new_bulk;
+ err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
+ return &fc_bulk->fcs[pool_index.index];
}
static void
-mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
+ struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
struct mlx5_core_dev *dev = fc_pool->dev;
- fc_pool->available_fcs -= bulk->bulk_len;
- mlx5_fc_bulk_destroy(dev, bulk);
- mlx5_fc_pool_update_threshold(fc_pool);
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = fc->id - fc->bulk->base_id;
+ if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
- struct list_head *next_list,
- bool move_non_full_bulk)
+/**
+ * mlx5_fc_local_create - Allocate an mlx5_fc struct for a counter that
+ * was already acquired elsewhere, described by its counter id and bulk data.
+ *
+ * @counter_id: id of the already-acquired counter
+ * @offset: offset of the counter from the bulk base id
+ * @bulk_size: size of the bulk the counter was allocated from
+ *
+ * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
+ */
+struct mlx5_fc *
+mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc *fc;
-
- if (list_empty(src_list))
- return ERR_PTR(-ENODATA);
-
- bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
- fc = mlx5_fc_bulk_acquire_fc(bulk);
- if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
- list_move(&bulk->pool_list, next_list);
- return fc;
-}
+ struct mlx5_fc_bulk *fc_bulk;
+ struct mlx5_fc *counter;
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
-{
- struct mlx5_fc_bulk *new_bulk;
- struct mlx5_fc *fc;
-
- mutex_lock(&fc_pool->pool_lock);
-
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
- &fc_pool->fully_used, false);
- if (IS_ERR(fc))
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
- &fc_pool->partially_used,
- true);
- if (IS_ERR(fc)) {
- new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
- if (IS_ERR(new_bulk)) {
- fc = ERR_CAST(new_bulk);
- goto out;
- }
- fc = mlx5_fc_bulk_acquire_fc(new_bulk);
- list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ kfree(counter);
+ return ERR_PTR(-ENOMEM);
}
- fc_pool->available_fcs--;
- fc_pool->used_fcs++;
-out:
- mutex_unlock(&fc_pool->pool_lock);
- return fc;
+ counter->type = MLX5_FC_TYPE_LOCAL;
+ counter->id = counter_id;
+ fc_bulk->base_id = counter_id - offset;
+ fc_bulk->fs_bulk.bulk_len = bulk_size;
+ counter->bulk = fc_bulk;
+ return counter;
}
+EXPORT_SYMBOL(mlx5_fc_local_create);
-static void
-mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk = fc->bulk;
- int bulk_free_fcs_amount;
-
- mutex_lock(&fc_pool->pool_lock);
-
- if (mlx5_fc_bulk_release_fc(bulk, fc)) {
- mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
- goto unlock;
- }
-
- fc_pool->available_fcs++;
- fc_pool->used_fcs--;
-
- bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
- if (bulk_free_fcs_amount == 1)
- list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
- if (bulk_free_fcs_amount == bulk->bulk_len) {
- list_del(&bulk->pool_list);
- if (fc_pool->available_fcs > fc_pool->threshold)
- mlx5_fc_pool_free_bulk(fc_pool, bulk);
- else
- list_add(&bulk->pool_list, &fc_pool->unused);
- }
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
-unlock:
- mutex_unlock(&fc_pool->pool_lock);
+ kfree(counter->bulk);
+ kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_local_destroy);
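The two local-counter helpers above only wrap bookkeeping around a counter that was allocated elsewhere (for example by hardware steering); no firmware command is issued here. A minimal usage sketch, with made-up id/offset/size values:

	struct mlx5_fc *fc;

	/* Wrap an externally allocated counter so it can be handed to
	 * code that expects a struct mlx5_fc. Values are illustrative. */
	fc = mlx5_fc_local_create(/* counter_id */ 0x1234,
				  /* offset     */ 4,
				  /* bulk_size  */ 64);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/* ... attach fc to flow rules ... */

	mlx5_fc_local_destroy(fc);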
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
new file mode 100644
index 000000000000..f6c226664602
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_pool.h"
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len)
+{
+ int i;
+
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fs_bulk->bitmask)
+ return -ENOMEM;
+
+ fs_bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++)
+ set_bit(i, fs_bulk->bitmask);
+
+ return 0;
+}
+
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
+{
+ kvfree(fs_bulk->bitmask);
+}
+
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+	int free_index;
+
+	/* validate arguments before the bulk is dereferenced */
+	WARN_ON_ONCE(!fs_bulk || !pool_index);
+
+	free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
+ if (free_index >= fs_bulk->bulk_len)
+ return -ENOSPC;
+
+ clear_bit(free_index, fs_bulk->bitmask);
+ pool_index->fs_bulk = fs_bulk;
+ pool_index->index = free_index;
+ return 0;
+}
+
+static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
+{
+ if (test_bit(index, fs_bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(index, fs_bulk->bitmask);
+ return 0;
+}
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
+{
+ WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
+ !ops->update_threshold);
+ pool->dev = dev;
+ pool->pool_ctx = pool_ctx;
+ mutex_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->fully_used);
+ INIT_LIST_HEAD(&pool->partially_used);
+ INIT_LIST_HEAD(&pool->unused);
+ pool->available_units = 0;
+ pool->used_units = 0;
+ pool->threshold = 0;
+ pool->ops = ops;
+}
+
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
+{
+ struct mlx5_core_dev *dev = pool->dev;
+ struct mlx5_fs_bulk *bulk;
+ struct mlx5_fs_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+ struct mlx5_fs_bulk *new_bulk;
+
+ new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
+ if (new_bulk)
+ fs_pool->available_units += new_bulk->bulk_len;
+ fs_pool->ops->update_threshold(fs_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+
+ fs_pool->available_units -= bulk->bulk_len;
+ fs_pool->ops->bulk_destroy(dev, bulk);
+ fs_pool->ops->update_threshold(fs_pool);
+}
+
+static int
+mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *fs_bulk;
+ int err;
+
+ if (list_empty(src_list))
+ return -ENODATA;
+
+ fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
+ err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
+ if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
+ list_move(&fs_bulk->pool_list, next_list);
+ return err;
+}
+
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *new_bulk;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
+ &fs_pool->fully_used, false,
+ pool_index);
+ if (err)
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
+ &fs_pool->partially_used,
+ true, pool_index);
+ if (err) {
+ new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
+ if (!new_bulk) {
+ err = -ENOENT;
+ goto out;
+ }
+ err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
+ WARN_ON_ONCE(err);
+ list_add(&new_bulk->pool_list, &fs_pool->partially_used);
+ }
+ fs_pool->available_units--;
+ fs_pool->used_units++;
+
+out:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
+
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
+ int bulk_free_amount;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+	/* TODO: could return void; the error only feeds the warning that
+	 * the original flow-counter code emitted at this point.
+	 */
+ err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
+ if (err)
+ goto unlock;
+
+ fs_pool->available_units++;
+ fs_pool->used_units--;
+
+ bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
+ if (bulk_free_amount == 1)
+ list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
+ if (bulk_free_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fs_pool->available_units > fs_pool->threshold)
+ mlx5_fs_pool_free_bulk(fs_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fs_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
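fs_pool.c lifts the bulk/pool bookkeeping that used to live in the flow-counter code into a generic layer: bulks sit on fully_used/partially_used/unused lists, and acquire prefers a partially used bulk before breaking into an unused one or allocating a fresh bulk. A hypothetical consumer (type and field names are illustrative, mirroring the flow-counter user above) would acquire one unit like this:

	static struct my_obj *my_obj_acquire(struct mlx5_fs_pool *pool)
	{
		struct mlx5_fs_pool_index idx = {};
		struct my_obj_bulk *bulk;
		int err;

		err = mlx5_fs_pool_acquire_index(pool, &idx);
		if (err)
			return ERR_PTR(err);
		/* the pool hands back a generic bulk; recover the wrapper */
		bulk = container_of(idx.fs_bulk, struct my_obj_bulk, fs_bulk);
		return &bulk->objs[idx.index];
	}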
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
new file mode 100644
index 000000000000..f04ec3107498
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_POOL_H__
+#define __MLX5_FS_POOL_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_fs_bulk {
+ struct list_head pool_list;
+ int bulk_len;
+ unsigned long *bitmask;
+};
+
+struct mlx5_fs_pool_index {
+ struct mlx5_fs_bulk *fs_bulk;
+ int index;
+};
+
+struct mlx5_fs_pool;
+
+struct mlx5_fs_pool_ops {
+ int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
+ struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev,
+ void *pool_ctx);
+ void (*update_threshold)(struct mlx5_fs_pool *pool);
+};
+
+struct mlx5_fs_pool {
+ struct mlx5_core_dev *dev;
+ void *pool_ctx;
+ const struct mlx5_fs_pool_ops *ops;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_units;
+ int used_units;
+ int threshold;
+};
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len);
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx);
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool);
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+
+#endif /* __MLX5_FS_POOL_H__ */
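The contract the header implies: a resource-specific bulk embeds struct mlx5_fs_bulk, and the pool drives creation/destruction through mlx5_fs_pool_ops. A sketch of a bulk_create callback under those assumptions (my_obj_bulk is hypothetical; note that mlx5_fs_pool_alloc_new_bulk() treats a NULL return as failure):

	struct my_obj_bulk {
		struct mlx5_fs_bulk fs_bulk;	/* embedded generic part */
		struct my_obj objs[];		/* one object per bitmask bit */
	};

	static struct mlx5_fs_bulk *
	my_obj_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
	{
		const int bulk_len = 128;	/* illustrative fixed size */
		struct my_obj_bulk *bulk;

		bulk = kvzalloc(struct_size(bulk, objs, bulk_len), GFP_KERNEL);
		if (!bulk)
			return NULL;
		if (mlx5_fs_bulk_init(dev, &bulk->fs_bulk, bulk_len)) {
			kvfree(bulk);
			return NULL;
		}
		/* ... allocate the bulk_len objects in firmware here ... */
		return &bulk->fs_bulk;
	}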
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 76ad46bf477d..b253d1673398 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -281,6 +281,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
return 0;
}
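With the SHAMPO capability page cached only when the general shampo bit is set, consumers must keep the same gating order: check MLX5_CAP_GEN(dev, shampo) before touching SHAMPO-specific fields. A sketch, assuming an accessor in the usual MLX5_CAP_* style (the field name is illustrative):

	if (MLX5_CAP_GEN(dev, shampo) &&
	    MLX5_CAP_SHAMPO(dev, shampo_log_max_reservation_size))
		/* safe: the cap page above was queried and cached */
		configure_shampo_rx(dev);	/* hypothetical hook */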
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index f4b777d4e108..62b6faa4276a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
+ int i, idx = 0;
int num_ports;
- int i;
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports,
&num_ports);
hash = true;
} else {
- for (i = 0; i < ldev->ports; i++)
- ports[i] = ldev->v2p_map[i];
+ mlx5_ldev_for_each(i, 0, ldev)
+ ports[idx++] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
@@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv)
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
+ mlx5_ldev_for_each(i, 0, ldev)
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
- }
mutex_unlock(&ldev->lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 7f68468c2e75..cea5aa314f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -43,10 +43,6 @@
#include "mp.h"
#include "mpesw.h"
-enum {
- MLX5_LAG_EGRESS_PORT_1 = 1,
- MLX5_LAG_EGRESS_PORT_2,
-};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -72,7 +68,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
int num_enabled;
int idx;
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports,
&num_enabled);
for (idx = 0; idx < num_enabled; idx++)
active_port |= BIT_MASK(enabled_ports[idx]);
@@ -80,23 +76,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
return active_port;
}
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
- unsigned long flags)
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
+ int mode, unsigned long flags)
{
bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
&flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
+ u8 *ports = ldev->v2p_map;
+ int idx0, idx1;
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
break;
case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
@@ -113,17 +116,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+ int idx0, idx1;
+
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -148,33 +157,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
- }
}
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled)
{
int i;
*num_enabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
ports[(*num_enabled)++] = i;
- }
if (*num_enabled == 0)
- mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+ mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled);
}
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
@@ -192,7 +199,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int j;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
- mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(tracker, ldev, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
@@ -203,7 +210,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
buf[written - 2] = 0;
mlx5_core_info(dev, "lag map active ports: %s\n", buf);
} else {
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
err = scnprintf(buf + written, 10,
@@ -286,13 +293,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].netdev == ndev)
return i;
return -ENOENT;
}
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return -ENOENT;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (num == seq)
+ return i;
+ num++;
+ }
+ return -ENOENT;
+}
+
+int mlx5_lag_num_devs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ (void)i;
+ num++;
+ }
+ return num;
+}
+
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->pf[i].netdev)
+ num++;
+ return num;
+}
+
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_ROCE;
@@ -310,7 +359,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
* with mapping that points to active ports.
*/
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 num_ports,
+ struct mlx5_lag *ldev,
u8 buckets,
u8 *ports)
{
@@ -323,7 +372,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
int i;
int j;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
@@ -334,15 +383,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
/* Use native mapping by default where each port's buckets
* point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
*/
- for (i = 0; i < num_ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < buckets; j++) {
idx = i * buckets + j;
- ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ ports[idx] = i + 1;
}
+ }
/* If all ports are disabled/enabled keep native mapping */
- if (enabled_ports_num == num_ports ||
- disabled_ports_num == num_ports)
+ if (enabled_ports_num == ldev->ports ||
+ disabled_ports_num == ldev->ports)
return;
/* Go over the disabled ports and for each assign a random active port */
@@ -358,7 +408,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].has_drop)
return true;
return false;
@@ -368,7 +418,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (!ldev->pf[i].has_drop)
continue;
@@ -396,7 +446,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive)
return;
- mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+ mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled);
for (i = 0; i < num_disabled; i++) {
disabled_index = disabled_ports[i];
@@ -428,10 +478,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
u8 active_ports;
int ret;
+ if (idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[idx].dev;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
ret = mlx5_lag_port_sel_modify(ldev, ports);
if (ret ||
@@ -442,7 +497,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_active_port(dev0, active_ports);
}
- return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
+ return mlx5_cmd_modify_lag(dev0, ldev, ports);
}
static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
@@ -450,7 +505,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
- int i;
+ int i, last_idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
@@ -458,11 +513,15 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
if (!ldev)
goto unlock;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->tracker.netdev_state[i].tx_enabled)
ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
+ if (!ndev) {
+ last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1);
+ if (last_idx < 0)
+ goto unlock;
+ ndev = ldev->pf[last_idx].netdev;
+ }
if (ndev)
dev_hold(ndev);
@@ -476,16 +535,21 @@ unlock:
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
int idx;
int err;
int i;
int j;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
+ if (first_idx < 0)
+ return;
+
+ dev0 = ldev->pf[first_idx].dev;
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ports[idx] == ldev->v2p_map[idx])
@@ -523,8 +587,13 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
unsigned long *flags)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev0 = ldev->pf[first_idx].dev;
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
return -EINVAL;
@@ -544,11 +613,13 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
unsigned long *flags)
{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct lag_func *dev0;
- if (mode == MLX5_LAG_MODE_MPESW)
+ if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
return;
+ dev0 = &ldev->pf[first_idx];
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
if (ldev->ports > 2)
@@ -593,12 +664,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
+ int i, j;
int err;
- int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
+ mlx5_ldev_for_each(i, first_idx + 1, ldev) {
struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
@@ -608,9 +685,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
}
return 0;
err:
- for (; i > MLX5_LAG_P1; i--)
+ mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
- ldev->pf[i].dev->priv.eswitch);
+ ldev->pf[j].dev->priv.eswitch);
return err;
}
@@ -620,16 +697,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
unsigned long flags)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
+ struct mlx5_core_dev *dev0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -661,17 +743,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
bool shared_fdb)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
unsigned long flags = 0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
if (mode != MLX5_LAG_MODE_MPESW) {
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map);
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map);
@@ -709,20 +796,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
int err;
int i;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
@@ -754,6 +847,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
@@ -761,30 +855,29 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
bool roce_support;
int i;
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
- return false;
+ if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[first_idx].dev;
mode = mlx5_eswitch_mode(dev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
#else
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
- roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
- for (i = 1; i < ldev->ports; i++)
+ roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev);
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
return false;
@@ -795,10 +888,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -812,10 +902,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -828,11 +915,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
bool roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
roce_lag = __mlx5_lag_is_roce(ldev);
if (shared_fdb) {
@@ -842,7 +934,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- for (i = 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, idx + 1, ldev)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
@@ -854,17 +946,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (idx < 0)
+ return false;
+
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
dev = ldev->pf[i].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
@@ -876,7 +972,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[idx].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
@@ -892,11 +988,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
bool roce_lag = true;
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
@@ -917,13 +1013,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct lag_tracker tracker = { };
+ struct mlx5_core_dev *dev0;
struct net_device *ndev;
bool do_bond, roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
@@ -956,7 +1057,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
if (mlx5_get_roce_state(ldev->pf[i].dev))
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
}
@@ -966,7 +1067,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
@@ -977,7 +1078,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
@@ -1010,12 +1111,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
int i;
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[i].dev) {
- devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
- break;
- }
- }
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS)
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
mutex_unlock(&ldev->lock);
return devcom;
}
@@ -1068,7 +1166,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
- int idx;
+ int i, idx = -1;
if (!netif_is_lag_master(upper))
return 0;
@@ -1083,8 +1181,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
- idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == ndev_tmp) {
+ idx++;
+ break;
+ }
+ }
+ if (i < MLX5_MAX_PORTS) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
@@ -1234,15 +1337,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
unsigned long flags;
- if (fn >= ldev->ports)
- return;
-
spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -1257,7 +1357,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i;
spin_lock_irqsave(&lag_lock, flags);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
@@ -1267,13 +1367,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+ struct mlx5_core_dev *dev)
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= ldev->ports)
- return;
-
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
@@ -1281,16 +1378,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- int i;
-
- for (i = 0; i < ldev->ports; i++)
- if (ldev->pf[i].dev == dev)
- break;
+ int fn;
- if (i == ldev->ports)
+ fn = mlx5_get_dev_index(dev);
+ if (ldev->pf[fn].dev != dev)
return;
- ldev->pf[i].dev = NULL;
+ ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
@@ -1398,7 +1492,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
+ int num = 0;
ldev = mlx5_lag_dev(dev);
if (!ldev)
@@ -1406,17 +1500,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
-
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].netdev)
- break;
-
- if (i >= ldev->ports)
+ num = mlx5_lag_num_netdevs(ldev);
+ if (num >= ldev->ports)
set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= end_idx; i--)
+ if (ldev->pf[i].dev)
+ return i;
+ return -1;
+}
+
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ return i;
+ return MLX5_MAX_PORTS;
+}
+
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1467,12 +1577,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
unsigned long flags;
- bool res;
+ bool res = false;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_active(ldev) &&
- dev == ldev->pf[MLX5_LAG_P1].dev;
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev;
spin_unlock_irqrestore(&lag_lock, flags);
return res;
@@ -1555,7 +1666,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == slave) {
port = i;
break;
@@ -1594,13 +1705,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int
if (!ldev)
goto unlock;
- if (*i == ldev->ports)
+ if (*i == MLX5_MAX_PORTS)
goto unlock;
- for (idx = *i; idx < ldev->ports; idx++)
+ mlx5_ldev_for_each(idx, *i, ldev)
if (ldev->pf[idx].dev != dev)
break;
- if (idx == ldev->ports) {
+ if (idx == MLX5_MAX_PORTS) {
*i = idx;
goto unlock;
}
@@ -1621,10 +1732,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
+ int ret = 0, i, j, idx = 0;
struct mlx5_lag *ldev;
unsigned long flags;
int num_ports;
- int ret, i, j;
void *out;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -1643,8 +1754,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
- for (i = 0; i < ldev->ports; i++)
- mdev[i] = ldev->pf[i].dev;
+ mlx5_ldev_for_each(i, 0, ldev)
+ mdev[idx++] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
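The thread running through all of these lag.c changes: ldev->pf[] may now be sparsely populated, so MLX5_LAG_P1 is a sequence number, not an array index, and must be translated before use. An illustration with a made-up population:

	/* pf[0] empty, pf[1] and pf[3] populated:
	 *   mlx5_lag_get_dev_index_by_seq(ldev, 0) -> 1
	 *   mlx5_lag_get_dev_index_by_seq(ldev, 1) -> 3
	 *   mlx5_lag_num_devs(ldev)                -> 2
	 */
	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);

	if (first_idx < 0)
		return -EINVAL;		/* nothing registered yet */
	dev0 = ldev->pf[first_idx].dev;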
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 50fcb1eee574..01cf72366947 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -103,7 +103,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
@@ -119,9 +119,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
+#define mlx5_ldev_for_each(i, start_index, ldev) \
+ for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
+ i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
+
+#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \
+ for (int tmp = start_index, tmp1 = end_index; \
+ tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
+ i = tmp, tmp >= tmp1; tmp--)
+
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
+int mlx5_lag_num_devs(struct mlx5_lag *ldev);
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
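Both iterator macros skip unpopulated pf[] slots by delegating to the lookup helpers, so loop bodies no longer need a !ldev->pf[i].dev guard. A usage sketch (do_thing/undo_thing are hypothetical):

	int i, j;

	mlx5_ldev_for_each(i, 0, ldev)
		do_thing(ldev->pf[i].dev);	/* ascending, holes skipped */

	mlx5_ldev_for_each_reverse(j, MLX5_MAX_PORTS - 1, 0, ldev)
		undo_thing(ldev->pf[j].dev);	/* descending, e.g. error unwind */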
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index b1aa494c76ba..aee17fcf3b36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!mlx5_lag_is_ready(ldev))
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
+
+ if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev))
return false;
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
@@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev,
+ ldev->pf[idx1].dev);
}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
@@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
struct lag_tracker tracker = {};
- if (!__mlx5_lag_is_multipath(ldev))
+ if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev))
return;
switch (port) {
case MLX5_LAG_NORMAL_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
case MLX5_LAG_P1_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P2].link_up = false;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].tx_enabled = false;
+ tracker.netdev_state[idx1].link_up = false;
break;
case MLX5_LAG_P2_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P1].link_up = false;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = false;
+ tracker.netdev_state[idx0].link_up = false;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx0].dev,
"Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
+ if (tracker.netdev_state[idx0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
+ if (tracker.netdev_state[idx1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
+
+ if (idx < 0)
+ return;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
if (nh_dev0 == nh_dev1) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
-
- i++;
- mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ dev_idx++;
+ if (ldev->pf[i].netdev == nh_dev0)
+ break;
+ }
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
@@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
/* Check the nh event is related to the route */
if (!mp->fib.mfi || mp->fib.mfi != fi)
@@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == fib_nh->fib_nh_dev)
+ break;
+ dev_idx++;
+ }
- if (i >= 0) {
- i = (i + 1) % 2 + 1; /* peer port */
- mlx5_lag_set_port_affinity(ldev, i);
+ if (dev_idx >= 0) {
+ dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
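The nexthop-delete path above re-derives the deleted device's position among the populated slots and then flips affinity to its peer. The arithmetic, worked out for the two supported ports (affinity arguments are 1-based):

	/* dev_idx is the 0-based position of the deleted nexthop:
	 *   dev_idx 0 -> (0 + 1) % 2 + 1 = 2, steer traffic to port 2
	 *   dev_idx 1 -> (1 + 1) % 2 + 1 = 1, steer traffic to port 1
	 */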
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 571ea26edd0c..ffac0bd6c895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
u32 pf_metadata;
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = ldev->lag_mpesw.pf_metadata[i];
@@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
u32 pf_metadata;
int i, err;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = mlx5_esw_match_metadata_alloc(esw);
@@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
goto err_metadata;
}
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
(void *)0);
@@ -68,13 +68,15 @@ err_metadata:
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
int err;
int i;
- if (ldev->mode != MLX5_LAG_MODE_NONE)
+ if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
+ dev0 = ldev->pf[idx].dev;
if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
return -EOPNOTSUPP;
@@ -98,7 +100,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
@@ -112,7 +114,7 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..bde79cac33a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
- int err, i;
- int idx;
- int j;
+ struct mlx5_core_dev *dev;
+ int err, i, j, k, idx;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
@@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
@@ -88,13 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- do {
+ mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
while (j--) {
- idx = i * ldev->buckets + j;
+ idx = k * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- } while (i--);
+			}
goto destroy_fg;
}
}
@@ -295,11 +298,16 @@ static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_definer *lag_definer;
+ struct mlx5_core_dev *dev;
u32 *match_definer_mask;
int format_id, err;
+ if (first_idx < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = ldev->pf[first_idx].dev;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
@@ -341,12 +349,15 @@ free_lag_definer:
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int idx;
- int i;
- int j;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev;
+ int idx, i, j;
- for (i = 0; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return;
+
+ dev = ldev->pf[first_idx].dev;
+ mlx5_ldev_for_each(i, first_idx, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
@@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
@@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
@@ -530,7 +551,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +570,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
@@ -565,7 +588,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[idx] == ports[idx])
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 4822d01123b4..d61a1a9297c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -322,17 +322,16 @@ static void mlx5_pps_out(struct work_struct *work)
}
}
-static void mlx5_timestamp_overflow(struct work_struct *work)
+static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
- struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- timer = container_of(dwork, struct mlx5_timer, overflow_work);
- clock = container_of(timer, struct mlx5_clock, timer);
+ clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
@@ -343,7 +342,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
- schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+ return timer->overflow_period;
}
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
@@ -517,6 +516,7 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+ ptp_schedule_worker(clock->ptp, timer->overflow_period);
return 0;
}
@@ -852,6 +852,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
+ .do_aux_work = mlx5_timestamp_overflow,
};
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
@@ -1052,12 +1053,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
do_div(ns, NSEC_PER_SEC / HZ);
timer->overflow_period = ns;
- INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
- if (timer->overflow_period)
- schedule_delayed_work(&timer->overflow_work, 0);
- else
+ if (!timer->overflow_period) {
+ timer->overflow_period = HZ;
mlx5_core_warn(mdev,
- "invalid overflow period, overflow_work is not scheduled\n");
+ "invalid overflow period, overflow_work is scheduled once per second\n");
+ }
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
@@ -1172,6 +1172,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
mlx5_eq_notifier_register(mdev, &clock->pps_nb);
+
+ if (clock->ptp)
+ ptp_schedule_worker(clock->ptp, 0);
}
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
@@ -1188,7 +1191,6 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
}
cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->timer.overflow_work);
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
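The overflow check moves from a self-rescheduling delayed work to the PTP auxiliary worker. In that API, .do_aux_work returns the delay in jiffies until its next invocation (negative means don't reschedule), and ptp_schedule_worker() queues or re-queues the worker. A minimal sketch of the contract:

	static long my_overflow_check(struct ptp_clock_info *info)
	{
		/* ... fold the cycle counter under the clock lock ... */
		return HZ;	/* run again in roughly one second */
	}

	/* after ptp_clock_register(), kick the worker once: */
	ptp_schedule_worker(clock->ptp, 0);

This is also why mlx5_ptp_adjfine() now calls ptp_schedule_worker(): changing the multiplier changes how fast the counter wraps, so the pending delay is re-armed with the current period.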
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 4a078113e292..762d55ba9e51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -497,7 +497,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&dest, 0, sizeof(struct mlx5_flow_destination));
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ dest.counter = tx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -519,7 +519,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ dest.counter = tx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1200,7 +1200,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ roce_dest[dstn].counter = rx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
if (IS_ERR(rule)) {
@@ -1592,7 +1592,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ dest.counter = rx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
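These four hunks track a flow-steering API change: struct mlx5_flow_destination now carries the counter object itself instead of its firmware ID, so call sites drop the mlx5_fc_id() lookup. The new form, sketched for an already-allocated struct mlx5_fc *fc:

	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = fc;	/* previously: dest.counter_id = mlx5_fc_id(fc); */
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;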
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 220a9ac75c8b..ec956c4bcebd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -368,6 +368,10 @@ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_ty
u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
+ if (WARN_ON(!dev->caps.hca[cap_type]))
+ /* this cap_type must be added to mlx5_hca_caps_alloc() */
+ return -EINVAL;
+
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
@@ -664,6 +668,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));
+ /* enable absolute native port num */
+ if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -941,9 +949,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
mlx5_pci_vsc_init(dev);
- err = pci_enable_ptm(pdev, NULL);
- if (err)
- mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+ pci_enable_ptm(pdev, NULL);
return 0;
@@ -1788,6 +1794,7 @@ static const int types[] = {
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
+ MLX5_CAP_SHAMPO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
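The WARN_ON guard in mlx5_core_get_caps_mode() turns a NULL dereference into a recoverable -EINVAL when a capability type is queried without a slot in the types[] array that mlx5_hca_caps_alloc() consumes, which is exactly what the MLX5_CAP_SHAMPO addition at the bottom of this diff provides. A sketch of the two pieces a new capability needs, with HCA_CAP_OPMOD_GET_CUR as the usual query mode:

	static const int types[] = {
		/* ... existing types ... */
		MLX5_CAP_SHAMPO,	/* 1) register the type so hca[] gets a slot */
	};

	/* 2) only then may callers query it: */
	err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR);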
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e393391966e0..39a209b9b684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -56,6 +56,8 @@ bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierar
return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
case TSAR_ELEMENT_TSAR_TYPE_ETS:
return cap & TSAR_TYPE_CAP_MASK_ETS;
+ case TSAR_ELEMENT_TSAR_TYPE_TC_ARB:
+ return cap & TSAR_TYPE_CAP_MASK_TC_ARB;
}
return false;
@@ -87,6 +89,8 @@ bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hie
return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT:
+ return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT;
}
return false;
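Both rl.c hunks extend the same one-bit-per-type pattern: each TSAR or scheduling-element type maps to a single bit in the QoS capability mask, so enabling a new type is a single case label. A caller-side sketch, with SCHEDULING_HIERARCHY_E_SWITCH as an assumed hierarchy value:

	if (!mlx5_qos_tsar_type_supported(dev, TSAR_ELEMENT_TSAR_TYPE_TC_ARB,
					  SCHEDULING_HIERARCHY_E_SWITCH))
		return -EOPNOTSUPP;	/* firmware lacks TC-arbitration TSARs */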
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a96be98be032..b96909fbeb12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
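The added mlx5_sf_function_id_erase() restores the rule that an error path unwinds in reverse order of setup; without it, a failure after the function-ID insertion freed the SF while leaving a dangling ID in the table. Schematically:

	/* setup:   mlx5_sf_alloc() -> function-ID insert -> eswitch activation
	 * unwind:  esw_err: function-ID erase -> mlx5_sf_free()
	 */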
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index a897cdc60fdb..b5332c54d4fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -11,31 +11,29 @@
/* This is the longest supported action sequence for FDB table:
* DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
*/
-static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
- [MLX5HWS_TABLE_TYPE_FDB] = {
- BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
- BIT(MLX5HWS_ACTION_TYP_CTR),
- BIT(MLX5HWS_ACTION_TYP_TAG),
- BIT(MLX5HWS_ACTION_TYP_ASO_METER),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_TBL) |
- BIT(MLX5HWS_ACTION_TYP_VPORT) |
- BIT(MLX5HWS_ACTION_TYP_DROP) |
- BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
- BIT(MLX5HWS_ACTION_TYP_RANGE) |
- BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
- BIT(MLX5HWS_ACTION_TYP_LAST),
- },
+static const u32 action_order_arr[MLX5HWS_ACTION_TYP_MAX] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
};
static const char * const mlx5hws_action_type_str[] = {
@@ -83,8 +81,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
int ret;
mutex_lock(&ctx->ctrl_lock);
- if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ if (ctx->common_res.shared_stc[stc_type]) {
+ ctx->common_res.shared_stc[stc_type]->refcount++;
mutex_unlock(&ctx->ctrl_lock);
return 0;
}
@@ -124,8 +122,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
goto free_shared_stc;
}
- ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+ ctx->common_res.shared_stc[stc_type] = shared_stc;
+ ctx->common_res.shared_stc[stc_type]->refcount = 1;
mutex_unlock(&ctx->ctrl_lock);
@@ -178,16 +176,16 @@ static void hws_action_put_shared_stc(struct mlx5hws_action *action,
}
mutex_lock(&ctx->ctrl_lock);
- if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ if (--ctx->common_res.shared_stc[stc_type]->refcount) {
mutex_unlock(&ctx->ctrl_lock);
return;
}
- shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+ shared_stc = ctx->common_res.shared_stc[stc_type];
mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
kfree(shared_stc);
- ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ ctx->common_res.shared_stc[stc_type] = NULL;
mutex_unlock(&ctx->ctrl_lock);
}
@@ -206,10 +204,10 @@ bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
enum mlx5hws_action_type *user_actions,
enum mlx5hws_table_type table_type)
{
- const u32 *order_arr = action_order_arr[table_type];
+ const u32 *order_arr = action_order_arr;
+ bool valid_combo;
u8 order_idx = 0;
u8 user_idx = 0;
- bool valid_combo;
if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
mlx5hws_err(ctx, "Invalid table_type %d", table_type);
@@ -321,8 +319,8 @@ int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
__must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
bool use_fixup;
u32 obj_0_id;
int ret;
@@ -387,8 +385,8 @@ void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
struct mlx5hws_pool_chunk *stc)
__must_hold(&ctx->ctrl_lock)
{
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
u32 obj_id;
/* Modify the STC not to point to an object */
@@ -473,6 +471,7 @@ static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
break;
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
attr->dest_table_id = obj_id;
@@ -561,7 +560,7 @@ hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto out_err;
}
@@ -585,7 +584,7 @@ hws_action_destroy_stcs(struct mlx5hws_action *action)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
mutex_unlock(&ctx->ctrl_lock);
}
@@ -1639,8 +1638,8 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
- default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+ stc_pool = ctx->stc_pool;
+ default_stc = ctx->common_res.default_stc;
obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
rtc_attr.stc_base = obj_id;
@@ -1731,7 +1730,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
ste_attr.used_id_rtc_0 = &used_rtc_0_id;
ste_attr.used_id_rtc_1 = &used_rtc_1_id;
- common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+ common_res = &ctx->common_res;
/* init an empty match STE which will always hit */
ste_attr.wqe_ctrl = &wqe_ctrl;
@@ -1750,7 +1749,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
- htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+ htonl(hit_ft_action->stc.offset);
wqe_data_arr = (__force __be32 *)&range_wqe_data;
@@ -1843,7 +1842,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto error_unlock;
@@ -1875,7 +1874,50 @@ struct mlx5hws_action *
mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
u32 sampler_id, u32 flags)
{
- mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_cmd_set_fte_dest dest;
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (flags != (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ mlx5hws_err(ctx, "Unsupported flags for flow sampler\n");
+ return NULL;
+ }
+
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest.destination_id = sampler_id;
+
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ return NULL;
+
+ action = hws_action_create_generic(ctx, flags,
+ MLX5HWS_ACTION_TYP_SAMPLER);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->flow_sampler.fw_island = fw_island;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
return NULL;
}
@@ -1914,6 +1956,11 @@ static void hws_action_destroy_hws(struct mlx5hws_action *action)
}
kfree(action->dest_array.dest_list);
break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev,
+ action->flow_sampler.fw_island);
+ break;
case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5HWS_ACTION_TYP_MODIFY_HDR:
shared_arg = false;
@@ -1970,8 +2017,8 @@ __must_hold(&ctx->ctrl_lock)
struct mlx5hws_action_default_stc *default_stc;
int ret;
- if (ctx->common_res[tbl_type].default_stc) {
- ctx->common_res[tbl_type].default_stc->refcount++;
+ if (ctx->common_res.default_stc) {
+ ctx->common_res.default_stc->refcount++;
return 0;
}
@@ -2023,8 +2070,8 @@ __must_hold(&ctx->ctrl_lock)
goto free_nop_dw7;
}
- ctx->common_res[tbl_type].default_stc = default_stc;
- ctx->common_res[tbl_type].default_stc->refcount++;
+ ctx->common_res.default_stc = default_stc;
+ ctx->common_res.default_stc->refcount++;
return 0;
@@ -2046,9 +2093,7 @@ __must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_action_default_stc *default_stc;
- default_stc = ctx->common_res[tbl_type].default_stc;
-
- default_stc = ctx->common_res[tbl_type].default_stc;
+ default_stc = ctx->common_res.default_stc;
if (--default_stc->refcount)
return;
@@ -2058,7 +2103,7 @@ __must_hold(&ctx->ctrl_lock)
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
kfree(default_stc);
- ctx->common_res[tbl_type].default_stc = NULL;
+ ctx->common_res.default_stc = NULL;
}
static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
@@ -2150,8 +2195,7 @@ hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
{
struct mlx5hws_action *action = apply->rule_action[action_idx].action;
- apply->wqe_ctrl->stc_ix[stc_idx] =
- htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[stc_idx] = htonl(action->stc.offset);
}
static void
@@ -2181,7 +2225,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
rule_action = &apply->rule_action[setter->idx_double];
action = rule_action->action;
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2240,7 +2284,7 @@ hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2272,7 +2316,7 @@ hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2434,6 +2478,7 @@ int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
case MLX5HWS_ACTION_TYP_DROP:
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
case MLX5HWS_ACTION_TYP_VPORT:
case MLX5HWS_ACTION_TYP_MISS:
/* Hit action */
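With the stub replaced, a flow-sampler action is backed by a hidden forward table (the fw_island) whose single FTE jumps to the firmware sampler object, and it only accepts the shared-FDB flag combination. A usage sketch, where sampler_id names a sampler object created elsewhere:

	struct mlx5hws_action *sampler;

	sampler = mlx5hws_action_create_flow_sampler(ctx, sampler_id,
						     MLX5HWS_ACTION_FLAG_HWS_FDB |
						     MLX5HWS_ACTION_FLAG_SHARED);
	if (!sampler)
		return -EINVAL;	/* any other flags are rejected */
	/* use as a rule's hit action; release with mlx5hws_action_destroy() */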
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index e8f562c31826..64b76075f7f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -70,12 +70,12 @@ struct mlx5hws_action_default_stc {
struct mlx5hws_pool_chunk nop_dw6;
struct mlx5hws_pool_chunk nop_dw7;
struct mlx5hws_pool_chunk default_hit;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_action_shared_stc {
struct mlx5hws_pool_chunk stc_chunk;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_actions_apply_data {
@@ -124,7 +124,7 @@ struct mlx5hws_action {
struct mlx5hws_context *ctx;
union {
struct {
- struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool_chunk stc;
union {
struct {
u32 pat_id;
@@ -166,6 +166,9 @@ struct mlx5hws_action {
struct mlx5hws_cmd_set_fte_dest *dest_list;
} dest_array;
struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ } flow_sampler;
+ struct {
u8 type;
u8 start_anchor;
u8 end_anchor;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index baacf662c0ab..3dbd4efa21a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -152,6 +152,8 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
if (!bwc_matcher)
return NULL;
+ atomic_set(&bwc_matcher->num_of_rules, 0);
+
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
*/
@@ -199,10 +201,12 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- if (bwc_matcher->num_of_rules)
+ u32 num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+
+ if (num_of_rules)
mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
"BWC matcher destroy: matcher still has %d rules\n",
- bwc_matcher->num_of_rules);
+ num_of_rules);
mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
@@ -215,6 +219,8 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
u32 *pending_rules,
bool drain)
{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
@@ -250,6 +256,11 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
}
got_comp = !!ret;
+
+ if (unlikely(!got_comp && time_after(jiffies, timeout))) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
+ return -ETIMEDOUT;
+ }
}
return err;
@@ -309,7 +320,7 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules++;
+ atomic_inc(&bwc_matcher->num_of_rules);
bwc_rule->bwc_queue_idx = idx;
list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}
@@ -318,7 +329,7 @@ static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules--;
+ atomic_dec(&bwc_matcher->num_of_rules);
list_del_init(&bwc_rule->list_node);
}
@@ -334,22 +345,21 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_attr *rule_attr)
{
struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_flow_op_result completion;
+ u32 expected_completions = 1;
int ret;
ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
if (unlikely(ret))
return ret;
- do {
- ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
- } while (ret != 1);
-
- if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
- (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
- bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
- mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
- completion.status, bwc_rule->rule->status);
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
+ bwc_rule->rule->status);
return -EINVAL;
}
@@ -458,8 +468,22 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
- return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
- caps->ste_alloc_log_max - 1;
+ /* check the match RTC size */
+ if ((bwc_matcher->size_log +
+ MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1))
+ return true;
+
+ /* check the action RTC size */
+ if ((bwc_matcher->size_log +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP +
+ ilog2(roundup_pow_of_two(bwc_matcher->matcher->action_ste.max_stes)) +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT) >
+ (caps->ste_alloc_log_max - 1))
+ return true;
+
+ return false;
}
static bool
@@ -615,8 +639,12 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
&pending_rules[i], false);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n",
+ ret);
goto free_bwc_rules;
+ }
}
}
} while (!all_done);
@@ -629,8 +657,11 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
ret = hws_bwc_queue_poll(ctx, queue_id,
&pending_rules[i], true);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n", ret);
goto free_bwc_rules;
+ }
}
}
@@ -704,7 +735,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* Need to check again if we really need rehash.
* If the reason for rehash was size, but not any more - skip rehash.
*/
- if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
+ if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher,
+ atomic_read(&bwc_matcher->num_of_rules)))
return 0;
/* Now we're done all the checking - do the rehash:
@@ -797,7 +829,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
}
/* check if number of rules require rehash */
- num_of_rules = bwc_matcher->num_of_rules;
+ num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
mutex_unlock(queue_lock);
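hws_bwc_queue_poll() could previously spin forever on a queue that never completes; the new deadline bounds the wait at MLX5HWS_BWC_POLLING_TIMEOUT (60) seconds. The idiom in isolation, with poll_once() as a hypothetical single poll step:

	unsigned long timeout = jiffies +
		msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC);

	for (;;) {
		if (poll_once())
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
	}

time_after() is overflow-safe with jiffies, which is why it is used rather than a direct comparison against the deadline.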
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 0b745968e21e..f9f569131dde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -8,10 +8,18 @@
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
-#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+/* Max number of AT attach operations for the same matcher.
+ * When the limit is reached, next attempt to attach new AT
+ * will result in creation of a new matcher and moving all
+ * the rules to this matcher.
+ */
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
#define MLX5HWS_BWC_MAX_ACTS 16
+#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
@@ -19,7 +27,7 @@ struct mlx5hws_bwc_matcher {
u8 num_of_at;
u16 priority;
u8 size_log;
- u32 num_of_rules; /* atomically accessed */
+ atomic_t num_of_rules;
struct list_head *rules;
};
@@ -60,9 +68,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
- * reguler HWS queues, and the other half are BWC queues.
+ * regular HWS queues, and the other half are BWC queues.
*/
- return (ctx->queues - 1) / 2;
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
}
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
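As a worked example of the helper above: with ctx->queues = 9 (one control queue plus eight data queues), a BWC-capable context reports (9 - 1) / 2 = 4 BWC queues, leaving 4 regular HWS queues; after this change, a context without BWC support reports 0 instead of incorrectly claiming half.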
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index c00c138c3366..487e75476b0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -257,6 +257,12 @@ int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
dest->ext_reformat_id);
}
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ MLX5_SET(dest_format, in_dests,
+ destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -359,7 +365,7 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
ft_attr->type = fw_ft_type;
ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
- default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ default_miss_tbl = ctx->common_res.default_miss->ft_id;
if (!default_miss_tbl) {
pr_warn("HWS: no flow table ID for default miss\n");
return;
@@ -622,12 +628,12 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
u32 pd,
u32 *arg_id)
{
+ u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
void *attr;
int ret;
- attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
@@ -635,8 +641,8 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
MLX5_SET(general_obj_in_cmd_hdr,
attr, op_param.create.log_obj_range, log_obj_range);
- attr = MLX5_ADDR_OF(create_arg_in, in, arg);
- MLX5_SET(arg, attr, access_pd, pd);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+ MLX5_SET(modify_header_arg, attr, access_pd, pd);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (ret) {
@@ -812,7 +818,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_packet_reformat_create_attr *attr,
u32 *reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {0};
size_t insz, cmd_data_sz, cmd_total_sz;
void *prctx;
void *pdata;
@@ -845,7 +851,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
goto out;
}
- *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
out:
kfree(in);
return ret;
@@ -854,13 +860,13 @@ out:
int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {0};
int ret;
- MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
- MLX5_SET(dealloc_packet_reformat_in, in,
+ MLX5_SET(dealloc_packet_reformat_context_in, in,
packet_reformat_id, reformat_id);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
@@ -889,73 +895,6 @@ int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
return ret;
}
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
-{
- u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
- void *key;
- int ret;
-
- MLX5_SET(allow_other_vhca_access_in,
- in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_type_to_be_accessed, attr->obj_type);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_id_to_be_accessed, attr->obj_id);
-
- key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
- memcpy(key, attr->access_key, sizeof(attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret)
- mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
-
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id)
-{
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
- void *attr;
- void *key;
- int ret;
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, obj_type, alias_attr->obj_type);
- MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
- MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
- MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
-
- key = MLX5_ADDR_OF(alias_context, attr, access_key);
- memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret) {
- mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
- goto out;
- }
-
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id)
-{
- return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
-}
-
int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_generate_wqe_attr *attr,
struct mlx5_cqe64 *ret_cqe)
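The renames in this file align the command builders with the canonical mlx5_ifc structure names (create_modify_header_arg_in, *_packet_reformat_context_*); since MLX5_SET() and MLX5_ADDR_OF() derive offsets from the named layout at compile time, the wire format is unchanged. Shape of the renamed argument-object builder, condensed from the hunk above:

	u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0};
	void *attr;

	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, attr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
	MLX5_SET(modify_header_arg, attr, access_pd, pd);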
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 434f62b0904e..610c63d81ad9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -63,7 +63,7 @@ struct mlx5hws_cmd_forward_tbl {
u8 type;
u32 ft_id;
u32 fg_id;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_cmd_rtc_create_attr {
@@ -334,14 +334,6 @@ mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_forward_tbl *tbl);
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id);
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id);
-
int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
@@ -352,9 +344,6 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
enum mlx5hws_table_type type,
struct mlx5hws_cmd_ft_modify_attr *ft_attr);
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
-
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index fd48b05e91e0..9cda2774fd64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -23,7 +23,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
struct mlx5hws_pool_attr pool_attr = {0};
u8 max_log_sz;
int ret;
- int i;
ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
if (ret)
@@ -39,23 +38,17 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- pool_attr.table_type = i;
- ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
- if (!ctx->stc_pool[i]) {
- mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
- ret = -ENOMEM;
- goto free_stc_pools;
- }
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool\n");
+ ret = -ENOMEM;
+ goto uninit_cache;
}
return 0;
-free_stc_pools:
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
-
+uninit_cache:
mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -64,12 +57,8 @@ uninit_pat_cache:
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
- int i;
-
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
- }
+ if (ctx->stc_pool)
+ mlx5hws_pool_destroy(ctx->stc_pool);
mlx5hws_definer_uninit_cache(ctx->definer_cache);
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -161,8 +150,10 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto uninit_pd;
- if (attr->bwc)
- ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+ /* Context has support for backward compatible API,
+ * and does not have support for native HWS API.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 47f5cc8de73f..38c3647444ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -8,6 +8,7 @@ enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
};
enum mlx5hws_context_shared_stc_type {
@@ -37,8 +38,8 @@ struct mlx5hws_context {
struct mlx5_core_dev *mdev;
struct mlx5hws_cmd_query_caps *caps;
u32 pd_num;
- struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
- struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_context_common_res common_res;
struct mlx5hws_pattern_cache *pattern_cache;
struct mlx5hws_definer_cache *definer_cache;
struct mutex ctrl_lock; /* control lock to protect the whole context */
@@ -58,6 +59,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
}
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 5b200b4bc1a8..696275fd0ce2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -148,8 +148,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->match_ste.rtc_1_id,
(int)ste_1_id);
- ste = &matcher->action_ste[0].ste;
- ste_pool = matcher->action_ste[0].pool;
+ ste = &matcher->action_ste.ste;
+ ste_pool = matcher->action_ste.pool;
if (ste_pool) {
ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
@@ -171,10 +171,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
return ret;
seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
- matcher->action_ste[0].rtc_0_id,
- (int)ste_0_id,
- matcher->action_ste[0].rtc_1_id,
- (int)ste_1_id,
+ matcher->action_ste.rtc_0_id, (int)ste_0_id,
+ matcher->action_ste.rtc_1_id, (int)ste_1_id,
0,
mlx5hws_debug_icm_to_idx(icm_addr_0),
mlx5hws_debug_icm_to_idx(icm_addr_1));
@@ -368,9 +366,10 @@ static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_contex
static int hws_debug_dump_context_stc_resource(struct seq_file *f,
struct mlx5hws_context *ctx,
- u32 tbl_type,
struct mlx5hws_pool_resource *resource)
{
+ u32 tbl_type = MLX5HWS_TABLE_TYPE_BASE + MLX5HWS_TABLE_TYPE_FDB;
+
seq_printf(f, "%d,0x%llx,%u,%u\n",
MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
HWS_PTR_TO_ID(ctx),
@@ -382,31 +381,22 @@ static int hws_debug_dump_context_stc_resource(struct seq_file *f,
static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
{
- struct mlx5hws_pool *stc_pool;
- u32 table_type;
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
int ret;
- int i;
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- stc_pool = ctx->stc_pool[i];
- table_type = MLX5HWS_TABLE_TYPE_BASE + i;
-
- if (!stc_pool)
- continue;
+ if (!stc_pool)
+ return 0;
- if (stc_pool->resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->resource[0]);
+ if (ret)
+ return ret;
+ }
- if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->mirror_resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->mirror_resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->mirror_resource[0]);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 8fe96eb76baf..10ece7df1cfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -70,7 +70,7 @@
u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
- (bit_off) % BITS_IN_DW, second_dw_mask); \
+ (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
} else { \
_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
} \
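This fix matters because bit_off can be negative when a field spills into the following dword, and C's % truncates toward zero, producing a negative remainder. A standalone check of the arithmetic, assuming nothing beyond standard C:

	#include <assert.h>

	#define BITS_IN_DW 32

	int main(void)
	{
		int bit_off = -3;	/* field spills 3 bits into the next dword */

		assert(bit_off % BITS_IN_DW == -3);			/* truncation: wrong shift */
		assert((bit_off + BITS_IN_DW) % BITS_IN_DW == 29);	/* intended shift */
		return 0;
	}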
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 9432d5084def..5c1a2086efba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -785,7 +785,7 @@ struct mlx5hws_definer_cache {
struct mlx5hws_definer_cache_item {
struct mlx5hws_definer definer;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
struct list_head list_node;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
new file mode 100644
index 000000000000..f34bbbbba1c2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -0,0 +1,1377 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <linux/mlx5/vport.h>
+#include <mlx5_core.h>
+#include <fs_core.h>
+#include <fs_cmd.h>
+#include "fs_hws_pools.h"
+#include "mlx5hws.h"
+
+#define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
+#define MLX5HWS_CTX_QUEUE_SIZE 256
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index);
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index);
+
+static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_context *fs_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ enum mlx5hws_action_type action_type;
+ int err = -ENOSPC;
+
+ hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
+ if (!hws_pool->tag_action)
+ return err;
+ hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
+ if (!hws_pool->pop_vlan_action)
+ goto destroy_tag;
+ hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
+ if (!hws_pool->push_vlan_action)
+ goto destroy_pop_vlan;
+ hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
+ if (!hws_pool->drop_action)
+ goto destroy_push_vlan;
+ action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
+ hws_pool->decapl2_action =
+ mlx5hws_action_create_reformat(ctx, action_type, 1,
+ &reformat_hdr, 0, flags);
+ if (!hws_pool->decapl2_action)
+ goto destroy_drop;
+ hws_pool->remove_hdr_vlan_action =
+ mlx5_fs_create_action_remove_header_vlan(ctx);
+ if (!hws_pool->remove_hdr_vlan_action)
+ goto destroy_decapl2;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (err)
+ goto destroy_remove_hdr;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
+ if (err)
+ goto cleanup_insert_hdr;
+ xa_init(&hws_pool->el2tol3tnl_pools);
+ xa_init(&hws_pool->el2tol2tnl_pools);
+ xa_init(&hws_pool->mh_pools);
+ xa_init(&hws_pool->table_dests);
+ xa_init(&hws_pool->vport_dests);
+ xa_init(&hws_pool->vport_vhca_dests);
+ return 0;
+
+cleanup_insert_hdr:
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+destroy_remove_hdr:
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+destroy_decapl2:
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+destroy_drop:
+ mlx5hws_action_destroy(hws_pool->drop_action);
+destroy_push_vlan:
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+destroy_pop_vlan:
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+destroy_tag:
+ mlx5hws_action_destroy(hws_pool->tag_action);
+ return err;
+}
+
+static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action *action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i;
+
+ xa_for_each(&hws_pool->vport_vhca_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_vhca_dests);
+ xa_for_each(&hws_pool->vport_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_dests);
+ xa_destroy(&hws_pool->table_dests);
+ xa_for_each(&hws_pool->mh_pools, i, pool)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
+ xa_destroy(&hws_pool->mh_pools);
+ xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol2tnl_pools);
+ xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol3tnl_pools);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+ mlx5hws_action_destroy(hws_pool->drop_action);
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+ mlx5hws_action_destroy(hws_pool->tag_action);
+}
+
+static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ struct mlx5hws_context_attr hws_ctx_attr = {};
+ int err;
+
+ hws_ctx_attr.queues = min_t(int, num_online_cpus(),
+ MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
+ hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
+
+ ns->fs_hws_context.hws_ctx =
+ mlx5hws_context_open(ns->dev, &hws_ctx_attr);
+ if (!ns->fs_hws_context.hws_ctx) {
+ mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
+ return -EINVAL;
+ }
+ err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
+ mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
+ return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+}
+
+static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns,
+ u16 peer_vhca_id)
+{
+ struct mlx5hws_context *peer_ctx = NULL;
+
+ if (peer_ns)
+ peer_ctx = peer_ns->fs_hws_context.hws_ctx;
+ mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
+ peer_vhca_id);
+ return 0;
+}
+
+static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_table *next_tbl;
+ int err;
+
+ if (!ns->fs_hws_context.hws_ctx)
+ return -EINVAL;
+
+ /* if no change required, return */
+ if (!next_ft && !ft->fs_hws_table.miss_ft_set)
+ return 0;
+
+ next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
+ err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
+ return err;
+ }
+ ft->fs_hws_table.miss_ft_set = !!next_tbl;
+ return 0;
+}
+
+static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
+ ft->id, flags);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed creating dest table action\n");
+ return -ENOMEM;
+ }
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
+ if (err)
+ mlx5hws_action_destroy(dest_ft_action);
+ return err;
+}
+
+static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ dest_ft_action = xa_erase(dests_xa, ft->id);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
+ return -ENOENT;
+ }
+
+ err = mlx5hws_action_destroy(dest_ft_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
+ return err;
+}
+
+static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
+ struct mlx5hws_table_attr tbl_attr = {};
+ struct mlx5hws_table *tbl;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
+ next_ft);
+ if (err)
+ return err;
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+ return err;
+ }
+
+ if (ns->table_type != FS_FT_FDB) {
+ mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
+ ns->table_type);
+ return -EOPNOTSUPP;
+ }
+
+ tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
+ tbl_attr.level = ft_attr->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_hws_table.hws_table = tbl;
+ ft->id = mlx5hws_table_get_id(tbl);
+
+ if (next_ft) {
+ err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+ if (err)
+ goto destroy_table;
+ }
+
+ ft->max_fte = INT_MAX;
+
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ goto clear_ft_miss;
+ return 0;
+
+clear_ft_miss:
+ mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+destroy_table:
+ mlx5hws_table_destroy(tbl);
+ ft->fs_hws_table.hws_table = NULL;
+ return err;
+}
+
+static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ int err;
+
+ err = mlx5_fs_del_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);
+
+ err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
+ return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5hws_match_parameters mask;
+ struct mlx5hws_bwc_matcher *matcher;
+ u8 match_criteria_enable;
+ u32 priority;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ match_criteria_enable = MLX5_GET(create_flow_group_in, in,
+ match_criteria_enable);
+ priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
+ matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
+ priority, match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_hws_matcher.matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst,
+ bool is_dest_type_uplink)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5hws_action *dest;
+ struct xarray *dests_xa;
+ bool vhca_id_valid;
+ unsigned long idx;
+ u16 vport_num;
+ int err;
+
+ vhca_id_valid = is_dest_type_uplink ||
+ (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
+ vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
+ if (vhca_id_valid) {
+ dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
+ idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
+ } else {
+ dests_xa = &fs_ctx->hws_pool.vport_dests;
+ idx = vport_num;
+ }
+dest_load:
+ dest = xa_load(dests_xa, idx);
+ if (dest)
+ return dest;
+
+ dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
+ dest_attr->vport.vhca_id, flags);
+
+ err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
+ if (err) {
+ mlx5hws_action_destroy(dest);
+ dest = NULL;
+
+ if (err == -EBUSY)
+ /* xarray entry was already stored by another thread */
+ goto dest_load;
+ }
+
+ return dest;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5hws_action_create_dest_match_range(ctx,
+ dest_attr->range.field,
+ dest_attr->range.hit_ft,
+ dest_attr->range.miss_ft,
+ dest_attr->range.min,
+ dest_attr->range.max,
+ flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 num_of_dests, bool ignore_flow_level,
+ u32 flow_source)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
+ ignore_flow_level,
+ flow_source, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.push_vlan_action;
+}
+
+static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+
+ return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.pop_vlan_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.decapl2_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.drop_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.tag_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_last(ctx, flags);
+}
+
+static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
+{
+ switch (mlx5hws_action_get_type(fs_action->action)) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ mlx5_fc_put_hws_action(fs_action->counter);
+ break;
+ default:
+ mlx5hws_action_destroy(fs_action->action);
+ }
+}
+
+static void
+mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
+ int *num_fs_actions)
+{
+ int i;
+
+ /* Free in reverse order to handle action dependencies */
+ for (i = *num_fs_actions - 1; i >= 0; i--)
+ mlx5_fs_destroy_fs_action(*fs_actions + i);
+ *num_fs_actions = 0;
+ kfree(*fs_actions);
+ *fs_actions = NULL;
+}
+
+/* Splits the FTE's actions into cached, rule and destination actions.
+ * The cached and destination actions are saved on the fte hws rule.
+ * The rule actions are returned as a parameter, together with their count.
+ * We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ * 32 is the SW limitation for the array size; the HWS limitation is 16M
+ * STEs per matcher.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
+static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte,
+ struct mlx5hws_rule_action **ractions)
+{
+ struct mlx5_flow_act *fte_action = &fte->act_dests.action;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action_dest_attr *dest_actions;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_rule_action *fs_actions;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5hws_action *dest_action;
+ struct mlx5hws_action *tmp_action;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ bool delay_encap_set = false;
+ struct mlx5_flow_rule *dst;
+ int num_dest_actions = 0;
+ int num_fs_actions = 0;
+ int num_actions = 0;
+ int err;
+
+ *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
+ GFP_KERNEL);
+ if (!*ractions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_actions), GFP_KERNEL);
+ if (!fs_actions) {
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*dest_actions), GFP_KERNEL);
+ if (!dest_actions) {
+ err = -ENOMEM;
+ goto free_fs_actions_alloc;
+ }
+
+ /* The order of the actions must be kept; only the following
+ * order is supported by HW steering:
+ * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
+ * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
+ * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
+ */
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_dest_actions_alloc;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ int reformat_type = fte_action->pkt_reformat->reformat_type;
+
+ if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else {
+ delay_encap_set = true;
+ }
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
+ (*ractions)[num_actions].modify_header.offset = mh_data->offset;
+ (*ractions)[num_actions].modify_header.data = mh_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->modify_hdr->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (delay_encap_set) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_fc *counter;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ counter = dst->dest_attr.counter;
+ tmp_action = mlx5_fc_get_hws_action(ctx, counter);
+ if (!tmp_action) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
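+ /* Counter actions are bulk-allocated; pass the counter's
+ * offset within its bulk.
+ */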
+ (*ractions)[num_actions].counter.offset =
+ mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].counter = counter;
+ }
+ }
+
+ if (fte->act_dests.flow_context.flow_tag) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action = mlx5_fs_get_action_tag(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_flow_destination *attr = &dst->dest_attr;
+ bool type_uplink =
+ attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+
+ if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (attr->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
+ dst);
+ if (dest_action)
+ break;
+ dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
+ type_uplink);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+ }
+
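+ /* A single destination is used as the rule's forward action
+ * directly; multiple destinations require a destination-array
+ * action.
+ */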
+ if (num_dest_actions == 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = dest_actions->dest;
+ } else if (num_dest_actions > 1) {
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
+ bool ignore_flow_level;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ ignore_flow_level =
+ !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
+ num_dest_actions,
+ ignore_flow_level,
+ flow_source);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_create_action_last(ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+
+ kfree(dest_actions);
+
+ /* Actions created specifically for this rule will be destroyed
+ * once the rule is deleted.
+ */
+ fte->fs_hws_rule.num_fs_actions = num_fs_actions;
+ fte->fs_hws_rule.hws_fs_actions = fs_actions;
+
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions);
+free_dest_actions_alloc:
+ kfree(dest_actions);
+free_fs_actions_alloc:
+ kfree(fs_actions);
+free_actions_alloc:
+ kfree(*ractions);
+ *ractions = NULL;
+out_err:
+ return err;
+}
+
+static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5hws_match_parameters params;
+ struct mlx5hws_rule_action *ractions;
+ struct mlx5hws_bwc_rule *rule;
+ int err = 0;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ /* Packet reformat on termination table is not supported yet */
+ if (fte->act_dests.action.action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EOPNOTSUPP;
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+ }
+
+ err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (err)
+ goto out_err;
+
+ params.match_sz = sizeof(fte->val);
+ params.match_buf = fte->val;
+
+ rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
+ fte->act_dests.flow_context.flow_source,
+ ractions);
+ kfree(ractions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ fte->fs_hws_rule.bwc_rule = rule;
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+out_err:
+ mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
+ return err;
+}
+
+static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
+ rule->bwc_rule = NULL;
+
+ mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
+ struct mlx5hws_rule_action *ractions;
+ int saved_num_fs_actions;
+ int ret;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
+ modify_mask, fte);
+
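+ /* Only action, destination-list and flow counter updates are
+ * supported for an existing HWS rule.
+ */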
+ if ((modify_mask & ~allowed_mask) != 0)
+ return -EINVAL;
+
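+ /* Keep the current actions so they can be restored if the update fails. */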
+ saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
+ saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
+
+ ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
+ kfree(ractions);
+ if (ret)
+ goto restore_actions;
+
+ mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
+ return ret;
+
+restore_actions:
+ mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+ fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
+ fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
+ return ret;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
+
+ /* MAC anchor not supported in HWS reformat, use VLAN anchor */
+ remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
+ remove_hdr_vlan.offset = 0;
+ remove_hdr_vlan.size = sizeof(struct vlan_hdr);
+ return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (!params ||
+ params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
+ params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
+ params->size != sizeof(struct vlan_hdr))
+ return NULL;
+
+ return fs_ctx->hws_pool.remove_hdr_vlan_action;
+}
+
+static int
+mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if ((!params->data && params->size) || (params->data && !params->size) ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
+ mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
+ return -EINVAL;
+ }
+ if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
+ params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
+ params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
+ mlx5_core_err(mdev, "Only vlan insert header supported\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (params->param_0 || params->param_1) {
+ mlx5_core_err(dev, "Invalid reformat params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
+ enum mlx5hws_action_type reformat_type, size_t size)
+{
+ struct mlx5_fs_pool *pr_pool;
+ unsigned long index = size;
+ int err;
+
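+ /* Encap packet-reformat pools are created lazily, one per encap
+ * data size, and cached in the xarray keyed by that size.
+ */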
+ pr_pool = xa_load(pr_pools, index);
+ if (pr_pool)
+ return pr_pool;
+
+ pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
+ if (!pr_pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
+ if (err)
+ goto free_pr_pool;
+ err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pr_pool;
+ return pr_pool;
+
+cleanup_pr_pool:
+ mlx5_fs_hws_pr_pool_cleanup(pr_pool);
+free_pr_pool:
+ kfree(pr_pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index)
+{
+ xa_erase(pr_pools, index);
+ mlx5_fs_hws_pr_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int
+mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5_fs_hws_actions_pool *hws_pool;
+ struct mlx5hws_action *hws_action = NULL;
+ struct mlx5_fs_hws_pr *pr_data = NULL;
+ struct mlx5_fs_pool *pr_pool = NULL;
+ struct mlx5_core_dev *dev = ns->dev;
+ u8 hdr_idx = 0;
+ int err;
+
+ if (!params)
+ return -EINVAL;
+
+ hws_pool = &fs_ctx->hws_pool;
+
+ switch (params->type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ hdr_idx = params->size == ETH_HLEN ?
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ err = mlx5_fs_verify_insert_header_params(dev, params);
+ if (err)
+ return err;
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+ hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
+ if (!hws_action)
+ mlx5_core_err(dev, "Only vlan remove header supported\n");
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+ params->type);
+ return -EOPNOTSUPP;
+ }
+
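+ /* Pool-based reformat types take an entry from a bulk action;
+ * the remove-header case reuses the shared pre-created action.
+ */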
+ if (pr_pool) {
+ pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
+ if (IS_ERR_OR_NULL(pr_data))
+ return !pr_data ? -EINVAL : PTR_ERR(pr_data);
+ hws_action = pr_data->bulk->hws_action;
+ if (!hws_action) {
+ mlx5_core_err(dev,
+ "Failed allocating packet-reformat action\n");
+ err = -EINVAL;
+ goto release_pr;
+ }
+ pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
+ if (!pr_data->data) {
+ err = -ENOMEM;
+ goto release_pr;
+ }
+ pr_data->hdr_idx = hdr_idx;
+ pr_data->data_size = params->size;
+ pkt_reformat->fs_hws_action.pr_data = pr_data;
+ }
+
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ pkt_reformat->fs_hws_action.hws_action = hws_action;
+ return 0;
+
+release_pr:
+ if (pr_pool && pr_data)
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ return err;
+}
+
+static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_pool *pr_pool;
+
+ if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
+ return;
+
+ if (!pkt_reformat->fs_hws_action.pr_data) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ pr_data = pkt_reformat->fs_hws_action.pr_data;
+
+ switch (pkt_reformat->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
+ return;
+ }
+ if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ kfree(pr_data->data);
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ pkt_reformat->fs_hws_action.pr_data = NULL;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern,
+ struct xarray *mh_pools, unsigned long index)
+{
+ struct mlx5_fs_pool *pool;
+ int err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
+ if (err)
+ goto free_pool;
+ err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pool;
+ return pool;
+
+cleanup_pool:
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+free_pool:
+ kfree(pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index)
+{
+ xa_erase(mh_pools, index);
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5hws_action_mh_pattern pattern = {};
+ struct mlx5_fs_hws_mh *mh_data = NULL;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i, cnt = 0;
+ bool known_pattern;
+ int err;
+
+ pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+ pattern.data = modify_actions;
+
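+ /* Reuse a pool whose modify-header pattern matches this one;
+ * otherwise create a new pool for the pattern.
+ */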
+ known_pattern = false;
+ xa_for_each(&hws_pool->mh_pools, i, pool) {
+ if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
+ known_pattern = true;
+ break;
+ }
+ cnt++;
+ }
+
+ if (!known_pattern) {
+ pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
+ &hws_pool->mh_pools, cnt);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ }
+ mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
+ if (IS_ERR(mh_data)) {
+ err = PTR_ERR(mh_data);
+ goto destroy_pool;
+ }
+ hws_action = mh_data->bulk->hws_action;
+ mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
+ if (!mh_data->data) {
+ err = -ENOMEM;
+ goto release_mh;
+ }
+ modify_hdr->fs_hws_action.mh_data = mh_data;
+ modify_hdr->fs_hws_action.fs_pool = pool;
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ modify_hdr->fs_hws_action.hws_action = hws_action;
+
+ return 0;
+
+release_mh:
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+destroy_pool:
+ if (!known_pattern)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
+ return err;
+}
+
+static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_mh *mh_data;
+ struct mlx5_fs_pool *pool;
+
+ if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
+ mlx5_core_err(ns->dev, "Failed release modify-header\n");
+ return;
+ }
+
+ mh_data = modify_hdr->fs_hws_action.mh_data;
+ kfree(mh_data->data);
+ pool = modify_hdr->fs_hws_action.fs_pool;
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+ modify_hdr->fs_hws_action.mh_data = NULL;
+}
+
+static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+}
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5hws_is_supported(dev);
+}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
+ .create_flow_table = mlx5_cmd_hws_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_hws_modify_flow_table,
+ .update_root_ft = mlx5_cmd_hws_update_root_ft,
+ .create_flow_group = mlx5_cmd_hws_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
+ .create_fte = mlx5_cmd_hws_create_fte,
+ .delete_fte = mlx5_cmd_hws_delete_fte,
+ .update_fte = mlx5_cmd_hws_update_fte,
+ .packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_hws_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
+ .create_ns = mlx5_cmd_hws_create_ns,
+ .destroy_ns = mlx5_cmd_hws_destroy_ns,
+ .set_peer = mlx5_cmd_hws_set_peer,
+ .get_capabilities = mlx5_cmd_hws_get_capabilities,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return &mlx5_flow_cmds_hws;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
new file mode 100644
index 000000000000..cbddb72d4362
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef _MLX5_FS_HWS_
+#define _MLX5_FS_HWS_
+
+#include "mlx5hws.h"
+#include "fs_hws_pools.h"
+
+struct mlx5_fs_hws_actions_pool {
+ struct mlx5hws_action *tag_action;
+ struct mlx5hws_action *pop_vlan_action;
+ struct mlx5hws_action *push_vlan_action;
+ struct mlx5hws_action *drop_action;
+ struct mlx5hws_action *decapl2_action;
+ struct mlx5hws_action *remove_hdr_vlan_action;
+ struct mlx5_fs_pool insert_hdr_pool;
+ struct mlx5_fs_pool dl3tnltol2_pool;
+ struct xarray el2tol3tnl_pools;
+ struct xarray el2tol2tnl_pools;
+ struct xarray mh_pools;
+ struct xarray table_dests;
+ struct xarray vport_vhca_dests;
+ struct xarray vport_dests;
+};
+
+struct mlx5_fs_hws_context {
+ struct mlx5hws_context *hws_ctx;
+ struct mlx5_fs_hws_actions_pool hws_pool;
+};
+
+struct mlx5_fs_hws_table {
+ struct mlx5hws_table *hws_table;
+ bool miss_ft_set;
+};
+
+struct mlx5_fs_hws_action {
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *fs_pool;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+};
+
+struct mlx5_fs_hws_matcher {
+ struct mlx5hws_bwc_matcher *matcher;
+};
+
+struct mlx5_fs_hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct mlx5_fc *counter;
+ };
+};
+
+struct mlx5_fs_hws_rule {
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5_fs_hws_rule_action *hws_fs_actions;
+ int num_fs_actions;
+};
+
+#ifdef CONFIG_MLX5_HW_STEERING
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
+
+#else
+
+static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_HW_STEERING */
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
new file mode 100644
index 000000000000..2ae4ac62b0e2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_hws_pools.h"
+
+#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
+#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10
+
+static struct mlx5hws_action *
+mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
+ reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_insert_header insert_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
+ insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
+ insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;
+
+ return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
+ log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5hws_context *ctx;
+ size_t encap_data_size;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ encap_data_size = pr_pool_ctx->encap_data_size;
+ switch (pr_pool_ctx->reformat_type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ return mlx5_fs_insert_hdr_bulk_action_create(ctx);
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int bulk_len;
+ int i;
+
+ if (!pool_ctx)
+ return NULL;
+ pr_pool_ctx = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL);
+ if (!pr_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ goto free_pr_bulk;
+
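+ /* Each entry records its owning bulk and its index so it can be
+ * released back to the right slot.
+ */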
+ for (i = 0; i < bulk_len; i++) {
+ pr_bulk->prs_data[i].bulk = pr_bulk;
+ pr_bulk->prs_data[i].offset = i;
+ }
+
+ pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
+ if (!pr_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &pr_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
+free_pr_bulk:
+ kvfree(pr_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+
+ pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(pr_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(pr_bulk);
+
+ return 0;
+}
+
+static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
+{
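+ /* Scale the pool threshold with usage: a tenth of the used units,
+ * capped at MLX5_FS_HWS_POOL_MAX_THRESHOLD.
+ */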
+ hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
+ hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
+ .bulk_create = mlx5_fs_hws_pr_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2)
+ return -EOPNOTSUPP;
+
+ pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL);
+ if (!pr_pool_ctx)
+ return -ENOMEM;
+ pr_pool_ctx->reformat_type = reformat_type;
+ pr_pool_ctx->encap_data_size = encap_data_size;
+ mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
+ return 0;
+}
+
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ mlx5_fs_pool_cleanup(pr_pool);
+ pr_pool_ctx = pr_pool->pool_ctx;
+ if (!pr_pool_ctx)
+ return;
+ kfree(pr_pool_ctx);
+}
+
+struct mlx5_fs_hws_pr *
+mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
+ fs_bulk);
+ return &pr_bulk->prs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = pr_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = pr_data->offset;
+ if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
+}
+
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
+{
+ return pr_data->bulk->hws_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_modify_header(ctx, 1, pattern,
+ log_bulk_size, flags);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5hws_action_mh_pattern *pattern;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ struct mlx5hws_context *ctx;
+ int bulk_len;
+
+ if (!pool_ctx)
+ return NULL;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ pattern = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
+ if (!mh_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ goto free_mh_bulk;
+
+ for (int i = 0; i < bulk_len; i++) {
+ mh_bulk->mhs_data[i].bulk = mh_bulk;
+ mh_bulk->mhs_data[i].offset = i;
+ }
+
+ mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
+ if (!mh_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &mh_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
+free_mh_bulk:
+ kvfree(mh_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+
+ mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all modify header were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(mh_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(mh_bulk);
+
+ return 0;
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
+ .bulk_create = mlx5_fs_hws_mh_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
+ if (!pool_pattern)
+ return -ENOMEM;
+ pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
+ if (!pool_pattern->data) {
+ kfree(pool_pattern);
+ return -ENOMEM;
+ }
+ pool_pattern->sz = pattern->sz;
+ mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
+ pool_pattern);
+ return 0;
+}
+
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ mlx5_fs_pool_cleanup(fs_hws_mh_pool);
+ pool_pattern = fs_hws_mh_pool->pool_ctx;
+ if (!pool_pattern)
+ return;
+ kfree(pool_pattern->data);
+ kfree(pool_pattern);
+}
+
+struct mlx5_fs_hws_mh *
+mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
+ fs_bulk);
+ return &mh_bulk->mhs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = mh_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = mh_data->offset;
+ if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
+}
+
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+ int num_actions, i;
+
+ pool_pattern = mh_pool->pool_ctx;
+ if (WARN_ON_ONCE(!pool_pattern))
+ return false;
+
+ if (pattern->sz != pool_pattern->sz)
+ return false;
+ num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
+ for (i = 0; i < num_actions; i++) {
+ if ((__force __be32)pattern->data[i] !=
+ (__force __be32)pool_pattern->data[i])
+ return false;
+ }
+ return true;
+}
+
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
+
+ fc_bulk_hws = &fc_bulk->hws_data;
+ /* try to avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
+ return fc_bulk_hws->hws_action;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return fc_bulk_hws->hws_action;
+ }
+ fc_bulk_hws->hws_action =
+ mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
+ if (!fc_bulk_hws->hws_action) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return NULL;
+ }
+ refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
+ mutex_unlock(&fc_bulk_hws->lock);
+
+ return fc_bulk_hws->hws_action;
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
+
+ /* try to avoid locking if not necessary */
+ if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
+ return;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fc_bulk_hws->hws_action);
+ fc_bulk_hws->hws_action = NULL;
+ mutex_unlock(&fc_bulk_hws->lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
new file mode 100644
index 000000000000..34072551dd21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_HWS_POOLS_H__
+#define __MLX5_FS_HWS_POOLS_H__
+
+#include <linux/if_vlan.h>
+#include "fs_pool.h"
+#include "fs_core.h"
+
+#define MLX5_FS_INSERT_HDR_VLAN_ANCHOR MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
+#define MLX5_FS_INSERT_HDR_VLAN_OFFSET offsetof(struct vlan_ethhdr, h_vlan_proto)
+#define MLX5_FS_INSERT_HDR_VLAN_SIZE sizeof(struct vlan_hdr)
+
+enum {
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX = 0,
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX,
+};
+
+struct mlx5_fs_hws_pr {
+ struct mlx5_fs_hws_pr_bulk *bulk;
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ size_t data_size;
+};
+
+struct mlx5_fs_hws_pr_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_pr prs_data[];
+};
+
+struct mlx5_fs_hws_pr_pool_ctx {
+ enum mlx5hws_action_type reformat_type;
+ size_t encap_data_size;
+};
+
+struct mlx5_fs_hws_mh {
+ struct mlx5_fs_hws_mh_bulk *bulk;
+ u32 offset;
+ u8 *data;
+};
+
+struct mlx5_fs_hws_mh_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5_fs_pool *mh_pool;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_mh mhs_data[];
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type);
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool);
+
+struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool);
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data);
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data);
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern);
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool);
+struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool);
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data);
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
+#endif /* __MLX5_FS_HWS_POOLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 3c8635f286ce..30ccd635b505 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -39,7 +39,6 @@
#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
#define MLX5HWS_TABLE_TYPE_BASE 2
-#define MLX5HWS_ACTION_STE_IDX_ANY 0
static inline bool is_mem_zero(const u8 *mem, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index 1bb3a6f8c3cd..b61864b32053 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -165,14 +165,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
next->match_ste.rtc_0_id,
next->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
+ return ret;
}
} else {
ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
+ return ret;
}
}
@@ -180,27 +180,19 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
if (prev_ft_id == tbl->ft_id) {
ret = mlx5hws_table_update_connected_miss_tables(tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx,
+ "Fatal error, failed to update connected miss table\n");
+ return ret;
}
}
ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
- goto matcher_reconnect;
+ return ret;
}
return 0;
-
-matcher_reconnect:
- if (list_empty(&tbl->matchers_list) || !prev)
- list_add(&matcher->list_node, &tbl->matchers_list);
- else
- /* insert after prev matcher */
- list_add(&matcher->list_node, &prev->list_node);
-
- return ret;
}
static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
@@ -208,7 +200,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
enum mlx5hws_matcher_rtc_type rtc_type,
bool is_mirror)
{
- struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+ struct mlx5hws_pool_chunk *ste = &matcher->action_ste.ste;
enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
@@ -225,8 +217,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
}
static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+ enum mlx5hws_matcher_rtc_type rtc_type)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
@@ -286,14 +277,20 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
break;
case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
rtc_0_id = &action_ste->rtc_0_id;
rtc_1_id = &action_ste->rtc_1_id;
ste_pool = action_ste->pool;
ste = &action_ste->ste;
+ /* Action RTC size calculation:
+ * log((max number of rules in matcher) *
+ * (max number of action STEs per rule) *
+ * (2 to support writing new STEs for update rule))
+ */
ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- attr->table.sz_row_log;
+ attr->table.sz_row_log +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
rtc_attr.log_size = ste->order;
rtc_attr.log_depth = 0;
rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
@@ -318,8 +315,8 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[tbl->type];
- default_stc = ctx->common_res[tbl->type].default_stc;
+ stc_pool = ctx->stc_pool;
+ default_stc = ctx->common_res.default_stc;
obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
rtc_attr.stc_base = obj_id;
@@ -358,8 +355,7 @@ free_ste:
}
static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+ enum mlx5hws_matcher_rtc_type rtc_type)
{
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
@@ -375,7 +371,7 @@ static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
ste = &matcher->match_ste.ste;
break;
case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
rtc_0_id = action_ste->rtc_0_id;
rtc_1_id = action_ste->rtc_1_id;
ste_pool = action_ste->pool;
@@ -466,20 +462,13 @@ static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
if (!resize_data)
return -ENOMEM;
- resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
-
- resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
- resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
- resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
- resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
- src_matcher->action_ste[0].pool :
- NULL;
- resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
- resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
- resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
- resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
- src_matcher->action_ste[1].pool :
- NULL;
+ resize_data->max_stes = src_matcher->action_ste.max_stes;
+
+ resize_data->stc = src_matcher->action_ste.stc;
+ resize_data->rtc_0_id = src_matcher->action_ste.rtc_0_id;
+ resize_data->rtc_1_id = src_matcher->action_ste.rtc_1_id;
+ resize_data->pool = src_matcher->action_ste.max_stes ?
+ src_matcher->action_ste.pool : NULL;
/* Place the new resized matcher on the dst matcher's list */
list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
@@ -512,49 +501,69 @@ static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
if (resize_data->max_stes) {
mlx5hws_action_free_single_stc(matcher->tbl->ctx,
matcher->tbl->type,
- &resize_data->action_ste[1].stc);
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->action_ste[0].stc);
+ &resize_data->stc);
- if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_1_id);
- }
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_0_id);
+ resize_data->rtc_1_id);
+
mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_0_id);
- if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
- mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
- mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
- }
+ resize_data->rtc_0_id);
+
+ if (resize_data->pool)
+ mlx5hws_pool_destroy(resize_data->pool);
}
kfree(resize_data);
}
}
-static int
-hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
struct mlx5hws_pool_attr pool_attr = {0};
struct mlx5hws_context *ctx = tbl->ctx;
- int ret;
+ u32 required_stes;
+ u8 max_stes = 0;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+ mlx5hws_err(ctx, "Invalid at %d", i);
+ return ret;
+ }
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+ /* There are no additional STEs required for matcher */
+ if (!max_stes)
+ return 0;
+
+ matcher->action_ste.max_stes = max_stes;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
/* Allocate action STE mempool */
pool_attr.table_type = tbl->type;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ /* Pool size is similar to action RTC size */
pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- matcher->attr.table.sz_row_log;
+ matcher->attr.table.sz_row_log +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
hws_matcher_set_pool_attr(&pool_attr, matcher);
action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
if (!action_ste->pool) {
@@ -563,7 +572,7 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
}
/* Allocate action RTC */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
if (ret) {
mlx5hws_err(ctx, "Failed to create action RTC\n");
goto free_ste_pool;
@@ -587,18 +596,18 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
return 0;
free_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
free_ste_pool:
mlx5hws_pool_destroy(action_ste->pool);
return ret;
}
-static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
if (!action_ste->max_stes ||
matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
@@ -606,65 +615,10 @@ static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action
return;
mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
mlx5hws_pool_destroy(action_ste->pool);
}
-static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
-{
- bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_context *ctx = tbl->ctx;
- u32 required_stes;
- u8 max_stes = 0;
- int i, ret;
-
- if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
- return 0;
-
- for (i = 0; i < matcher->num_of_at; i++) {
- struct mlx5hws_action_template *at = &matcher->at[i];
-
- ret = hws_matcher_check_and_process_at(matcher, at);
- if (ret) {
- mlx5hws_err(ctx, "Invalid at %d", i);
- return ret;
- }
-
- required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- max_stes = max(max_stes, required_stes);
-
- /* Future: Optimize reparse */
- }
-
- /* There are no additional STEs required for matcher */
- if (!max_stes)
- return 0;
-
- matcher->action_ste[0].max_stes = max_stes;
- matcher->action_ste[1].max_stes = max_stes;
-
- ret = hws_matcher_bind_at_idx(matcher, 0);
- if (ret)
- return ret;
-
- ret = hws_matcher_bind_at_idx(matcher, 1);
- if (ret)
- goto free_at_0;
-
- return 0;
-
-free_at_0:
- hws_matcher_unbind_at_idx(matcher, 0);
- return ret;
-}
-
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
-{
- hws_matcher_unbind_at_idx(matcher, 1);
- hws_matcher_unbind_at_idx(matcher, 0);
-}
-
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_context *ctx = matcher->tbl->ctx;
@@ -810,7 +764,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
goto unbind_at;
/* Allocate the RTC for the new matcher */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
if (ret)
goto destroy_end_ft;
@@ -822,7 +776,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
return 0;
destroy_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
destroy_end_ft:
hws_matcher_destroy_end_ft(matcher);
unbind_at:
@@ -836,7 +790,7 @@ static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
{
hws_matcher_resize_uninit(matcher);
hws_matcher_disconnect(matcher);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
hws_matcher_destroy_end_ft(matcher);
hws_matcher_unbind_at(matcher);
hws_matcher_unbind_mt(matcher);
@@ -970,10 +924,9 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+ if (matcher->action_ste.max_stes < required_stes) {
mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
- required_stes,
- matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+ required_stes, matcher->action_ste.max_stes);
return -ENOMEM;
}
@@ -1007,9 +960,9 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
if (!matcher->mt)
return -ENOMEM;
- matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
- sizeof(*matcher->at),
- GFP_KERNEL);
+ matcher->at = kvcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+ sizeof(*matcher->at),
+ GFP_KERNEL);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
ret = -ENOMEM;
@@ -1035,7 +988,7 @@ free_mt:
static void
hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
{
- kfree(matcher->at);
+ kvfree(matcher->at);
kfree(matcher->mt);
}
@@ -1157,8 +1110,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
return -EINVAL;
}
- if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
- dst_matcher->action_ste[0].max_stes) {
+ if (src_matcher->action_ste.max_stes > dst_matcher->action_ste.max_stes) {
mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
return -EINVAL;
}
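The matcher.c hunks above complete the collapse of the matcher's two mirrored action-STE sets into one: the _idx suffix and the action_ste_selector parameter disappear, and the old two-step bind with its free_at_0 unwind reduces to a single bind/unbind pair. The caller-visible difference, as a minimal sketch assembled from the removed code (fragments for illustration; assuming the surviving bind helper keeps the hws_matcher_bind_at name, as the unbind rename above suggests):

	/* before: two identical sets bound and unwound in lockstep */
	ret = hws_matcher_bind_at_idx(matcher, 0);
	if (ret)
		return ret;
	ret = hws_matcher_bind_at_idx(matcher, 1);
	if (ret) {
		hws_matcher_unbind_at_idx(matcher, 0); /* partial unwind */
		return ret;
	}

	/* after: one set, one call, no partial-failure state to unwind */
	ret = hws_matcher_bind_at(matcher);
	if (ret)
		return ret;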
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 81ff487f57be..020de70270c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -18,6 +18,11 @@
/* Required depth of the main large table */
#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+/* Action RTC size multiplier required to support rule update
+ * for rules with action STEs.
+ */
+#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+
enum mlx5hws_matcher_offset {
MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
@@ -52,15 +57,11 @@ struct mlx5hws_matcher_action_ste {
u8 max_stes;
};
-struct mlx5hws_matcher_resize_data_node {
+struct mlx5hws_matcher_resize_data {
struct mlx5hws_pool_chunk stc;
u32 rtc_0_id;
u32 rtc_1_id;
struct mlx5hws_pool *pool;
-};
-
-struct mlx5hws_matcher_resize_data {
- struct mlx5hws_matcher_resize_data_node action_ste[2];
u8 max_stes;
struct list_head list_node;
};
@@ -78,7 +79,7 @@ struct mlx5hws_matcher {
struct mlx5hws_matcher *col_matcher;
struct mlx5hws_matcher *resize_dst;
struct mlx5hws_matcher_match_ste match_ste;
- struct mlx5hws_matcher_action_ste action_ste[2];
+ struct mlx5hws_matcher_action_ste action_ste;
struct list_head list_node;
struct list_head resize_data;
};
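The header change mirrors the C-file refactor: with a single action-STE set per matcher, the resize bookkeeping no longer needs a two-node array, so the node's fields fold straight into mlx5hws_matcher_resize_data. Side by side, as a minimal sketch with stand-in types (pool_chunk and list_head simplified; not the driver headers):

#include <stdint.h>

struct pool;                                  /* opaque stand-in */
struct pool_chunk { int order, offset; };     /* stand-in */
struct list_head { struct list_head *next, *prev; };

/* before: one node per action-STE selector */
struct resize_data_node {
	struct pool_chunk stc;
	uint32_t rtc_0_id, rtc_1_id;
	struct pool *pool;
};
struct resize_data_old {
	struct resize_data_node action_ste[2];
	uint8_t max_stes;
	struct list_head list_node;
};

/* after: the lone node's fields are inlined, no duplication */
struct resize_data {
	struct pool_chunk stc;
	uint32_t rtc_0_id, rtc_1_id;
	struct pool *pool;
	uint8_t max_stes;
	struct list_head list_node;
};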
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index f39d636ff39a..5121951f2778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions {
struct mlx5hws_context_attr {
u16 queues;
u16 queue_size;
- bool bwc; /* add support for backward compatible API*/
};
struct mlx5hws_table_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index 06db5e4726ae..d9dc4f2d0dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -344,7 +344,7 @@ void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
- memcpy(wqe_arg, arg_data, wqe_len);
+ memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE);
send_attr.id = arg_idx++;
mlx5hws_send_engine_post_end(&ctrl, &send_attr);
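The one-line pat_arg.c fix deserves a note: wqe_len is whatever slot size the send ring hands back, while arg_data is sized for the argument payload (MLX5HWS_ARG_DATA_SIZE). Copying wqe_len bytes over-reads the source whenever the slot is larger than the payload, and truncates it whenever the slot is smaller; pinning the length to the payload size removes both failure modes. A standalone sketch of the pattern (names are illustrative, not the driver's):

#include <stddef.h>
#include <string.h>

#define ARG_DATA_SIZE 64 /* illustrative stand-in for MLX5HWS_ARG_DATA_SIZE */

/* Copy an argument payload into a ring slot: the slot size describes
 * the destination's capacity, never how much source data exists.
 */
static void write_arg(void *slot, size_t slot_len,
		      const unsigned char arg_data[ARG_DATA_SIZE])
{
	if (slot_len < ARG_DATA_SIZE)
		return; /* slot can't hold the payload; don't truncate silently */
	memcpy(slot, arg_data, ARG_DATA_SIZE); /* bounded by payload, not slot */
}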
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 27ca93385b08..8ddb51980044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -31,7 +31,7 @@ struct mlx5hws_pattern_cache_item {
u8 *data;
u16 num_of_actions;
} mh_data;
- u32 refcount;
+ u32 refcount; /* protected by pattern_cache lock */
struct list_head ptrn_list_node;
};
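The refcount annotation is small but load-bearing: the counter is a plain u32, not a refcount_t or atomic, so it is only safe because every increment and decrement happens with the pattern-cache lock held. A minimal userspace sketch of the discipline the comment records (lock and field names illustrative):

#include <pthread.h>
#include <stdint.h>

struct cache { pthread_mutex_t lock; };

struct cache_item {
	uint32_t refcount; /* protected by cache->lock */
};

static void item_get(struct cache *cache, struct cache_item *item)
{
	pthread_mutex_lock(&cache->lock);
	item->refcount++; /* plain increment is safe only under the lock */
	pthread_mutex_unlock(&cache->lock);
}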
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index fed2d913f3b8..50a81d360bb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -183,7 +183,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
*seg = -1;
/* Find the next free place from the buddy array */
- while (*seg == -1) {
+ while (*seg < 0) {
for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
buddy = hws_pool_buddy_get_next_buddy(pool, i,
order,
@@ -194,7 +194,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
}
*seg = mlx5hws_buddy_alloc_mem(buddy, order);
- if (*seg != -1)
+ if (*seg >= 0)
goto found;
if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
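The pool.c hunks swap exact-value tests (== -1, != -1) for sign tests. The two are equivalent today if the buddy allocator only ever reports failure as -1, but the sign test also stays correct should it ever return negative errno values, and it reads as "failed" rather than "equals a magic number". A standalone sketch:

/* Hypothetical allocator: non-negative segment index on success,
 * any negative value on failure.
 */
static int buddy_alloc(int order)
{
	return order > 4 ? -1 : order; /* could equally return -ENOMEM */
}

static int get_chunk(int order, int *seg)
{
	*seg = buddy_alloc(order);
	if (*seg >= 0)
		return 0; /* valid index */
	return *seg;      /* any negative result is a failure */
}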
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
index de92cecbeb92..271490a51b96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
@@ -390,11 +390,6 @@ struct mlx5_ifc_definer_bits {
u8 match_mask[0x160];
};
-struct mlx5_ifc_arg_bits {
- u8 rsvd0[0x88];
- u8 access_pd[0x18];
-};
-
struct mlx5_ifc_header_modify_pattern_in_bits {
u8 modify_field_select[0x40];
@@ -428,11 +423,6 @@ struct mlx5_ifc_create_definer_in_bits {
struct mlx5_ifc_definer_bits definer;
};
-struct mlx5_ifc_create_arg_in_bits {
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
- struct mlx5_ifc_arg_bits arg;
-};
-
struct mlx5_ifc_create_header_modify_pattern_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_header_modify_pattern_in_bits pattern;
@@ -479,36 +469,4 @@ enum {
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
};
-struct mlx5_ifc_alloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
-
#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index e20c67a04203..a27a2d5ffc7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -129,27 +129,18 @@ static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
static void
hws_rule_save_resize_info(struct mlx5hws_rule *rule,
- struct mlx5hws_send_ste_attr *ste_attr,
- bool is_update)
+ struct mlx5hws_send_ste_attr *ste_attr)
{
if (!mlx5hws_matcher_is_resizable(rule->matcher))
return;
- if (likely(!is_update)) {
+ /* resize_info might already exist (if we're in update flow) */
+ if (likely(!rule->resize_info)) {
rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
if (unlikely(!rule->resize_info)) {
pr_warn("HWS: resize info isn't allocated for rule\n");
return;
}
-
- rule->resize_info->max_stes =
- rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
- rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
- rule->matcher->action_ste[0].pool :
- NULL;
- rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
- rule->matcher->action_ste[1].pool :
- NULL;
}
memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
@@ -204,15 +195,14 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule,
}
}
-static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule)
{
struct mlx5hws_matcher *matcher = rule->matcher;
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_pool_chunk ste = {0};
int ret;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
if (unlikely(ret)) {
@@ -220,68 +210,29 @@ static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
"Failed to allocate STE for rule actions");
return ret;
}
- rule->action_ste_idx = ste.offset;
+
+ rule->action_ste.pool = matcher->action_ste.pool;
+ rule->action_ste.num_stes = matcher->action_ste.max_stes;
+ rule->action_ste.index = ste.offset;
return 0;
}
-static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste)
{
- struct mlx5hws_matcher *matcher = rule->matcher;
struct mlx5hws_pool_chunk ste = {0};
- struct mlx5hws_pool *pool;
- u8 max_stes;
-
- if (mlx5hws_matcher_is_resizable(matcher)) {
- /* Free the original action pool if rule was resized */
- max_stes = rule->resize_info->max_stes;
- pool = rule->resize_info->action_ste_pool[action_ste_selector];
- } else {
- max_stes = matcher->action_ste[action_ste_selector].max_stes;
- pool = matcher->action_ste[action_ste_selector].pool;
- }
-
- /* This release is safe only when the rule match part was deleted */
- ste.order = ilog2(roundup_pow_of_two(max_stes));
- ste.offset = rule->action_ste_idx;
-
- mlx5hws_pool_chunk_free(pool, &ste);
-}
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
- struct mlx5hws_rule_attr *attr)
-{
- int action_ste_idx;
- int ret;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 0);
- if (unlikely(ret))
- return ret;
-
- action_ste_idx = rule->action_ste_idx;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 1);
- if (unlikely(ret)) {
- hws_rule_free_action_ste_idx(rule, 0);
- return ret;
- }
-
- /* Both pools have to return the same index */
- if (unlikely(rule->action_ste_idx != action_ste_idx)) {
- pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
- return -EINVAL;
- }
+ if (!action_ste->num_stes)
+ return;
- return 0;
-}
+ ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes));
+ ste.offset = action_ste->index;
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
-{
- if (rule->action_ste_idx > -1) {
- hws_rule_free_action_ste_idx(rule, 1);
- hws_rule_free_action_ste_idx(rule, 0);
- }
+ /* This release is safe only when the rule match STE was deleted
+ * (when the rule is being deleted) or replaced with the new STE that
+ * isn't pointing to old action STEs (when the rule is being updated).
+ */
+ mlx5hws_pool_chunk_free(action_ste->pool, &ste);
}
static void hws_rule_create_init(struct mlx5hws_rule *rule,
@@ -298,14 +249,24 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* In update we use these rtc's */
rule->rtc_0 = 0;
rule->rtc_1 = 0;
- rule->action_ste_selector = 0;
+
+ rule->action_ste.pool = NULL;
+ rule->action_ste.num_stes = 0;
+ rule->action_ste.index = -1;
+
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
} else {
- rule->action_ste_selector = !rule->action_ste_selector;
+ rule->status = MLX5HWS_RULE_STATUS_UPDATING;
}
+ /* Initialize the old action STE info - shallow-copy action_ste.
+ * In create flow this will set old_action_ste fields to initial values.
+ * In update flow this will save the existing action STE info,
+ * so that we will later use it to free old STEs.
+ */
+ rule->old_action_ste = rule->action_ste;
+
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->status = MLX5HWS_RULE_STATUS_CREATING;
/* Init default send STE attributes */
ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
@@ -315,8 +276,8 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* Init default action apply */
apply->tbl_type = tbl->type;
- apply->common_res = &ctx->common_res[tbl->type];
- apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->common_res = &ctx->common_res;
+ apply->jump_to_action_stc = matcher->action_ste.stc.offset;
apply->require_dep = 0;
}
@@ -332,8 +293,6 @@ static void hws_rule_move_init(struct mlx5hws_rule *rule,
rule->rtc_1 = 0;
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->action_ste_selector = 0;
rule->status = MLX5HWS_RULE_STATUS_CREATING;
rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
}
@@ -394,21 +353,17 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (action_stes) {
/* Allocate action STEs for rules that need more than match STE */
- if (!is_update) {
- ret = hws_rule_alloc_action_ste(rule, attr);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
- mlx5hws_send_abort_new_dep_wqe(queue);
- return ret;
- }
+ ret = hws_rule_alloc_action_ste(rule);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
}
/* Skip RX/TX based on the dep_wqe init */
- ste_attr.rtc_0 = dep_wqe->rtc_0 ?
- matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
- ste_attr.rtc_1 = dep_wqe->rtc_1 ?
- matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1_id : 0;
/* Action STEs are written to a specific index last to first */
- ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ ste_attr.direct_index = rule->action_ste.index + action_stes;
apply.next_direct_idx = ste_attr.direct_index;
} else {
apply.next_direct_idx = 0;
@@ -459,7 +414,7 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (!is_update)
hws_rule_save_delete_info(rule, &ste_attr);
- hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ hws_rule_save_resize_info(rule, &ste_attr);
mlx5hws_send_engine_inc_rule(queue);
if (!attr->burst)
@@ -480,7 +435,10 @@ static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
/* Rule failed now we can safely release action STEs */
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
+
+ /* The rule may have failed mid-update - release old action STEs as well */
+ mlx5hws_rule_free_action_ste(&rule->old_action_ste);
/* Clear complex tag */
hws_rule_clear_delete_info(rule);
@@ -517,7 +475,8 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
}
/* Rule is not completed yet */
- if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
+ rule->status == MLX5HWS_RULE_STATUS_UPDATING)
return -EBUSY;
/* Rule failed and doesn't require cleanup */
@@ -534,7 +493,7 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
hws_rule_gen_comp(queue, rule, false,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
mlx5hws_rule_clear_resize_info(rule);
return 0;
}
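Taken together, the rule.c hunks implement in-place rule update on top of the single action-STE set: instead of ping-ponging between two preallocated pools via action_ste_selector, an update snapshots the live chunk into old_action_ste, allocates a fresh chunk from the same pool, and frees the old one only once the update completes (or on the failure path, where both are released). A condensed sketch of the lifecycle, reconstructed from the hunks above with the queue and WQE plumbing elided:

/* create:  action_ste = {pool, index, num_stes}; old_action_ste = empty
 *
 * update:  status = UPDATING;
 *          old_action_ste = action_ste;         // shallow copy, still live
 *          action_ste = alloc_chunk(pool);      // fresh STEs for new actions
 *          ...post the update WQE...
 *
 * completion, good flow:
 *          status = UPDATED; free(old_action_ste); status = CREATED;
 *
 * completion, failure:
 *          free(action_ste); free(old_action_ste); // both safe to drop
 */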
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index 495cdd17e9f3..b5ee94ac449b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -15,6 +15,8 @@ enum mlx5hws_rule_status {
MLX5HWS_RULE_STATUS_UNKNOWN,
MLX5HWS_RULE_STATUS_CREATING,
MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_UPDATING,
+ MLX5HWS_RULE_STATUS_UPDATED,
MLX5HWS_RULE_STATUS_DELETING,
MLX5HWS_RULE_STATUS_DELETED,
MLX5HWS_RULE_STATUS_FAILING,
@@ -41,13 +43,17 @@ struct mlx5hws_rule_match_tag {
};
};
+struct mlx5hws_rule_action_ste_info {
+ struct mlx5hws_pool *pool;
+ int index; /* STE array index */
+ u8 num_stes;
+};
+
struct mlx5hws_rule_resize_info {
- struct mlx5hws_pool *action_ste_pool[2];
u32 rtc_0;
u32 rtc_1;
u32 rule_idx;
u8 state;
- u8 max_stes;
u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
};
@@ -58,18 +64,18 @@ struct mlx5hws_rule {
struct mlx5hws_rule_match_tag tag;
struct mlx5hws_rule_resize_info *resize_info;
};
+ struct mlx5hws_rule_action_ste_info action_ste;
+ struct mlx5hws_rule_action_ste_info old_action_ste;
u32 rtc_0; /* The RTC into which the STE was inserted */
u32 rtc_1; /* The RTC into which the STE was inserted */
- int action_ste_idx; /* STE array index */
u8 status; /* enum mlx5hws_rule_status */
- u8 action_ste_selector; /* For rule update - which action STE is in use */
u8 pending_wqes;
bool skip_delete; /* For complex rules - another rule with same tag
* still exists, so don't actually delete this rule.
*/
};
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
void *queue, void *user_data);
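The placement of the two new states is not cosmetic: the completion handler advances a rule with rule->status++ (see the send.c hunk below), so every transient state must sit immediately before its settled counterpart in the enum. A minimal illustration (simplified mirror of the enum above, not the driver header):

enum rule_status {
	CREATING, CREATED, /* CREATING + 1 == CREATED */
	UPDATING, UPDATED, /* UPDATING + 1 == UPDATED */
	DELETING, DELETED, /* DELETING + 1 == DELETED */
};

static void on_completion(enum rule_status *status)
{
	(*status)++; /* creating->created, updating->updated, ... */
}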
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 883b4ed30892..cb6abc4ab7df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -377,17 +377,25 @@ static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
*status = MLX5HWS_FLOW_OP_ERROR;
} else {
- /* Increase the status, this only works on good flow as the enum
- * is arrange it away creating -> created -> deleting -> deleted
+ /* Increase the status; this only works on a good flow, as
+ * the enum is arranged this way:
+ * - creating -> created
+ * - updating -> updated
+ * - deleting -> deleted
*/
priv->rule->status++;
*status = MLX5HWS_FLOW_OP_SUCCESS;
- /* Rule was deleted now we can safely release action STEs
- * and clear resize info
- */
if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
- mlx5hws_rule_free_action_ste(priv->rule);
+ /* Rule was deleted, now we can safely release
+ * action STEs and clear resize info
+ */
+ mlx5hws_rule_free_action_ste(&priv->rule->action_ste);
mlx5hws_rule_clear_resize_info(priv->rule);
+ } else if (priv->rule->status == MLX5HWS_RULE_STATUS_UPDATED) {
+ /* Rule was updated, free the old action STEs */
+ mlx5hws_rule_free_action_ste(&priv->rule->old_action_ste);
+ /* Update completed - move the rule back to "created" */
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
}
}
}
@@ -633,6 +641,7 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ MLX5_SET(sqc, sqc, non_wire, 1);
ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
@@ -896,15 +905,18 @@ close_cq:
return err;
}
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
hws_send_ring_close(queue);
kfree(queue->completed.entries);
}
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size)
+static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
{
int err;
@@ -1005,7 +1017,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
u16 queue_size)
{
int err = 0;
- u32 i;
+ int i = 0;
/* Open one extra queue for control path */
ctx->queues = queues + 1;
@@ -1021,7 +1033,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
goto free_bwc_locks;
}
- for (i = 0; i < ctx->queues; i++) {
+ /* If native API isn't supported, skip the unused native queues:
+ * initialize BWC queues and control queue only.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
if (err)
goto close_send_queues;
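Two cooperating send.c changes make the native queues optional: the open loop starts at the first BWC queue id when the native API is unsupported, leaving the earlier slots zero-initialized, and queue close bails out when num_entries is zero, so the blanket close loop stays safe over a mix of opened and skipped queues. The shape of the pattern, as a standalone sketch with stand-in types:

struct queue { unsigned int num_entries; /* stays 0 until opened */ };

static void queue_close(struct queue *q)
{
	if (!q->num_entries)
		return; /* skipped at open time: nothing to tear down */
	/* ...release rings, free completion entries... */
	q->num_entries = 0;
}

static void queues_close(struct queue *qs, int n)
{
	while (n--)
		queue_close(&qs[n]); /* safe even for never-opened slots */
}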
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index b50825d6dc53..f833092235c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -189,12 +189,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size);
-
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 9576e02d00c3..ab1297531232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -37,6 +37,7 @@ static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
}
static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
@@ -48,8 +49,8 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return 0;
- if (ctx->common_res[tbl_type].default_miss) {
- ctx->common_res[tbl_type].default_miss->refcount++;
+ if (ctx->common_res.default_miss) {
+ ctx->common_res.default_miss->refcount++;
return 0;
}
@@ -70,29 +71,28 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
return -EINVAL;
}
- /* ctx->ctrl_lock must be held here */
- ctx->common_res[tbl_type].default_miss = default_miss;
- ctx->common_res[tbl_type].default_miss->refcount++;
+ ctx->common_res.default_miss = default_miss;
+ ctx->common_res.default_miss->refcount++;
return 0;
}
/* Called under ctx->ctrl_lock */
static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_forward_tbl *default_miss;
struct mlx5hws_context *ctx = tbl->ctx;
- u8 tbl_type = tbl->type;
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return;
- default_miss = ctx->common_res[tbl_type].default_miss;
+ default_miss = ctx->common_res.default_miss;
if (--default_miss->refcount)
return;
mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
- ctx->common_res[tbl_type].default_miss = NULL;
+ ctx->common_res.default_miss = NULL;
}
static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
@@ -478,15 +478,9 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
if (old_miss_tbl)
list_del_init(&tbl->default_miss.next);
- old_miss_tbl = tbl->default_miss.miss_tbl;
- if (old_miss_tbl)
- list_del_init(&old_miss_tbl->default_miss.head);
-
if (miss_tbl)
list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
- mutex_unlock(&ctx->ctrl_lock);
- return 0;
out:
mutex_unlock(&ctx->ctrl_lock);
return ret;
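table.c trades free-text locking comments for __must_hold(), the kernel's sparse context annotation; with it in place, a sparse run (make C=1) can warn when a call path reaches these helpers without ctrl_lock. The usage pattern, reduced to essentials (kernel-style sketch, not standalone code; struct and field names illustrative):

/* The annotation declares the caller must already hold the lock on
 * entry and still hold it on return.
 */
static int touch_common_res(struct ctx *ctx)
__must_hold(&ctx->ctrl_lock)
{
	ctx->common_res.users++; /* ctrl_lock guards common_res */
	return 0;
}

static int caller(struct ctx *ctx)
{
	int ret;

	mutex_lock(&ctx->ctrl_lock);
	ret = touch_common_res(ctx); /* lock provably held here */
	mutex_unlock(&ctx->ctrl_lock);
	return ret;
}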
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 49f22cad92bf..60cb4527588a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8))
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..c8b8ff80c7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
@@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
return mlx5dr_ste_get_ctx_v1();
else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
return mlx5dr_ste_get_ctx_v2();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_8)
+ return mlx5dr_ste_get_ctx_v3();
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..5f409dc30aca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx {
/* Actions */
u32 actions_caps;
- void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
- void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
@@ -197,7 +199,17 @@ struct mlx5dr_ste_ctx {
u16 *used_hw_action_num);
int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
+ /* Actions bit set */
+ void (*set_encap)(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size);
+ void (*set_push_vlan)(u8 *ste, u8 *d_action,
+ u32 vlan_hdr);
+ void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num);
+ void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action);
+ void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
+ u8 *scnd_d_action, u32 reformat_id,
+ int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
@@ -205,5 +217,6 @@ struct mlx5dr_ste_ctx {
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void);
#endif /* _DR_STE_ */
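These header changes define the extension seam for the new ConnectX-8 (v3) context: set_actions_tx/rx now receive the ste_ctx itself, and the leaf action setters (set_encap, push/pop VLAN, RX decap, encap L3) become per-version ops. The shared v1 action-builder loop then dispatches through whichever setters the active format registered, so v3 overrides only the leaves whose STE layout changed and inherits everything else. In miniature (hypothetical reduced types, not the driver's):

#include <stdint.h>

struct ste_ctx {
	void (*set_pop_vlan)(uint8_t *ste, uint8_t *action, uint8_t vlans_num);
	/* ...the other leaf setters... */
};

static void v1_set_pop_vlan(uint8_t *ste, uint8_t *a, uint8_t n) { /* v1 layout */ }
static void v3_set_pop_vlan(uint8_t *ste, uint8_t *a, uint8_t n) { /* CX-8 layout */ }

/* shared builder: version-agnostic, dispatches through the context */
static void set_actions_tx(struct ste_ctx *ctx, uint8_t *ste, uint8_t *a, uint8_t n)
{
	ctx->set_pop_vlan(ste, a, n);
}

static struct ste_ctx ste_ctx_v1 = { .set_pop_vlan = v1_set_pop_vlan };
static struct ste_ctx ste_ctx_v3 = { .set_pop_vlan = v3_set_pop_vlan };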
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index e9f6c7ed7a7b..42536bee55e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
@@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
}
static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
}
static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 1d49704b9542..7f83d77c43ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -5,136 +5,6 @@
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste_v1.h"
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
- ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
- DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
- DR_STE_V1_TYPE_BWC_BYTE = 0x0,
- DR_STE_V1_TYPE_BWC_DW = 0x1,
- DR_STE_V1_TYPE_MATCH = 0x2,
- DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
- DR_STE_V1_LU_TYPE_NOP = 0x0000,
- DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
- DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
- DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
- DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
- DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
- DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
- DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
- DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
- DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
- DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
- DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
- DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
- DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
- DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
- DR_STE_V1_LU_TYPE_GRE = 0x010d,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
- DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
- DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
- DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
- DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
- DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
- DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
- DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
- DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-};
-
-enum dr_ste_v1_action_size {
- DR_STE_ACTION_SINGLE_SZ = 4,
- DR_STE_ACTION_DOUBLE_SZ = 8,
- DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
- DR_STE_V1_ACTION_ID_NOP = 0x00,
- DR_STE_V1_ACTION_ID_COPY = 0x05,
- DR_STE_V1_ACTION_ID_SET = 0x06,
- DR_STE_V1_ACTION_ID_ADD = 0x07,
- DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
- DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
- DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
- DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
- DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
- DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
- DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
- DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
- DR_STE_V1_ACTION_ID_ASO = 0x12,
- DR_STE_V1_ACTION_ID_TRAILER = 0x13,
- DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
- DR_STE_V1_ACTION_ID_MAX = 0x21,
- /* use for special cases */
- DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
-};
-
-enum {
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
- DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
@@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
@@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -799,7 +666,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
- dr_ste_v1_set_rx_decap(last_ste, action);
+ ste_ctx->set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
@@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_ctr = false;
@@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action,
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
-#define DR_STE_DECAP_L3_ACTION_NUM 8
-#define DR_STE_L2_HDR_MAX_SZ 20
-
int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
@@ -2330,7 +2194,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index e2fc69867088..a8d9e308d339 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -7,6 +7,138 @@
#include "dr_types.h"
#include "dr_ste.h"
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
@@ -17,11 +149,18 @@ u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_reparse(u8 *hw_ste_p);
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size);
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr);
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size);
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
u8 length, u32 data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 808b013cf48c..0882dba0f64b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -2,167 +2,7 @@
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "dr_ste_v1.h"
-
-enum {
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
- },
-};
+#include "dr_ste_v2.h"
static struct mlx5dr_ste_ctx ste_ctx_v2 = {
/* Builders */
@@ -223,7 +63,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
new file mode 100644
index 000000000000..d853fde49cfc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V2_
+#define _DR_STE_V2_
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+#endif /* _DR_STE_V2_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
new file mode 100644
index 000000000000..cc60ce1d274e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset to the VLAN header here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset,
+ HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr);
+ dr_ste_v1_set_reparse(ste);
+}
+
+static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static int
+dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data);
+
+ /* Add alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
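+ /* e.g. a 14B L2 header starts at offset 2 and fills exactly four 4B inline chunks */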
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* The remove plus a NOP count as a single double action */
+
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+ /* Add the new header using the inline action, 4 bytes at a time. The
+ * header is added in reverse order to the beginning of the packet so
+ * that the HW does not parse it incorrectly. Since the header is 14B
+ * or 18B, two extra padding bytes are added and removed afterwards.
+ */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0);
+
+ /* Copy raw bytes to avoid endianness conversion */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
+
+static struct mlx5dr_ste_ctx ste_ctx_v3 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v3_set_encap,
+ .set_push_vlan = &dr_ste_v3_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v3_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v3_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void)
+{
+ return &ste_ctx_v3;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 4b349d4005e4..8007d3f523c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
- id = dst->dest_attr.counter_id;
+ id = mlx5_fc_id(dst->dest_attr.counter);
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..898c3618ff26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
@@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits {
};
};
+struct mlx5_ifc_ste_single_action_remove_header_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 end_anchor[0x7];
+ u8 reserved_at_16[0x1];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 3ac7dc67509f..0bb3724c10c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -160,7 +160,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_7)));
+ MLX5_STEERING_FORMAT_CONNECTX_8)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 1bed75eca97d..740b719e7072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -382,6 +382,7 @@ err_alloc_bfreg:
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
struct mlx5_core_dev *parent = NULL;
if (!MLX5_CAP_GEN(mdev, bf)) {
@@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
*/
goto out;
- mutex_lock(&mdev->wc_state_lock);
-
- if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
- goto unlock;
-
#ifdef CONFIG_MLX5_SF
- if (mlx5_core_is_sf(mdev))
+ if (mlx5_core_is_sf(mdev)) {
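+ /* An SF shares WC state with its parent, so serialize on the parent's lock */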
parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
#endif
- if (parent) {
- mutex_lock(&parent->wc_state_lock);
+ mutex_lock(wc_state_lock);
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
mlx5_core_test_wc(parent);
mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
parent->wc_state);
mdev->wc_state = parent->wc_state;
- mutex_unlock(&parent->wc_state_lock);
+ } else {
+ mlx5_core_test_wc(mdev);
}
- mlx5_core_test_wc(mdev);
-
unlock:
- mutex_unlock(&mdev->wc_state_lock);
+ mutex_unlock(wc_state_lock);
out:
mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 46245e0b2462..43c84900369a 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -14,7 +14,6 @@
#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
#define MLXFW_FSM_STATE_WAIT_ROUNDS \
(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
-#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
static const int mlxfw_fsm_state_errno[] = {
[MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
@@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
return err;
}
- comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
if (comp->data_size > comp_max_size) {
MLXFW_ERR_MSG(mlxfw_dev, extack,
"Component size is bigger than limit", -EINVAL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 4a79c0d7e7ad..2bb2b77351bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -35,6 +35,7 @@
#include "reg.h"
#include "resources.h"
#include "../mlxfw/mlxfw.h"
+#include "txheader.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -677,7 +678,7 @@ struct mlxsw_reg_trans {
struct list_head bulk_list;
struct mlxsw_core *core;
struct sk_buff *tx_skb;
- struct mlxsw_tx_info tx_info;
+ struct mlxsw_txhdr_info txhdr_info;
struct delayed_work timeout_dw;
unsigned int retries;
u64 tid;
@@ -737,12 +738,11 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
if (!skb)
return -ENOMEM;
- trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
- skb->data + mlxsw_core->driver->txhdr_len,
- skb->len - mlxsw_core->driver->txhdr_len);
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, skb->data,
+ skb->len);
atomic_set(&trans->active, 1);
- err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->txhdr_info);
if (err) {
dev_kfree_skb(skb);
return err;
@@ -944,7 +944,7 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
- sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ sizeof(u32) + MLXSW_TXHDR_LEN);
if (mlxsw_core->emad.enable_string_tlv)
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (mlxsw_core->emad.enable_latency_tlv)
@@ -984,8 +984,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
list_add_tail(&trans->bulk_list, bulk_list);
trans->core = mlxsw_core;
trans->tx_skb = skb;
- trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
- trans->tx_info.is_emad = true;
+ trans->txhdr_info.tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->txhdr_info.tx_info.is_emad = true;
INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
trans->tid = tid;
init_completion(&trans->completion);
@@ -995,7 +995,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->type = type;
mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
- mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
@@ -2330,10 +2329,10 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
- tx_info);
+ txhdr_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6d11225594dd..1a871397a6df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -72,7 +72,14 @@ struct mlxsw_tx_info {
bool is_emad;
};
+struct mlxsw_txhdr_info {
+ struct mlxsw_tx_info tx_info;
+ bool data;
+ u16 max_fid; /* Used for PTP packets which are sent as data. */
+};
+
struct mlxsw_rx_md_info {
+ struct napi_struct *napi;
u32 cookie_index;
u32 latency;
u32 tx_congestion;
@@ -94,7 +101,7 @@ struct mlxsw_rx_md_info {
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
@@ -425,8 +432,6 @@ struct mlxsw_driver {
int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer,
u64 *p_drops);
- void (*txhdr_construct)(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
@@ -439,7 +444,6 @@ struct mlxsw_driver {
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
- u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool sdq_supports_cqe_v2;
};
@@ -486,7 +490,7 @@ struct mlxsw_bus {
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 1e150ce1c73a..f9f565c1036d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -516,7 +516,7 @@ static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d6f37456fb31..5b44c931b660 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -21,6 +21,7 @@
#include "cmd.h"
#include "port.h"
#include "resources.h"
+#include "txheader.h"
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
@@ -737,6 +738,7 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct napi_struct *napi,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
@@ -807,6 +809,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
}
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
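+ /* Save the NAPI context so Rx listeners can hand the skb to napi_gro_receive() */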
+ mlxsw_skb_cb(skb)->rx_md_info.napi = napi;
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -869,7 +872,7 @@ static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
continue;
}
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq,
wqe_counter, q->u.cq.v, cqe);
if (++work_done == budget)
@@ -2093,6 +2096,39 @@ static void mlxsw_pci_fini(void *bus_priv)
mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
+static int mlxsw_pci_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_txhdr_info *txhdr_info)
+{
+ const struct mlxsw_tx_info tx_info = txhdr_info->tx_info;
+ char *txhdr;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN))
+ return -ENOMEM;
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+
+ if (unlikely(txhdr_info->data)) {
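+ /* Data packets enter the switch through the CPU port and are forwarded by FID */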
+ u16 fid = txhdr_info->max_fid + tx_info.local_port - 1;
+
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, fid);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ } else {
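+ /* Control packets are directed to a specific egress port */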
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info.local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+ }
+
+ return 0;
+}
+
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
@@ -2120,7 +2156,7 @@ static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct mlxsw_pci_queue *q;
@@ -2129,13 +2165,17 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
int i;
int err;
+ err = mlxsw_pci_txhdr_construct(skb, txhdr_info);
+ if (err)
+ return err;
+
if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
err = skb_linearize(skb);
if (err)
return err;
}
- q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, &txhdr_info->tx_info);
spin_lock_bh(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
if (!elem_info) {
@@ -2143,7 +2183,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
err = -EAGAIN;
goto unlock;
}
- mlxsw_skb_cb(skb)->tx_info = *tx_info;
+ mlxsw_skb_cb(skb)->tx_info = txhdr_info->tx_info;
elem_info->sdq.skb = skb;
wqe = elem_info->elem;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3f5e5d99251b..d714311fd884 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -107,74 +107,6 @@ static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
-/* tx_hdr_version
- * Tx header version.
- * Must be set to 1.
- */
-MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
-
-/* tx_hdr_ctl
- * Packet control type.
- * 0 - Ethernet control (e.g. EMADs, LACP)
- * 1 - Ethernet data
- */
-MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
-
-/* tx_hdr_proto
- * Packet protocol type. Must be set to 1 (Ethernet).
- */
-MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
-
-/* tx_hdr_rx_is_router
- * Packet is sent from the router. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
-
-/* tx_hdr_fid_valid
- * Indicates if the 'fid' field is valid and should be used for
- * forwarding lookup. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
-
-/* tx_hdr_swid
- * Switch partition ID. Must be set to 0.
- */
-MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
-
-/* tx_hdr_control_tclass
- * Indicates if the packet should use the control TClass and not one
- * of the data TClasses.
- */
-MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
-
-/* tx_hdr_etclass
- * Egress TClass to be used on the egress device on the egress port.
- */
-MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
-
-/* tx_hdr_port_mid
- * Destination local port for unicast packets.
- * Destination multicast ID for multicast packets.
- *
- * Control packets are directed to a specific egress port, while data
- * packets are transmitted through the CPU port (0) into the switch partition,
- * where forwarding rules are applied.
- */
-MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
-
-/* tx_hdr_fid
- * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
- * set, otherwise calculated based on the packet's VID using VID to FID mapping.
- * Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
-
-/* tx_hdr_type
- * 0 - Data packets
- * 6 - Control packets
- */
-MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, bool clear,
u64 *packets, u64 *bytes)
@@ -233,61 +165,6 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
-
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_swid_set(txhdr, 0);
- mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
- mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
-}
-
-int
-mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr;
- u16 max_fid;
- int err;
-
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- err = -ENOMEM;
- goto err_skb_cow_head;
- }
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
- err = -EIO;
- goto err_res_valid;
- }
- max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
-
- txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
- mlxsw_tx_hdr_fid_valid_set(txhdr, true);
- mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
- return 0;
-
-err_res_valid:
-err_skb_cow_head:
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return err;
-}
-
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
unsigned int type;
@@ -299,30 +176,49 @@ static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
return !!ptp_parse_header(skb, type);
}
-static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ /* Resource validation was done as part of PTP init. */
+ u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr_info->data = true;
+ txhdr_info->max_fid = max_fid;
+}
- /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
- * need special handling and cannot be transmitted as regular control
- * packets.
+static struct sk_buff *
+mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
+{
+ /* In some Spectrum ASICs, in order for PTP event packets to have their
+ * correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
*/
- if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
- return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
- mlxsw_sp_port, skb,
- tx_info);
+ if (skb_vlan_tagged(skb))
+ return skb;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
+ return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+}
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
+static struct sk_buff *
+mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
+{
+ if (likely(!mlxsw_sp_skb_requires_ts(skb)))
+ return skb;
+
+ if (!mlxsw_sp->ptp_ops->tx_as_data)
+ return skb;
+
+ /* Special handling for PTP events that require a time stamp and cannot
+ * be transmitted as regular control packets.
+ */
+ mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info);
+ return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb);
}
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
@@ -721,16 +617,16 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
- const struct mlxsw_tx_info tx_info = {
- .local_port = mlxsw_sp_port->local_port,
- .is_emad = false,
+ struct mlxsw_txhdr_info txhdr_info = {
+ .tx_info.local_port = mlxsw_sp_port->local_port,
+ .tx_info.is_emad = false,
};
u64 len;
int err;
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info))
return NETDEV_TX_BUSY;
if (eth_skb_pad(skb)) {
@@ -738,10 +634,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
- &tx_info);
- if (err)
+ skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info);
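+ /* A NULL return means the VLAN push failed and already freed the skb */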
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
+ }
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
@@ -751,7 +648,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -2449,7 +2346,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u64_stats_update_end(&pcpu_stats->syncp);
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
@@ -2792,7 +2689,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2811,7 +2707,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+ .tx_as_data = true,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
@@ -2830,7 +2726,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3992,11 +3887,9 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.sdq_supports_cqe_v2 = false,
};
@@ -4030,10 +3923,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4067,10 +3958,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4102,10 +3991,8 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
.sdq_supports_cqe_v2 = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 8d3c61287696..b10f80fc651b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -243,10 +243,7 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
- int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ bool tx_as_data;
};
struct mlxsw_sp_fid_core_ops {
@@ -711,12 +708,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index f07955b5439f..6a4a81c63451 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (sample_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
@@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (mirror_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
block,
act->sample.psample_group,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 69cd689dbc83..5afe6b155ef0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- if (mr_route->mfc->mfc_un.res.pkt != packets)
- mr_route->mfc->mfc_un.res.lastuse = jiffies;
- mr_route->mfc->mfc_un.res.pkt = packets;
- mr_route->mfc->mfc_un.res.bytes = bytes;
+ if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
+ WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index d94081c7658e..ca8b9d18fbb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -1353,6 +1353,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
+ /* The max FID is used in the data path, so validate it as part of init. */
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, FID))
+ return ERR_PTR(-EIO);
+
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
@@ -1679,43 +1683,3 @@ int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
-
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
-}
-
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
- * their correction field correctly set on the egress port they must be
- * transmitted as data packets. Such packets ingress the ASIC via the
- * CPU port and must have a VLAN tag, as the CPU port is not configured
- * with a PVID. Push the default VLAN (4095), which is configured as
- * egress untagged on all the ports.
- */
- if (!skb_vlan_tagged(skb)) {
- skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
- MLXSW_SP_DEFAULT_VID);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- return -ENOMEM;
- }
- }
-
- return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
- tx_info);
-}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c8aa1452fbb9..102db9060135 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -49,11 +49,6 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
@@ -78,11 +73,6 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -157,15 +147,6 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-static inline int
-mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- return -EOPNOTSUPP;
-}
-
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
@@ -211,15 +192,6 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
{
return -EOPNOTSUPP;
}
-
-static inline int
-mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- return -EOPNOTSUPP;
-}
#endif
static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899c954e0e5f..1f9c1c86839f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
if (err)
return;
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index da51dd9d5e44..e78cba5821b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -4,6 +4,69 @@
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
#define MLXSW_TXHDR_LEN 0x10
#define MLXSW_TXHDR_VERSION_0 0
#define MLXSW_TXHDR_VERSION_1 1
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index ea6214ca48e7..239b2258ec65 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -13,6 +13,7 @@ fbnic-y := fbnic_csr.o \
fbnic_ethtool.o \
fbnic_fw.o \
fbnic_hw_stats.o \
+ fbnic_hwmon.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 744eb0d95449..14751f16e125 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -16,10 +16,15 @@
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
+struct fbnic_napi_vector;
+
+#define FBNIC_MAX_NAPI_VECTORS 128u
+
struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
struct dentry *dbg_fbd;
+ struct device *hwmon;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -28,10 +33,16 @@ struct fbnic_dev {
unsigned int pcs_msix_vector;
unsigned short num_irqs;
+ struct {
+ u8 users;
+ char name[IFNAMSIZ + 9];
+ } napi_irq[FBNIC_MAX_NAPI_VECTORS];
+
struct delayed_work service_task;
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
+ struct fbnic_fw_completion *cmpl_data;
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -140,9 +151,18 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd);
int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+void fbnic_hwmon_register(struct fbnic_dev *fbd);
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
+
int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr);
int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data);
void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index cc8ca94529ca..20cd9f5f89e2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -40,49 +40,99 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN
-static int
-fbnic_get_ts_info(struct net_device *netdev,
- struct kernel_ethtool_ts_info *tsinfo)
+static void
+fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
- tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+ fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+}
- tsinfo->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+static int fbnic_get_regs_len(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
- tsinfo->tx_types =
- BIT(HWTSTAMP_TX_OFF) |
- BIT(HWTSTAMP_TX_ON);
+ return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
+}
- tsinfo->rx_filters =
- BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_ALL);
+static void fbnic_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
- return 0;
+ fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
}
-static void
-fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
- struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
- fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
- sizeof(drvinfo->fw_version));
+ clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
+ if (!clone)
+ return NULL;
+
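+ /* The clone must not share ring or NAPI pointers with the original */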
+ memset(clone->tx, 0, sizeof(clone->tx));
+ memset(clone->rx, 0, sizeof(clone->rx));
+ memset(clone->napi, 0, sizeof(clone->napi));
+ return clone;
}
-static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
+ struct fbnic_net *clone)
{
- if (counter->reported)
- *stat = counter->value;
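+ /* Only the ring geometry is swapped here; ring and NAPI pointers are
+ * exchanged in fbnic_clone_swap().
+ */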
+ swap(clone->rcq_size, orig->rcq_size);
+ swap(clone->hpq_size, orig->hpq_size);
+ swap(clone->ppq_size, orig->ppq_size);
+ swap(clone->txq_size, orig->txq_size);
+ swap(clone->num_rx_queues, orig->num_rx_queues);
+ swap(clone->num_tx_queues, orig->num_tx_queues);
+ swap(clone->num_napi, orig->num_napi);
+}
+
+static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
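+ /* Tx queue triads occupy the front of the qt array, followed by Rx triads */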
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
+ }
+}
+
+static void fbnic_clone_swap(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ struct fbnic_dev *fbd = orig->fbd;
+ unsigned int i;
+
+ for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
+ fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
+ for (i = 0; i < orig->num_napi; i++)
+ fbnic_aggregate_vector_counters(orig, orig->napi[i]);
+
+ fbnic_clone_swap_cfg(orig, clone);
+
+ for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
+ swap(clone->napi[i], orig->napi[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
+ swap(clone->tx[i], orig->tx[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
+ swap(clone->rx[i], orig->rx[i]);
+}
+
+static void fbnic_clone_free(struct fbnic_net *clone)
+{
+ kfree(clone);
}
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
@@ -97,6 +147,21 @@ static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
}
}
+static void fbnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_stat *stat;
+ int i;
+
+ fbnic_get_hw_stats(fbn->fbd);
+
+ for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
+ stat = &fbnic_gstrings_hw_stats[i];
+ data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+ }
+}
+
static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
@@ -107,19 +172,375 @@ static int fbnic_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fbnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static int fbnic_get_rss_hash_idx(u32 flow_type)
{
- struct fbnic_net *fbn = netdev_priv(dev);
- const struct fbnic_stat *stat;
- int i;
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return FBNIC_TCP4_HASH_OPT;
+ case TCP_V6_FLOW:
+ return FBNIC_TCP6_HASH_OPT;
+ case UDP_V4_FLOW:
+ return FBNIC_UDP4_HASH_OPT;
+ case UDP_V6_FLOW:
+ return FBNIC_UDP6_HASH_OPT;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IPV4_USER_FLOW:
+ return FBNIC_IPV4_HASH_OPT;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ return FBNIC_IPV6_HASH_OPT;
+ case ETHER_FLOW:
+ return FBNIC_ETHER_HASH_OPT;
+ }
- fbnic_get_hw_stats(fbn->fbd);
+ return -1;
+}
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
- stat = &fbnic_gstrings_hw_stats[i];
- data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+static int
+fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from the rss_flow_hash table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
+static int fbnic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = fbn->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = fbnic_get_rss_hash_opts(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
+static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = fbnic_set_rss_hash_opts(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_KEY_BYTE_LEN;
+}
+
+static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_TBL_SIZE;
+}
+
+static int
+fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
+ u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
+
+ rxfh->key[i] = rss_key >> 24;
+ }
+ }
+
+ if (rxfh->indir) {
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ rxfh->indir[i] = fbn->indir_tbl[0][i];
+ }
+
+ return 0;
+}
+
+static unsigned int
+fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
+{
+ unsigned int i, changes = 0;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (fbn->indir_tbl[idx][i] == indir[i])
+ continue;
+
+ fbn->indir_tbl[idx][i] = indir[i];
+ changes++;
+ }
+
+ return changes;
+}
+
+static int
+fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i, changes = 0;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EINVAL;
+
+ if (rxfh->key) {
+ u32 rss_key = 0;
+
+ for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
+ rss_key >>= 8;
+ rss_key |= (u32)(rxfh->key[i]) << 24;
+
+ if (i % 4)
+ continue;
+
+ if (fbn->rss_key[i / 4] == rss_key)
+ continue;
+
+ fbn->rss_key[i / 4] = rss_key;
+ changes++;
+ }
}
+
+ if (rxfh->indir)
+ changes += fbnic_set_indir(fbn, 0, rxfh->indir);
+
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static void fbnic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ ch->max_rx = fbd->max_num_queues;
+ ch->max_tx = fbd->max_num_queues;
+ ch->max_combined = min(ch->max_rx, ch->max_tx);
+ ch->max_other = FBNIC_NON_NAPI_VECTORS;
+
+ if (fbn->num_rx_queues > fbn->num_napi ||
+ fbn->num_tx_queues > fbn->num_napi)
+ ch->combined_count = min(fbn->num_rx_queues,
+ fbn->num_tx_queues);
+ else
+ ch->combined_count =
+ fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
+ ch->rx_count = fbn->num_rx_queues - ch->combined_count;
+ ch->tx_count = fbn->num_tx_queues - ch->combined_count;
+ ch->other_count = FBNIC_NON_NAPI_VECTORS;
+}
+
+static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
+ unsigned int max_napis)
+{
+ fbn->num_rx_queues = ch->rx_count + ch->combined_count;
+ fbn->num_tx_queues = ch->tx_count + ch->combined_count;
+ fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
+ max_napis);
+}
+
+static int fbnic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int max_napis, standalone;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
+ int err;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+ standalone = ch->rx_count + ch->tx_count;
+
+ /* Limits for standalone queues:
+ * - each queue has its own NAPI (num_napi >= rx + tx + combined)
+ * - when combined is nonzero, rx or tx must be zero
+ */
+ if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
+ (standalone && standalone + ch->combined_count > max_napis) ||
+ ch->rx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->tx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->other_count != FBNIC_NON_NAPI_VECTORS)
+ return -EINVAL;
+
+ if (!netif_running(netdev)) {
+ fbnic_set_queues(fbn, ch, max_napis);
+ fbnic_reset_indir_tbl(fbn);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_queues(clone, ch, max_napis);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ /* Reset RSS indirection table */
+ fbnic_reset_indir_tbl(fbn);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
+static int
+fbnic_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *tsinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+
+ tsinfo->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ tsinfo->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ tsinfo->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static void fbnic_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u64 ts_packets, ts_lost;
+ struct fbnic_ring *ring;
+ unsigned int start;
+ int i;
+
+ ts_stats->pkts = fbn->tx_stats.ts_packets;
+ ts_stats->lost = fbn->tx_stats.ts_lost;
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ ring = fbn->tx[i];
+ do {
+ start = u64_stats_fetch_begin(&ring->stats.syncp);
+ ts_packets = ring->stats.ts_packets;
+ ts_lost = ring->stats.ts_lost;
+ } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
+ ts_stats->pkts += ts_packets;
+ ts_stats->lost += ts_lost;
+ }
+}
+
+static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+{
+ if (counter->reported)
+ *stat = counter->value;
}
static void
@@ -164,44 +585,6 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
-static void fbnic_get_ts_stats(struct net_device *netdev,
- struct ethtool_ts_stats *ts_stats)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
- u64 ts_packets, ts_lost;
- struct fbnic_ring *ring;
- unsigned int start;
- int i;
-
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
- for (i = 0; i < fbn->num_tx_queues; i++) {
- ring = fbn->tx[i];
- do {
- start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
- } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
- ts_stats->pkts += ts_packets;
- ts_stats->lost += ts_lost;
- }
-}
-
-static void fbnic_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *data)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
-}
-
-static int fbnic_get_regs_len(struct net_device *netdev)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
-}
-
static const struct ethtool_ops fbnic_ethtool_ops = {
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
@@ -209,6 +592,14 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_strings = fbnic_get_strings,
.get_ethtool_stats = fbnic_get_ethtool_stats,
.get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
.get_ts_info = fbnic_get_ts_info,
.get_ts_stats = fbnic_get_ts_stats,
.get_eth_mac_stats = fbnic_get_eth_mac_stats,
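
The set_channels path above follows a clone-and-swap pattern: allocate a shadow fbnic_net, stage the new queue resources on it, quiesce the device, swap the staged state into the live structure, and let the old resources ride out on the clone. A minimal sketch of the idiom under illustrative names (cfg_commit(), alloc_rings(), stop_traffic(), swap_state(), start_traffic(), and free_rings() are hypothetical stand-ins, not driver API):

struct cfg {
	unsigned int num_rx, num_tx;
	void *rings;				/* live queue state */
};

static int cfg_commit(struct cfg *live, const struct cfg *want)
{
	struct cfg *clone = kmemdup(live, sizeof(*live), GFP_KERNEL);
	int err;

	if (!clone)
		return -ENOMEM;
	clone->rings = NULL;			/* clone owns no live state yet */
	clone->num_rx = want->num_rx;
	clone->num_tx = want->num_tx;

	err = alloc_rings(clone);		/* stage new resources first */
	if (err)
		goto free_clone;

	stop_traffic(live);
	swap_state(live, clone);		/* cheap pointer/scalar swaps */
	start_traffic(live);

	free_rings(clone);			/* old resources freed via the clone */
free_clone:
	kfree(clone);
	return err;
}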
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 8f7a2a19ddf8..bbc7c1c0c37e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -228,6 +228,63 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
tx_mbx->head = head;
}
+static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
+ /* If we are already waiting on a completion then abort */
+ if (cmpl_data && fbd->cmpl_data) {
+ err = -EBUSY;
+ goto unlock_mbx;
+ }
+
+ /* Record completion location and submit request */
+ if (cmpl_data)
+ fbd->cmpl_data = cmpl_data;
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ /* If msg failed then clear completion data for next caller */
+ if (err && cmpl_data)
+ fbd->cmpl_data = NULL;
+
+unlock_mbx:
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static void fbnic_fw_release_cmpl_data(struct kref *kref)
+{
+ struct fbnic_fw_completion *cmpl_data;
+
+ cmpl_data = container_of(kref, struct fbnic_fw_completion,
+ ref_count);
+ kfree(cmpl_data);
+}
+
+static struct fbnic_fw_completion *
+fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_fw_completion *cmpl_data = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data;
+ kref_get(&fbd->cmpl_data->ref_count);
+ }
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return cmpl_data;
+}
+
/**
* fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
* @fbd: FBNIC device structure
@@ -651,6 +708,84 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+/**
+ * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion data structure to store sensor response
+ *
+ * Asks the firmware to provide an update with the latest sensor data.
+ * The response will contain temperature and voltage readings.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_tsene_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ int err = 0;
+
+ /* Verify we have a completion pointer to deliver the data to */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!cmpl_data)
+ return -EINVAL;
+
+ if (results[FBNIC_TSENE_ERROR]) {
+ err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]);
+ if (err)
+ goto exit_complete;
+ }
+
+ if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) {
+ err = -EINVAL;
+ goto exit_complete;
+ }
+
+ cmpl_data->u.tsene.millidegrees =
+ fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]);
+ cmpl_data->u.tsene.millivolts =
+ fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]);
+
+exit_complete:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
fbnic_fw_parse_cap_resp),
@@ -658,6 +793,9 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(TSENE_READ_RESP,
+ fbnic_tsene_read_resp_index,
+ fbnic_fw_parse_tsene_read_resp),
FBNIC_TLV_MSG_ERROR
};
@@ -802,3 +940,25 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
fw_version, str_sz);
}
+
+void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl,
+ u32 msg_type)
+{
+ fw_cmpl->msg_type = msg_type;
+ init_completion(&fw_cmpl->done);
+ kref_init(&fw_cmpl->ref_count);
+}
+
+void fbnic_fw_clear_compl(struct fbnic_dev *fbd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbd->cmpl_data = NULL;
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
+{
+ kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
+}
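
Taken together, the helpers above form a reference-counted request/response handshake: the caller holds one reference from fbnic_fw_init_cmpl(), the TLV parser takes a second while it fills in the response, and the last fbnic_fw_put_cmpl() frees the structure. A hedged caller-side sketch, mirroring fbnic_mac_get_sensor_asic() later in the series with retries trimmed:

	struct fbnic_fw_completion *cmpl;
	int err;

	cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
	if (!cmpl)
		return -ENOMEM;

	fbnic_fw_init_cmpl(cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);	/* ref = 1 */

	err = fbnic_fw_xmit_tsene_read_msg(fbd, cmpl);
	if (!err) {
		if (wait_for_completion_timeout(&cmpl->done, 2 * HZ))
			err = cmpl->result;	/* filled in by the TLV parser */
		else
			err = -ETIMEDOUT;
	}

	fbnic_fw_clear_compl(fbd);	/* detach the mailbox's pointer */
	fbnic_fw_put_cmpl(cmpl);	/* drop our ref; kfree() on the last put */
	return err;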
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 221faf8c6756..fe68333d51b1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -44,6 +44,19 @@ struct fbnic_fw_cap {
u8 link_fec;
};
+struct fbnic_fw_completion {
+ u32 msg_type;
+ struct completion done;
+ struct kref ref_count;
+ int result;
+ union {
+ struct {
+ s32 millivolts;
+ s32 millidegrees;
+ } tsene;
+ } u;
+};
+
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
@@ -52,6 +65,12 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data,
+ u32 msg_type);
+void fbnic_fw_clear_compl(struct fbnic_dev *fbd);
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
@@ -76,6 +95,8 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -118,6 +139,13 @@ enum {
};
enum {
+ FBNIC_TSENE_THERM = 0x0,
+ FBNIC_TSENE_VOLT = 0x1,
+ FBNIC_TSENE_ERROR = 0x2,
+ FBNIC_TSENE_MSG_MAX
+};
+
+enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
new file mode 100644
index 000000000000..def8598aceec
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/hwmon.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+
+static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
+{
+ if (type == hwmon_temp)
+ return FBNIC_SENSOR_TEMP;
+ if (type == hwmon_in)
+ return FBNIC_SENSOR_VOLTAGE;
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t fbnic_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return 0444;
+ if (type == hwmon_in && attr == hwmon_in_input)
+ return 0444;
+
+ return 0;
+}
+
+static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ const struct fbnic_mac *mac = fbd->mac;
+ int id;
+
+ id = fbnic_hwmon_sensor_id(type);
+ return id < 0 ? id : mac->get_sensor(fbd, id, val);
+}
+
+static const struct hwmon_ops fbnic_hwmon_ops = {
+ .is_visible = fbnic_hwmon_is_visible,
+ .read = fbnic_hwmon_read,
+};
+
+static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info fbnic_chip_info = {
+ .ops = &fbnic_hwmon_ops,
+ .info = fbnic_hwmon_info,
+};
+
+void fbnic_hwmon_register(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return;
+
+ fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
+ fbd, &fbnic_chip_info,
+ NULL);
+ if (IS_ERR(fbd->hwmon)) {
+ dev_notice(fbd->dev,
+ "Failed to register hwmon device %pe\n",
+ fbd->hwmon);
+ fbd->hwmon = NULL;
+ }
+}
+
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
+ return;
+
+ hwmon_device_unregister(fbd->hwmon);
+ fbd->hwmon = NULL;
+}
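
For reference, the channel layout above surfaces through the standard hwmon sysfs attributes: temp1_input in millidegrees Celsius and in1_input in millivolts, matching the fbnic sensor units. A userspace-side sketch of reading the value; the hwmon0 index is an assumption and varies per system:

#include <stdio.h>

int main(void)
{
	long mdeg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f || fscanf(f, "%ld", &mdeg) != 1)
		return 1;
	fclose(f);
	printf("NIC temperature: %.3f C\n", mdeg / 1000.0);
	return 0;
}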
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 914362195920..1bbc0e56f3a0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -146,6 +146,17 @@ void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
free_irq(fbd->pcs_msix_vector, fbd);
}
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ synchronize_irq(irq);
+}
+
int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data)
{
@@ -169,6 +180,48 @@ void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
free_irq(irq, data);
}
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fbd->napi_irq); i++)
+ snprintf(fbd->napi_irq[i].name,
+ sizeof(fbd->napi_irq[i].name),
+ "%s-TxRx-%u", fbd->netdev->name, i);
+}
+
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+ int err;
+
+ if (!fbd->napi_irq[i].users) {
+ err = fbnic_request_irq(fbd, nv->v_idx,
+ fbnic_msix_clean_rings, 0,
+ fbd->napi_irq[i].name,
+ &fbn->napi[i]);
+ if (err)
+ return err;
+ }
+
+ fbd->napi_irq[i].users++;
+ return 0;
+}
+
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+
+ if (--fbd->napi_irq[i].users)
+ return;
+
+ fbnic_free_irq(fbd, nv->v_idx, &fbn->napi[i]);
+}
+
void fbnic_free_irqs(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
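
The users counter above implements a request-once/free-last discipline, so repeated NAPI setup and teardown (for example during set_channels) does not thrash request_irq(). The core idiom reduced to its essentials (shared_irq_* are illustrative names, not driver API):

struct shared_irq {
	unsigned int users;
	int irq;
};

static int shared_irq_get(struct shared_irq *s, irq_handler_t handler,
			  const char *name, void *data)
{
	if (!s->users) {
		int err = request_irq(s->irq, handler, 0, name, data);

		if (err)
			return err;
	}
	s->users++;		/* first user requested it; later users share */
	return 0;
}

static void shared_irq_put(struct shared_irq *s, void *data)
{
	if (--s->users)
		return;		/* still in use elsewhere */
	free_irq(s->irq, data);
}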
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 7b654d0a6dac..14291401f463 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -686,6 +686,77 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
+static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
+ long *val)
+{
+ struct fbnic_fw_completion *fw_cmpl;
+ int err = 0, retries = 5;
+ s32 *sensor;
+
+ fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Initialize completion and queue it for FW to process */
+ fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+
+ switch (id) {
+ case FBNIC_SENSOR_TEMP:
+ sensor = &fw_cmpl->u.tsene.millidegrees;
+ break;
+ case FBNIC_SENSOR_VOLTAGE:
+ sensor = &fw_cmpl->u.tsene.millivolts;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit_free;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_free;
+ }
+
+ /* Allow 2 seconds for reply, resend and try up to 5 times */
+ while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ retries--;
+
+ if (retries == 0) {
+ dev_err(fbd->dev,
+ "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_cleanup;
+ }
+ }
+
+ /* Handle error returned by firmware */
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ dev_err(fbd->dev, "%s: Firmware returned error %d\n",
+ __func__, err);
+ goto exit_cleanup;
+ }
+
+ *val = *sensor;
+exit_cleanup:
+ fbnic_fw_clear_compl(fbd);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
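+
+/* The loop above is the usual send/wait/resend idiom:
+ * wait_for_completion_timeout() returns 0 on timeout, so the body runs only
+ * when the 2 second window lapses, resending the request until the retry
+ * budget is spent. A sketch in isolation, where resend_request() is a
+ * hypothetical stand-in for the xmit helper:
+ *
+ *	int retries = 5;
+ *
+ *	while (!wait_for_completion_timeout(&cmpl->done, 2 * HZ)) {
+ *		if (--retries == 0)
+ *			return -ETIMEDOUT;
+ *		if (resend_request())
+ *			return -EIO;
+ *	}
+ *	// cmpl->result now holds the firmware's status
+ */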
+
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
@@ -695,6 +766,7 @@ static const struct fbnic_mac fbnic_mac_asic = {
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
+ .get_sensor = fbnic_mac_get_sensor_asic,
};
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 476239a9d381..05a591653e09 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -47,6 +47,11 @@ enum {
#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+enum fbnic_sensor_id {
+ FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
+ FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */
+};
+
/* This structure defines the interface hooks for the MAC. The MAC hooks
* will be configured as a const struct provided with a set of function
* pointers.
@@ -83,6 +88,8 @@ struct fbnic_mac {
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+
+ int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
};
int fbnic_mac_init(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index fc7d80db5fa6..7a96b6ee773f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -23,13 +23,7 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto free_napi_vectors;
- err = netif_set_real_num_tx_queues(fbn->netdev,
- fbn->num_tx_queues);
- if (err)
- goto free_resources;
-
- err = netif_set_real_num_rx_queues(fbn->netdev,
- fbn->num_rx_queues);
+ err = fbnic_set_netif_queues(fbn);
if (err)
goto free_resources;
@@ -74,6 +68,8 @@ static int fbnic_open(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
int err;
+ fbnic_napi_name_irqs(fbn->fbd);
+
err = __fbnic_open(fbn);
if (!err)
fbnic_up(fbn);
@@ -91,6 +87,7 @@ static int fbnic_stop(struct net_device *netdev)
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+ fbnic_reset_netif_queues(fbn);
fbnic_free_resources(fbn);
fbnic_free_napi_vectors(fbn);
@@ -615,7 +612,6 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->netdev = netdev;
fbn->fbd = fbd;
- INIT_LIST_HEAD(&fbn->napis);
fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index b8417b300778..a392ac1cc4f2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -11,10 +11,14 @@
#include "fbnic_rpc.h"
#include "fbnic_txrx.h"
+#define FBNIC_MAX_NAPI_VECTORS 128u
+
struct fbnic_net {
struct fbnic_ring *tx[FBNIC_MAX_TXQS];
struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+ struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
+
struct net_device *netdev;
struct fbnic_dev *fbd;
@@ -56,13 +60,12 @@ struct fbnic_net {
/* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
-
- struct list_head napis;
};
int __fbnic_open(struct fbnic_net *fbn);
void fbnic_up(struct fbnic_net *fbn);
void fbnic_down(struct fbnic_net *fbn);
+void fbnic_down_noidle(struct fbnic_net *fbn);
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
void fbnic_netdev_free(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 7ccf192f13d5..6cbbc2ee3e1f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -145,7 +145,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_service_task_start(fbn);
}
-static void fbnic_down_noidle(struct fbnic_net *fbn)
+void fbnic_down_noidle(struct fbnic_net *fbn)
{
fbnic_service_task_stop(fbn);
@@ -296,6 +296,8 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
+ fbnic_hwmon_register(fbd);
+
if (!fbd->dsn) {
dev_warn(&pdev->dev, "Reading serial number failed\n");
goto init_failure_mode;
@@ -358,6 +360,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_netdev_free(fbd);
}
+ fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
fbnic_fw_disable_mbx(fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 1a5e1e719b30..bb11fc83367d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -15,7 +15,7 @@ fbnic_pcs_to_net(struct phylink_pcs *pcs)
}
static void
-fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
+fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 908c098cd59e..c25bd300b902 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -13,10 +13,11 @@ void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
unsigned int num_rx = fbn->num_rx_queues;
unsigned int i;
- for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (netif_is_rxfh_configured(fbn->netdev))
+ return;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
- fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
- }
}
void fbnic_rss_key_fill(u32 *buffer)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index b5050fabe8fe..d4d7027df9a0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -1033,20 +1033,20 @@ static int fbnic_poll(struct napi_struct *napi, int budget)
if (likely(napi_complete_done(napi, work_done)))
fbnic_nv_irq_rearm(nv);
- return 0;
+ return work_done;
}
-static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
{
- struct fbnic_napi_vector *nv = data;
+ struct fbnic_napi_vector *nv = *(void **)data;
napi_schedule_irqoff(&nv->napi);
return IRQ_HANDLED;
}
-static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *rxr)
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
{
struct fbnic_queue_stats *stats = &rxr->stats;
@@ -1056,8 +1056,8 @@ static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.dropped += stats->dropped;
}
-static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *txr)
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
{
struct fbnic_queue_stats *stats = &txr->stats;
@@ -1099,7 +1099,6 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
struct fbnic_dev *fbd = nv->fbd;
- u32 v_idx = nv->v_idx;
int i, j;
for (i = 0; i < nv->txt_count; i++) {
@@ -1113,31 +1112,20 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
}
- fbnic_free_irq(fbd, v_idx, nv);
+ fbnic_napi_free_irq(fbd, nv);
page_pool_destroy(nv->page_pool);
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
}
void fbnic_free_napi_vectors(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv, *temp;
-
- list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
- fbnic_free_napi_vector(fbn, nv);
-}
-
-static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
-{
- unsigned char *dev_name = nv->napi.dev->name;
+ int i;
- if (!nv->rxt_count)
- snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
- else
- snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ for (i = 0; i < fbn->num_napi; i++)
+ if (fbn->napi[i])
+ fbnic_free_napi_vector(fbn, fbn->napi[i]);
}
#define FBNIC_PAGE_POOL_FLAGS \
@@ -1222,7 +1210,7 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
nv->v_idx = v_idx;
/* Tie napi to netdev */
- list_add(&nv->napis, &fbn->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = nv;
netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
/* Record IRQ to NAPI struct */
@@ -1239,12 +1227,8 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
goto napi_del;
}
- /* Initialize vector name */
- fbnic_name_napi_vector(nv);
-
/* Request the IRQ for napi vector */
- err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
- IRQF_SHARED, nv->name, nv);
+ err = fbnic_napi_request_irq(fbd, nv);
if (err)
goto pp_destroy;
@@ -1307,7 +1291,7 @@ pp_destroy:
page_pool_destroy(nv->page_pool);
napi_del:
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
return err;
}
@@ -1612,19 +1596,18 @@ free_resources:
void fbnic_free_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
}
int fbnic_alloc_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
- int err = -ENODEV;
+ int i, err = -ENODEV;
- list_for_each_entry(nv, &fbn->napis, napis) {
- err = fbnic_alloc_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++) {
+ err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]);
if (err)
goto free_resources;
}
@@ -1632,12 +1615,77 @@ int fbnic_alloc_resources(struct fbnic_net *fbn)
return 0;
free_resources:
- list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ while (i--)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
return err;
}
+static void fbnic_set_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Associate Tx queue with NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Associate Rx queue with NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ }
+}
+
+static void fbnic_reset_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Disassociate Tx queue from NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Disassociate Rx queue from NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+}
+
+int fbnic_set_netif_queues(struct fbnic_net *fbn)
+{
+ int i, err;
+
+ err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues,
+ fbn->num_rx_queues);
+ if (err)
+ return err;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_set_netif_napi(fbn->napi[i]);
+
+ return 0;
+}
+
+void fbnic_reset_netif_queues(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_reset_netif_napi(fbn->napi[i]);
+}
+
static void fbnic_disable_twq0(struct fbnic_ring *txr)
{
u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
@@ -1670,33 +1718,34 @@ static void fbnic_disable_rcq(struct fbnic_ring *rxr)
void fbnic_napi_disable(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- napi_disable(&nv->napi);
+ for (i = 0; i < fbn->num_napi; i++) {
+ napi_disable(&fbn->napi[i]->napi);
- fbnic_nv_irq_disable(nv);
+ fbnic_nv_irq_disable(fbn->napi[i]);
}
}
void fbnic_disable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Disable Tx queue triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_twq0(&qt->sub0);
fbnic_disable_tcq(&qt->cmpl);
}
/* Disable Rx queue triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_bdq(&qt->sub0, &qt->sub1);
fbnic_disable_rcq(&qt->cmpl);
@@ -1792,14 +1841,15 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
void fbnic_flush(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Flush any processed Tx Queue Triads and drop the rest */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
struct netdev_queue *tx_queue;
/* Clean the work queues of unprocessed work */
@@ -1816,15 +1866,11 @@ void fbnic_flush(struct fbnic_net *fbn)
tx_queue = netdev_get_tx_queue(nv->napi.dev,
qt->sub0.q_idx);
netdev_tx_reset_queue(tx_queue);
-
- /* Disassociate Tx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, NULL);
}
/* Flush any processed Rx Queue Triads and drop the rest */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Clean the work queues of unprocessed work */
fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
@@ -1835,43 +1881,23 @@ void fbnic_flush(struct fbnic_net *fbn)
fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
qt->cmpl.pkt->buff.data_hard_start = NULL;
-
- /* Disassociate Rx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, NULL);
}
}
}
void fbnic_fill(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
-
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
-
- /* Configure NAPI mapping for Tx */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Nothing to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+ int i;
- /* Associate Tx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, &nv->napi);
- }
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Configure NAPI mapping and populate pages
* in the BDQ rings to use for Rx
*/
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Associate Rx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Populate the header and payload BDQs */
fbnic_fill_bdq(nv, &qt->sub0);
@@ -2025,21 +2051,23 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
void fbnic_enable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Setup Tx Queue Triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_twq0(&qt->sub0);
fbnic_enable_tcq(nv, &qt->cmpl);
}
/* Setup Rx Queue Triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_bdq(&qt->sub0, &qt->sub1);
fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
@@ -2064,10 +2092,11 @@ void fbnic_napi_enable(struct fbnic_net *fbn)
{
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
napi_enable(&nv->napi);
fbnic_nv_irq_enable(nv);
@@ -2096,17 +2125,18 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Find RQs which are completely out of pages */
- for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) {
/* Assume 4 pages is always enough to fit a packet
* and therefore generate a completion and an IRQ.
*/
- if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
- fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ if (fbnic_desc_used(&nv->qt[t].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[t].sub1) < 4)
irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
}
}
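
A detail worth keeping in mind across the reworked loops above: nv->qt[] packs txt_count Tx triads followed by rxt_count Rx triads, so every Rx walk starts its index at txt_count. Reduced to the bare idiom (handle_tx_triad() and handle_rx_triad() are hypothetical consumers):

	int j, t;

	for (t = 0; t < nv->txt_count; t++)
		handle_tx_triad(&nv->qt[t]);	/* Tx triads come first */

	for (j = 0; j < nv->rxt_count; j++, t++)
		handle_rx_triad(&nv->qt[t]);	/* Rx triads follow at t = txt_count */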
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 8d626287c3f4..c2a94f31f71b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -104,14 +104,11 @@ struct fbnic_napi_vector {
struct device *dev; /* Device for DMA unmapping */
struct page_pool *page_pool;
struct fbnic_dev *fbd;
- char name[IFNAMSIZ + 9];
u16 v_idx;
u8 txt_count;
u8 rxt_count;
- struct list_head napis;
-
struct fbnic_q_triad qt[];
};
@@ -123,10 +120,18 @@ netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features);
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr);
+
int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
void fbnic_free_napi_vectors(struct fbnic_net *fbn);
int fbnic_alloc_resources(struct fbnic_net *fbn);
void fbnic_free_resources(struct fbnic_net *fbn);
+int fbnic_set_netif_queues(struct fbnic_net *fbn);
+void fbnic_reset_netif_queues(struct fbnic_net *fbn);
+irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
void fbnic_napi_enable(struct fbnic_net *fbn);
void fbnic_napi_disable(struct fbnic_net *fbn);
void fbnic_enable(struct fbnic_net *fbn);
@@ -137,4 +142,9 @@ void fbnic_fill(struct fbnic_net *fbn);
void fbnic_napi_depletion_check(struct net_device *netdev);
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
+{
+ return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
+}
+
#endif /* _FBNIC_TXRX_H_ */
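
fbnic_napi_idx() encodes the fixed offset between MSI-X vector numbers and fbn->napi[] slots: the first FBNIC_NON_NAPI_VECTORS vectors belong to firmware and PCS, so NAPI vector v_idx N occupies slot N - FBNIC_NON_NAPI_VECTORS. A sketch of the iteration pattern this enables, replacing the old list walk:

	int i;

	for (i = 0; i < fbn->num_napi; i++) {
		struct fbnic_napi_vector *nv = fbn->napi[i];

		if (!nv)		/* slots can be empty mid-teardown */
			continue;

		WARN_ON(fbnic_napi_idx(nv) != i);	/* invariant kept at alloc/free */
	}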
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 1a1cbd034eda..1459acfb1e61 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1055,9 +1055,6 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- eee->tx_lpi_timer = lan743x_csr_read(adapter,
- MAC_EEE_TX_LPI_REQ_DLY_CNT);
-
return phylink_ethtool_get_eee(adapter->phylink, eee);
}
@@ -1065,24 +1062,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- u32 tx_lpi_timer;
-
- tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- if (tx_lpi_timer != eee->tx_lpi_timer) {
- u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
-
- /* Software should only change this field when Energy Efficient
- * Ethernet Enable (EEEEN) is cleared.
- * This function will trigger an autonegotiation restart and
- * eee will be reenabled during link up if eee was negotiated.
- */
- lan743x_mac_eee_enable(adapter, false);
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
- eee->tx_lpi_timer);
-
- if (mac_cr & MAC_CR_EEE_EN_)
- lan743x_mac_eee_enable(adapter, true);
- }
return phylink_ethtool_set_eee(adapter->phylink, eee);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4dc5adcda6a3..23760b613d3e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2966,7 +2966,7 @@ static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
return lan743x_pcs_power_reset(adapter);
}
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+static void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
{
u32 mac_cr;
@@ -3027,10 +3027,8 @@ static void lan743x_phylink_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct net_device *netdev = to_net_dev(config->dev);
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- netif_tx_stop_all_queues(to_net_dev(config->dev));
- lan743x_mac_eee_enable(adapter, false);
+ netif_tx_stop_all_queues(netdev);
}
static void lan743x_phylink_mac_link_up(struct phylink_config *config,
@@ -3072,16 +3070,40 @@ static void lan743x_phylink_mac_link_up(struct phylink_config *config,
cap & FLOW_CTRL_TX,
cap & FLOW_CTRL_RX);
- if (phydev)
- lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
-
netif_tx_wake_all_queues(netdev);
}
+static void lan743x_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static int lan743x_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
+ * EEEEN during probe, and phylink itself guarantees that
+ * mac_disable_tx_lpi() will have been previously called.
+ */
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, timer);
+ lan743x_mac_eee_enable(adapter, true);
+
+ return 0;
+}
+
static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
.mac_config = lan743x_phylink_mac_config,
.mac_link_down = lan743x_phylink_mac_link_down,
.mac_link_up = lan743x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = lan743x_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = lan743x_mac_enable_tx_lpi,
};
static int lan743x_phylink_create(struct lan743x_adapter *adapter)
@@ -3095,6 +3117,9 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+ adapter->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ adapter->phylink_config.lpi_timer_default =
+ lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
lan743x_phy_interface_select(adapter);
@@ -3120,6 +3145,10 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
}
+ memcpy(adapter->phylink_config.lpi_interfaces,
+ adapter->phylink_config.supported_interfaces,
+ sizeof(adapter->phylink_config.lpi_interfaces));
+
pl = phylink_create(&adapter->phylink_config, NULL,
adapter->phy_interface, &lan743x_phylink_mac_ops);
@@ -3517,6 +3546,9 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&tx->ring_lock);
}
+ /* Ensure EEEEN is clear */
+ lan743x_mac_eee_enable(adapter, false);
+
return 0;
}
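
With this conversion, phylink owns the EEE/LPI sequencing: it guarantees mac_disable_tx_lpi() runs before any reconfiguration and calls mac_enable_tx_lpi() with the resolved timer once LPI may be used, so the driver no longer toggles EEEEN around link events itself. The shape of the contract as a hedged sketch (my_* names are illustrative):

static int my_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				bool tx_clk_stop)
{
	/* program the LPI request delay, then set the enable bit */
	return 0;
}

static void my_mac_disable_tx_lpi(struct phylink_config *config)
{
	/* clear the enable bit; timers may be reprogrammed afterwards */
}

static const struct phylink_mac_ops my_mac_ops = {
	/* .mac_config / .mac_link_up / .mac_link_down as usual */
	.mac_enable_tx_lpi = my_mac_enable_tx_lpi,
	.mac_disable_tx_lpi = my_mac_disable_tx_lpi,
};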
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 8ef897c114d3..7f73d66854be 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1206,6 +1206,5 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 25cb2f61986f..1efa584e7107 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -443,7 +443,7 @@ int lan966x_stats_init(struct lan966x *lan966x);
void lan966x_port_config_down(struct lan966x_port *port);
void lan966x_port_config_up(struct lan966x_port *port);
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state);
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 1d63903f9006..75188b99e4e7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -88,11 +88,12 @@ static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
}
static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
- lan966x_port_status_get(port, state);
+ lan966x_port_status_get(port, neg_mode, state);
}
static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index fdfa4040d9ee..cf7de0267c32 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -284,7 +284,7 @@ void lan966x_port_config_up(struct lan966x_port *port)
lan966x_port_link_up(port);
}
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x *lan966x = port->lan966x;
@@ -314,7 +314,7 @@ void lan966x_port_status_get(struct lan966x_port *port,
bmsr |= BMSR_ANEGCOMPLETE;
lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
- phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
} else {
if (!state->link)
return;
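
The signature change here tracks a phylink API update: ->pcs_get_state() now receives the negotiation mode phylink configured, and the clause-22 decode helper uses it when resolving the link parameters. Minimal shape of a conforming implementation, where my_pcs_read() is a hypothetical register accessor:

static void my_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
			     struct phylink_link_state *state)
{
	u16 bmsr = my_pcs_read(pcs, MII_BMSR);
	u16 lpa = my_pcs_read(pcs, MII_LPA);

	phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}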
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 35b057c9d0cb..35e1c0cf345e 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -28,5 +28,6 @@ config SPARX5_DCB
config LAN969X_SWITCH
bool "Lan969x switch driver"
depends on SPARX5_SWITCH
+ select PAGE_POOL
help
This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 4bf2a885a9da..d447f9e84d92 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -20,7 +20,9 @@ sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \
lan969x/lan969x.o \
lan969x/lan969x_calendar.o \
lan969x/lan969x_vcap_ag_api.o \
- lan969x/lan969x_vcap_impl.o
+ lan969x/lan969x_vcap_impl.o \
+ lan969x/lan969x_rgmii.o \
+ lan969x/lan969x_fdma.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
index c2afa2176b08..f3a9c71bea36 100644
--- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
@@ -90,9 +90,12 @@ static const struct sparx5_main_io_resource lan969x_main_iomap[] = {
{ TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */
{ TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */
{ TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */
+ { TARGET_DEVRGMII, 0x30e4000, 1 }, /* 0xe30e4000 */
+ { TARGET_DEVRGMII + 1, 0x30e8000, 1 }, /* 0xe30e8000 */
{ TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */
{ TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */
{ TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */
+ { TARGET_HSIO_WRAP, 0x3408000, 1 }, /* 0xe3408000 */
};
static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = {
@@ -329,6 +332,7 @@ static const struct sparx5_ops lan969x_ops = {
.is_port_5g = &lan969x_port_is_5g,
.is_port_10g = &lan969x_port_is_10g,
.is_port_25g = &lan969x_port_is_25g,
+ .is_port_rgmii = &lan969x_port_is_rgmii,
.get_port_dev_index = &lan969x_port_dev_mapping,
.get_port_dev_bit = &lan969x_get_dev_mode_bit,
.get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate,
@@ -336,6 +340,11 @@ static const struct sparx5_ops lan969x_ops = {
.set_port_mux = &lan969x_port_mux_set,
.ptp_irq_handler = &lan969x_ptp_irq_handler,
.dsm_calendar_calc = &lan969x_dsm_calendar_calc,
+ .port_config_rgmii = &lan969x_port_config_rgmii,
+ .fdma_init = &lan969x_fdma_init,
+ .fdma_deinit = &lan969x_fdma_deinit,
+ .fdma_poll = &lan969x_fdma_napi_poll,
+ .fdma_xmit = &lan969x_fdma_xmit,
};
const struct sparx5_match_data lan969x_desc = {
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
index 2489d0d32dfd..529fde3d4deb 100644
--- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
@@ -59,7 +59,24 @@ static inline bool lan969x_port_is_25g(int portno)
return false;
}
+static inline bool lan969x_port_is_rgmii(int portno)
+{
+ return portno == 28 || portno == 29;
+}
+
/* lan969x_calendar.c */
int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+
+/* lan969x_rgmii.c */
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+/* lan969x_fdma.c */
+int lan969x_fdma_init(struct sparx5 *sparx5);
+int lan969x_fdma_deinit(struct sparx5 *sparx5);
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight);
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
+
#endif
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
new file mode 100644
index 000000000000..1282f5c3ee6d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
+ */
+#include <net/page_pool/helpers.h>
+
+#include "../sparx5_main.h"
+#include "../sparx5_main_regs.h"
+#include "../sparx5_port.h"
+
+#include "fdma_api.h"
+#include "lan969x.h"
+
+#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
+
+static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ *dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;
+
+ return 0;
+}
+
+static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+
+ *dataptr = page_pool_get_dma_addr(page);
+
+ return 0;
+}
+
+static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
+{
+ struct fdma *fdma = &tx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i)
+ if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
+ return i;
+
+ return -ENOSPC;
+}
+
+static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+ struct sparx5_tx_buf *db;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&sparx5->tx_lock, flags);
+
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ db = &sparx5->tx.dbs[i];
+
+ if (!db->used)
+ continue;
+
+ if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
+ continue;
+
+ db->dev->stats.tx_bytes += db->skb->len;
+ db->dev->stats.tx_packets++;
+ sparx5->tx.packets++;
+
+ dma_unmap_single(sparx5->dev,
+ db->dma_addr,
+ db->skb->len,
+ DMA_TO_DEVICE);
+
+ if (!db->ptp)
+ napi_consume_skb(db->skb, weight);
+
+ db->used = false;
+ }
+
+ spin_unlock_irqrestore(&sparx5->tx_lock, flags);
+}
+
+static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
+{
+ struct fdma *fdma = &rx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i) {
+ for (int j = 0; j < fdma->n_dbs; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
+static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
+ struct sparx5_rx *rx)
+{
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ struct fdma *fdma = &rx->fdma;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ struct sk_buff *skb;
+ struct fdma_db *db;
+ struct page *page;
+
+ db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
+
+ sparx5_ifh_parse(sparx5, page_address(page), &fi);
+ port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
+ NULL;
+ if (WARN_ON(!port))
+ goto free_page;
+
+ skb = build_skb(page_address(page), fdma->db_size);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+ skb_put(skb, fdma_db_len_get(db));
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ skb->dev = port->ndev;
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fdma->n_dcbs * fdma->n_dbs,
+ .nid = NUMA_NO_NODE,
+ .dev = sparx5->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = fdma->db_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool))
+ return PTR_ERR(rx->page_pool);
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ return 0;
+}
+
+static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ tx->dbs = kcalloc(fdma->n_dcbs,
+ sizeof(struct sparx5_tx_buf),
+ GFP_KERNEL);
+ if (!tx->dbs)
+ return -ENOMEM;
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err) {
+ kfree(tx->dbs);
+ return err;
+ }
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
+ return 0;
+}
+
+static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->rx.fdma;
+
+ fdma->channel_id = FDMA_XTR_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+
+ /* Fetch a netdev for SKB and NAPI use; any will do */
+ for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
+ struct sparx5_port *port = sparx5->ports[idx];
+
+ if (port && port->ndev) {
+ sparx5->rx.ndev = port->ndev;
+ break;
+ }
+ }
+}
+
+static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+
+ fdma->channel_id = FDMA_INJ_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+}
+
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
+ struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
+ struct sk_buff *skb;
+
+ dcb_reload = fdma->dcb_index;
+
+ lan969x_fdma_tx_clear_buf(sparx5, weight);
+
+ /* Process RX data */
+ while (counter < weight) {
+ if (!fdma_has_frames(fdma))
+ break;
+
+ skb = lan969x_fdma_rx_get_frame(sparx5, rx);
+ if (!skb)
+ break;
+
+ napi_gro_receive(&rx->napi, skb);
+
+ fdma_db_advance(fdma);
+ counter++;
+ /* Check if the DCB can be reused */
+ if (fdma_dcb_is_reusable(fdma))
+ continue;
+
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
+ dcb_reload++;
+ /* n_dcbs must be a power of 2 */
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma,
+ old_dcb,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ sparx5_fdma_reload(sparx5, fdma);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int next_dcb, needed_headroom, needed_tailroom, err;
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ struct sparx5_tx_buf *db_buf;
+ u64 status;
+
+ next_dcb = lan969x_fdma_get_next_dcb(tx);
+ if (next_dcb < 0)
+ return -EBUSY;
+
+ needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ }
+
+ skb_push(skb, IFH_LEN * 4);
+ memcpy(skb->data, ifh, IFH_LEN * 4);
+ skb_put(skb, ETH_FCS_LEN);
+
+ db_buf = &tx->dbs[next_dcb];
+ db_buf->dma_addr = dma_map_single(sparx5->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
+ return -ENOMEM;
+
+ db_buf->dev = dev;
+ db_buf->skb = skb;
+ db_buf->ptp = false;
+ db_buf->used = true;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ db_buf->ptp = true;
+
+ status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len) |
+ FDMA_DCB_STATUS_INTR;
+
+ fdma_dcb_advance(fdma);
+ fdma_dcb_add(fdma, next_dcb, 0, status);
+
+ sparx5_fdma_reload(sparx5, fdma);
+
+ return NETDEV_TX_OK;
+}
+
+int lan969x_fdma_init(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ int err;
+
+ lan969x_fdma_rx_init(sparx5);
+ lan969x_fdma_tx_init(sparx5);
+ sparx5_fdma_injection_mode(sparx5);
+
+ err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask");
+ return err;
+ }
+
+ err = lan969x_fdma_rx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ err = lan969x_fdma_tx_alloc(sparx5);
+ if (err) {
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ /* Reset FDMA state */
+ spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
+ spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
+
+ return err;
+}
+
+int lan969x_fdma_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ sparx5_fdma_stop(sparx5);
+ fdma_free_coherent(sparx5->dev, &tx->fdma);
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ lan969x_fdma_free_pages(rx);
+ page_pool_destroy(rx->page_pool);
+
+ return 0;
+}
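The reload loop above leans on n_dcbs being a power of two: masking with
n_dcbs - 1 wraps the DCB index without a division. A minimal standalone
sketch of that invariant (illustrative only, not part of the patch):

	#include <assert.h>

	/* Wrap a ring index without a modulo; valid only for power-of-two rings. */
	static unsigned int ring_next(unsigned int idx, unsigned int n_dcbs)
	{
		assert((n_dcbs & (n_dcbs - 1)) == 0); /* power of two, e.g. FDMA_DCB_MAX = 64 */
		return (idx + 1) & (n_dcbs - 1);      /* same result as (idx + 1) % n_dcbs */
	}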
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
new file mode 100644
index 000000000000..4e422ca50828
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+/* Tx clock selectors */
+#define LAN969X_RGMII_TX_CLK_SEL_125MHZ 1 /* 1000Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_25MHZ 2 /* 100Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_2M5MHZ 3 /* 10Mbps */
+
+/* Port speed selectors */
+#define LAN969X_RGMII_SPEED_SEL_10 0 /* Select 10Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_100 1 /* Select 100Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_1000 2 /* Select 1000Mbps speed */
+
+/* Clock delay selectors */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS 2 /* Phase shift 45deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS 3 /* Phase shift 77deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS 4 /* Phase shift 90deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS 5 /* Phase shift 112deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS 6 /* Phase shift 135deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS 7 /* Phase shift 147deg */
+
+#define LAN969X_RGMII_PORT_START_IDX 28 /* Index of the first RGMII port */
+#define LAN969X_RGMII_IFG_TX 4 /* TX Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX1 5 /* RX1 Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX2 1 /* RX2 Inter Frame Gap value */
+
+#define RGMII_PORT_IDX(port) ((port)->portno - LAN969X_RGMII_PORT_START_IDX)
+
+/* Get the tx clock selector based on the port speed. */
+static int lan969x_rgmii_get_clk_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_TX_CLK_SEL_2M5MHZ :
+ speed == SPEED_100 ? LAN969X_RGMII_TX_CLK_SEL_25MHZ :
+ LAN969X_RGMII_TX_CLK_SEL_125MHZ);
+}
+
+/* Get the port speed selector based on the port speed. */
+static int lan969x_rgmii_get_speed_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_SPEED_SEL_10 :
+ speed == SPEED_100 ? LAN969X_RGMII_SPEED_SEL_100 :
+ LAN969X_RGMII_SPEED_SEL_1000);
+}
+
+/* Get the clock delay selector based on the clock delay in picoseconds. */
+static int lan969x_rgmii_get_clk_delay_sel(struct sparx5_port *port,
+ u32 delay_ps, u32 *clk_delay_sel)
+{
+ switch (delay_ps) {
+ case 0:
+ /* Hardware default selector. */
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 1000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS;
+ break;
+ case 1700:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS;
+ break;
+ case 2000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS;
+ break;
+ case 2500:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 3000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS;
+ break;
+ case 3300:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS;
+ break;
+ default:
+ dev_err(port->sparx5->dev, "Invalid RGMII delay: %u", delay_ps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Configure the RGMII tx clock frequency. */
+static void lan969x_rgmii_tx_clk_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_sel = lan969x_rgmii_get_clk_sel(conf->speed);
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Take the RGMII clock domain out of reset and set tx clock
+ * frequency.
+ */
+ spx5_rmw(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(clk_sel) |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(0),
+ HSIO_WRAP_RGMII_CFG_TX_CLK_CFG |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST,
+ port->sparx5, HSIO_WRAP_RGMII_CFG(idx));
+}
+
+/* Configure the RGMII port device. */
+static void lan969x_rgmii_port_device_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 dtag, dotag, etype, speed_sel, idx = RGMII_PORT_IDX(port);
+
+ speed_sel = lan969x_rgmii_get_speed_sel(conf->speed);
+
+ etype = (port->vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ port->vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ ETH_P_8021Q : ETH_P_8021AD);
+
+ dtag = port->max_vlan_tags == SPX5_PORT_MAX_TAGS_TWO;
+ dotag = port->max_vlan_tags != SPX5_PORT_MAX_TAGS_NONE;
+
+ /* Enable the MAC. */
+ spx5_wr(DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(1),
+ port->sparx5, DEVRGMII_MAC_ENA_CFG(idx));
+
+ /* Configure the Inter Frame Gap. */
+ spx5_wr(DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(LAN969X_RGMII_IFG_TX) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(LAN969X_RGMII_IFG_RX1) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(LAN969X_RGMII_IFG_RX2),
+ port->sparx5, DEVRGMII_MAC_IFG_CFG(idx));
+
+ /* Configure port data rate. */
+ spx5_wr(DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel),
+ port->sparx5, DEVRGMII_DEV_RST_CTRL(idx));
+
+ /* Configure VLAN awareness. */
+ spx5_wr(DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ port->sparx5,
+ DEVRGMII_MAC_TAGS_CFG(idx));
+}
+
+/* Configure the RGMII delay lines in the MAC.
+ *
+ * We use the rx-internal-delay-ps" and "tx-internal-delay-ps" properties to
+ * configure the rx and tx delays for the MAC. If these properties are missing
+ * or set to zero, the MAC will not apply any delay.
+ *
+ * The PHY side delays are determined by the PHY mode
+ * (e.g. PHY_INTERFACE_MODE_RGMII_{ID, RXID, TXID}), and ignored by the MAC side
+ * entirely.
+ */
+static int lan969x_rgmii_delay_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 tx_clk_sel, rx_clk_sel, tx_delay_ps = 0, rx_delay_ps = 0;
+ u32 idx = RGMII_PORT_IDX(port);
+ int err;
+
+ of_property_read_u32(port->of_node, "rx-internal-delay-ps",
+ &rx_delay_ps);
+
+ of_property_read_u32(port->of_node, "tx-internal-delay-ps",
+ &tx_delay_ps);
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, rx_delay_ps, &rx_clk_sel);
+ if (err)
+ return err;
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, tx_delay_ps, &tx_clk_sel);
+ if (err)
+ return err;
+
+ /* Configure rx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!rx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(rx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 0));
+
+ /* Configure tx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!tx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(tx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 1));
+
+ return 0;
+}
+
+/* Configure GPIOs to be used as an RGMII interface. */
+static void lan969x_rgmii_gpio_config(struct sparx5_port *port)
+{
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Enable RGMII on the GPIOs. */
+ spx5_wr(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(1), port->sparx5,
+ HSIO_WRAP_XMII_CFG(!idx));
+}
+
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int err;
+
+ err = lan969x_rgmii_delay_config(port, conf);
+ if (err)
+ return err;
+
+ lan969x_rgmii_tx_clk_config(port, conf);
+ lan969x_rgmii_gpio_config(port);
+ lan969x_rgmii_port_device_config(port, conf);
+
+ return 0;
+}
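A note on the delay handling above: of_property_read_u32() leaves its output
untouched when the property is absent, so pre-initializing the delay to 0
makes a missing property and an explicit 0 behave identically, and
!!delay_ps then gates the DLL clock. A minimal sketch of the pattern (the
helper name is hypothetical):

	#include <linux/of.h>

	/* Hypothetical helper mirroring lan969x_rgmii_delay_config(): decide
	 * whether the MAC-side DLL clock should run for a given DT property.
	 */
	static bool rgmii_dll_clk_wanted(const struct device_node *np,
					 const char *prop)
	{
		u32 delay_ps = 0; /* survives unchanged if the property is absent */

		of_property_read_u32(np, prop, &delay_ps);

		return delay_ps != 0; /* equivalent to the !!delay_ps gate above */
	}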
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 0027144a2af2..dbe86f937b21 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -18,9 +18,6 @@
#include "sparx5_main.h"
#include "sparx5_port.h"
-#define FDMA_XTR_CHANNEL 6
-#define FDMA_INJ_CHANNEL 0
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
@@ -133,7 +130,7 @@ static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *t
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
/* Reload the RX channel */
spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
@@ -183,7 +180,7 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
return true;
}
-static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
@@ -213,11 +210,11 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
return counter;
}
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
{
struct sparx5_tx *tx = &sparx5->tx;
struct fdma *fdma = &tx->fdma;
- static bool first_time = true;
void *virt_addr;
fdma_dcb_advance(fdma);
@@ -238,12 +235,8 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
- if (first_time) {
- sparx5_fdma_tx_activate(sparx5, tx);
- first_time = false;
- } else {
- sparx5_fdma_reload(sparx5, fdma);
- }
+ sparx5_fdma_reload(sparx5, fdma);
+
return NETDEV_TX_OK;
}
@@ -260,10 +253,6 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
FDMA_DCB_STATUS_INTR);
- netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
- FDMA_WEIGHT);
- napi_enable(&rx->napi);
- sparx5_fdma_rx_activate(sparx5, rx);
return 0;
}
@@ -348,7 +337,7 @@ irqreturn_t sparx5_fdma_handler(int irq, void *args)
return IRQ_HANDLED;
}
-static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
const int byte_swap = 1;
int portno;
@@ -410,7 +399,7 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
}
}
-int sparx5_fdma_start(struct sparx5 *sparx5)
+int sparx5_fdma_init(struct sparx5 *sparx5)
{
int err;
@@ -443,24 +432,55 @@ int sparx5_fdma_start(struct sparx5 *sparx5)
return err;
}
+int sparx5_fdma_deinit(struct sparx5 *sparx5)
+{
+ sparx5_fdma_stop(sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
+
+ return 0;
+}
+
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}
+int sparx5_fdma_start(struct sparx5 *sparx5)
+{
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ netif_napi_add_weight(rx->ndev,
+ &rx->napi,
+ ops->fdma_poll,
+ FDMA_WEIGHT);
+
+ napi_enable(&rx->napi);
+
+ sparx5_fdma_rx_activate(sparx5, rx);
+ sparx5_fdma_tx_activate(sparx5, tx);
+
+ return 0;
+}
+
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
u32 val;
- napi_disable(&sparx5->rx.napi);
+ napi_disable(&rx->napi);
+
/* Stop the fdma and channel interrupts */
- sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
- sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
+ sparx5_fdma_rx_deactivate(sparx5, rx);
+ sparx5_fdma_tx_deactivate(sparx5, tx);
+
/* Wait for the RX channel to stop */
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
- fdma_free_phys(&sparx5->rx.fdma);
- fdma_free_phys(&sparx5->tx.fdma);
+
return 0;
}
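The net effect of this refactor is a symmetric lifecycle: the init hooks
allocate, sparx5_fdma_start() registers NAPI and activates the channels,
sparx5_fdma_stop() quiesces them, and the new deinit hooks free the rings.
An abbreviated sketch of the call order as wired up in sparx5_main.c
(error handling elided for illustration):

	const struct sparx5_ops *ops = sparx5->data->ops;

	/* probe/start path */
	err = ops->fdma_init(sparx5);       /* allocate rings, set the DMA mask */
	if (!err)
		sparx5_fdma_start(sparx5);  /* add NAPI, activate RX/TX channels */

	/* remove path */
	ops->fdma_deinit(sparx5);           /* calls sparx5_fdma_stop(), then frees rings */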
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index f61aa15beab7..6a0e5b83ecd0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -313,10 +313,13 @@ static int sparx5_create_port(struct sparx5 *sparx5,
struct initial_port_config *config)
{
struct sparx5_port *spx5_port;
+ const struct sparx5_ops *ops;
struct net_device *ndev;
struct phylink *phylink;
int err;
+ ops = sparx5->data->ops;
+
ndev = sparx5_create_netdev(sparx5, config->portno);
if (IS_ERR(ndev)) {
dev_err(sparx5->dev, "Could not create net device: %02u\n",
@@ -357,6 +360,9 @@ static int sparx5_create_port(struct sparx5 *sparx5,
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+ if (ops->is_port_rgmii(spx5_port->portno))
+ phy_interface_set_rgmii(spx5_port->phylink_config.supported_interfaces);
+
__set_bit(PHY_INTERFACE_MODE_SGMII,
spx5_port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
@@ -778,15 +784,19 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
- if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) {
- if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
+ if (sparx5->fdma_irq >= 0) {
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0 ||
+ !is_sparx5(sparx5))
err = devm_request_irq(sparx5->dev,
sparx5->fdma_irq,
sparx5_fdma_handler,
0,
"sparx5-fdma", sparx5);
- if (!err)
- err = sparx5_fdma_start(sparx5);
+ if (!err) {
+ err = ops->fdma_init(sparx5);
+ if (!err)
+ sparx5_fdma_start(sparx5);
+ }
if (err)
sparx5->fdma_irq = -ENXIO;
} else {
@@ -830,6 +840,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
struct initial_port_config *configs, *config;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ const struct sparx5_ops *ops;
struct reset_control *reset;
struct sparx5 *sparx5;
int idx = 0, err = 0;
@@ -851,6 +862,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
return -EINVAL;
regs = sparx5->data->regs;
+ ops = sparx5->data->ops;
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
@@ -880,7 +892,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
for_each_available_child_of_node(ports, portnp) {
struct sparx5_port_config *conf;
- struct phy *serdes;
+ struct phy *serdes = NULL;
u32 portno;
err = of_property_read_u32(portnp, "reg", &portno);
@@ -910,13 +922,17 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
conf->sd_sgpio = ~0;
else
sparx5->sd_sgpio_remapping = true;
- serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
- "port %u: missing serdes\n",
- portno);
- of_node_put(portnp);
- goto cleanup_config;
+ /* There is no SerDes node for RGMII ports. */
+ if (!ops->is_port_rgmii(portno)) {
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev,
+ PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ of_node_put(portnp);
+ goto cleanup_config;
+ }
}
config->portno = portno;
config->node = portnp;
@@ -1014,6 +1030,7 @@ cleanup_pnode:
static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+ const struct sparx5_ops *ops = sparx5->data->ops;
debugfs_remove_recursive(sparx5->debugfs_root);
if (sparx5->xtr_irq) {
@@ -1025,7 +1042,7 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
sparx5->fdma_irq = -ENXIO;
}
sparx5_ptp_deinit(sparx5);
- sparx5_fdma_stop(sparx5);
+ ops->fdma_deinit(sparx5);
sparx5_cleanup_ports(sparx5);
sparx5_vcap_destroy(sparx5);
/* Unregister netdevs */
@@ -1072,6 +1089,7 @@ static const struct sparx5_ops sparx5_ops = {
.is_port_5g = &sparx5_port_is_5g,
.is_port_10g = &sparx5_port_is_10g,
.is_port_25g = &sparx5_port_is_25g,
+ .is_port_rgmii = &sparx5_port_is_rgmii,
.get_port_dev_index = &sparx5_port_dev_mapping,
.get_port_dev_bit = &sparx5_port_dev_mapping,
.get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
@@ -1079,6 +1097,10 @@ static const struct sparx5_ops sparx5_ops = {
.set_port_mux = &sparx5_port_mux_set,
.ptp_irq_handler = &sparx5_ptp_irq_handler,
.dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+ .fdma_init = &sparx5_fdma_init,
+ .fdma_deinit = &sparx5_fdma_deinit,
+ .fdma_poll = &sparx5_fdma_napi_callback,
+ .fdma_xmit = &sparx5_fdma_xmit,
};
static const struct sparx5_match_data sparx5_desc = {
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index d5dd953b0a71..fe7d8bcc0cd9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -112,6 +112,8 @@ enum sparx5_feature {
#define XTR_QUEUE 0
#define INJ_QUEUE 0
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 64
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
@@ -157,11 +159,25 @@ struct sparx5_calendar_data {
*/
struct sparx5_rx {
struct fdma fdma;
- struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page_pool *page_pool;
+ union {
+ struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ };
dma_addr_t dma;
struct napi_struct napi;
struct net_device *ndev;
u64 packets;
+ u8 page_order;
+};
+
+/* Used to store information about TX buffers. */
+struct sparx5_tx_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
};
/* Frame DMA transmit state:
@@ -169,6 +185,7 @@ struct sparx5_rx {
*/
struct sparx5_tx {
struct fdma fdma;
+ struct sparx5_tx_buf *dbs;
u64 packets;
u64 dropped;
};
@@ -313,6 +330,7 @@ struct sparx5_ops {
bool (*is_port_5g)(int portno);
bool (*is_port_10g)(int portno);
bool (*is_port_25g)(int portno);
+ bool (*is_port_rgmii)(int portno);
u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
u32 (*get_hsch_max_group_rate)(int grp);
@@ -323,6 +341,13 @@ struct sparx5_ops {
irqreturn_t (*ptp_irq_handler)(int irq, void *args);
int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+ int (*port_config_rgmii)(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+ int (*fdma_init)(struct sparx5 *sparx5);
+ int (*fdma_deinit)(struct sparx5 *sparx5);
+ int (*fdma_poll)(struct napi_struct *napi, int weight);
+ int (*fdma_xmit)(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
};
struct sparx5_main_io_resource {
@@ -433,10 +458,16 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
/* sparx5_fdma.c */
+int sparx5_fdma_init(struct sparx5 *sparx5);
+int sparx5_fdma_deinit(struct sparx5 *sparx5);
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight);
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
irqreturn_t sparx5_fdma_handler(int irq, void *args);
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma);
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5);
/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
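With these hooks in place, the common code never branches on the SoC
directly; it resolves the platform implementation through the match data at
runtime. A condensed sketch of the dispatch pattern, as used by the callers
in this patch:

	const struct sparx5_ops *ops = sparx5->data->ops;

	if (ops->is_port_rgmii(port->portno))           /* always false on Sparx5; ports 28/29 on lan969x */
		err = ops->port_config_rgmii(port, conf);

	ret = ops->fdma_xmit(sparx5, ifh, skb, dev);    /* sparx5_fdma_xmit or lan969x_fdma_xmit */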
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 561344f19062..d9ef4ef137b8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -37,6 +37,7 @@ enum sparx5_target {
TARGET_FDMA = 117,
TARGET_GCB = 118,
TARGET_HSCH = 119,
+ TARGET_HSIO_WRAP = 120,
TARGET_LRN = 122,
TARGET_PCEP = 129,
TARGET_PCS10G_BR = 132,
@@ -54,6 +55,7 @@ enum sparx5_target {
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
+ TARGET_DEVRGMII = 392,
NUM_TARGETS = 517
};
@@ -5367,6 +5369,69 @@ extern const struct sparx5_regs *regs;
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:XMII_CFG */
+#define HSIO_WRAP_XMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 0, 0, 1, 4)
+
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG GENMASK(2, 1)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:RGMII_CFG */
+#define HSIO_WRAP_RGMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 4, 0, 1, 4)
+
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:DLL_CFG */
+#define HSIO_WRAP_DLL_CFG(g, r) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 12, r, 2, 4)
+
+#define HSIO_WRAP_DLL_CFG_DLL_ENA BIT(19)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA BIT(18)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL GENMASK(17, 15)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+
/* LRN:COMMON:COMMON_ACCESS_CTRL */
#define LRN_COMMON_ACCESS_CTRL \
__REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
@@ -8110,4 +8175,84 @@ extern const struct sparx5_regs *regs;
#define XQS_CNT(g) \
__REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+/* LAN969X ONLY */
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEVRGMII_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEVRGMII_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEVRGMII_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEVRGMII_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+
#endif /* _SPARX5_MAIN_REGS_H_ */
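The *_SET()/*_GET() macros above are thin FIELD_PREP()/FIELD_GET() wrappers
over the corresponding GENMASK(), so register values are composed by OR-ing
fields together before the write. A small example using the DEVRGMII VLAN
tag register (field values chosen for illustration):

	#include <linux/bitfield.h>
	#include <linux/if_ether.h>

	u32 val;

	val = DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(ETH_P_8021Q) | /* FIELD_PREP into bits 31:16 */
	      DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(1);      /* FIELD_PREP into bit 0 */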
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index b6f635d85820..138ac58fae51 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -232,9 +232,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ const struct sparx5_ops *ops;
u32 ifh[IFH_LEN];
netdev_tx_t ret;
+ ops = sparx5->data->ops;
+
memset(ifh, 0, IFH_LEN * 4);
sparx5_set_port_ifh(sparx5, ifh, port->portno);
@@ -254,7 +257,7 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ ret = ops->fdma_xmit(sparx5, ifh, skb, dev);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
spin_unlock(&sparx5->tx_lock);
@@ -264,6 +267,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
if (ret < 0)
goto drop;
+ if (!is_sparx5(sparx5))
+ /* On lan969x, when TX_OK, stats and SKB consumption are handled
+ * in the TX completion loop, so don't go any further.
+ */
+ return NETDEV_TX_OK;
+
stats->tx_bytes += skb->len;
stats->tx_packets++;
sparx5->tx.packets++;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index f8562c1a894d..cfb4b2e17ace 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -32,7 +32,19 @@ sparx5_phylink_mac_select_pcs(struct phylink_config *config,
{
struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- return &port->phylink_pcs;
+ /* Return the PCS for all the modes that require it. */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return &port->phylink_pcs;
+ default:
+ return NULL;
+ }
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
@@ -77,7 +89,7 @@ static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct sparx5_port, phylink_pcs);
}
-static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct sparx5_port *port = sparx5_pcs_to_port(pcs);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index f9d1a6bb9bff..04bc8fffaf96 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -257,6 +257,15 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
conf->speed != SPEED_25000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
default:
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
}
@@ -994,6 +1003,7 @@ int sparx5_port_config(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
+ bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode);
bool high_speed_dev = sparx5_is_baser(conf->portmode);
const struct sparx5_ops *ops = sparx5->data->ops;
int err, urgency, stop_wm;
@@ -1002,8 +1012,14 @@ int sparx5_port_config(struct sparx5 *sparx5,
if (err)
return err;
+ if (rgmii) {
+ err = ops->port_config_rgmii(port, conf);
+ if (err)
+ return err;
+ }
+
/* high speed device is already configured */
- if (!high_speed_dev)
+ if (!rgmii && !high_speed_dev)
sparx5_port_config_low_set(sparx5, port, conf);
/* Configure flow control */
@@ -1067,24 +1083,6 @@ int sparx5_port_init(struct sparx5 *sparx5,
if (err)
return err;
- /* Configure MAC vlan awareness */
- err = sparx5_port_max_tags_set(sparx5, port);
- if (err)
- return err;
-
- /* Set Max Length */
- spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
- DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
- sparx5,
- DEV2G5_MAC_MAXLEN_CFG(port->portno));
-
- /* 1G/2G5: Signal Detect configuration */
- spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
- DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
- DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
- sparx5,
- DEV2G5_PCS1G_SD_CFG(port->portno));
-
/* Set Pause WM hysteresis */
spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
@@ -1108,6 +1106,27 @@ int sparx5_port_init(struct sparx5 *sparx5,
ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
sparx5, ANA_CL_FILTER_CTRL(port->portno));
+ if (ops->is_port_rgmii(port->portno))
+ return 0; /* RGMII device - nothing more to configure */
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 9b9bcc6834bc..c8a37468a3d1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -40,6 +40,11 @@ static inline bool sparx5_port_is_25g(int portno)
return portno >= 56 && portno <= 63;
}
+static inline bool sparx5_port_is_rgmii(int portno)
+{
+ return false;
+}
+
static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port)
{
const struct sparx5_ops *ops = sparx5->data->ops;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 2dc0c6ad54be..be95336ce089 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1656,9 +1656,9 @@ static int __init mana_driver_init(void)
static void __exit mana_driver_exit(void)
{
- debugfs_remove(mana_debugfs_root);
-
pci_unregister_driver(&mana_driver);
+
+ debugfs_remove(mana_debugfs_root);
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 558e03301aa8..7663d196eaf8 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct ocelot_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -992,6 +993,16 @@ static int ocelot_port_get_ts_info(struct net_device *dev,
return ocelot_get_ts_info(ocelot, port, info);
}
+static void ocelot_port_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_port_get_strings,
.get_ethtool_stats = ocelot_port_get_ethtool_stats,
@@ -999,6 +1010,7 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ocelot_port_get_ts_info,
+ .get_ts_stats = ocelot_port_ts_stats,
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 808ce8e68d39..cc1088988da0 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -680,9 +680,14 @@ static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port,
skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time +
OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) {
- dev_warn_ratelimited(ocelot->dev,
- "port %d invalidating stale timestamp ID %u which seems lost\n",
- port, OCELOT_SKB_CB(skb)->ts_id);
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->lost++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d invalidating stale timestamp ID %u which seems lost\n",
+ port, OCELOT_SKB_CB(skb)->ts_id);
+
__skb_unlink(skb, &ocelot_port->tx_skbs);
kfree_skb(skb);
ocelot->ptp_skbs_in_flight--;
@@ -748,13 +753,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
return 0;
ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
+ if (ptp_class == PTP_CLASS_NONE) {
+ err = -EINVAL;
+ goto error;
+ }
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->onestep_pkts_unconfirmed++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
return 0;
}
@@ -764,14 +776,16 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
*clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
+ if (!(*clone)) {
+ err = -ENOMEM;
+ goto error;
+ }
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone);
if (err) {
kfree_skb(*clone);
- return err;
+ goto error;
}
skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -780,6 +794,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
}
return 0;
+
+error:
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+ return err;
}
EXPORT_SYMBOL(ocelot_port_txtstamp_request);
@@ -816,6 +836,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
while (budget--) {
struct skb_shared_hwtstamps shhwtstamps;
+ struct ocelot_port *ocelot_port;
u32 val, id, seqid, txport;
struct sk_buff *skb_match;
struct timespec64 ts;
@@ -832,17 +853,27 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
+ ocelot_port = ocelot->ports[txport];
/* Retrieve its associated skb */
skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id,
seqid);
if (!skb_match) {
- dev_warn_ratelimited(ocelot->dev,
- "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
- txport, seqid, id);
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
+ txport, seqid, id);
+
goto next_ts;
}
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->pkts++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index c018783757fb..545710dadcf5 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -821,6 +821,26 @@ void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_ts_stats *stats = ocelot_port->ts_stats;
+ unsigned int start;
+
+ if (!ocelot->ptp)
+ return;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ ts_stats->pkts = stats->pkts;
+ ts_stats->onestep_pkts_unconfirmed = stats->onestep_pkts_unconfirmed;
+ ts_stats->lost = stats->lost;
+ ts_stats->err = stats->err;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_ts_stats);
+
void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats)
{
@@ -960,6 +980,23 @@ int ocelot_stats_init(struct ocelot *ocelot)
if (!ocelot->stats)
return -ENOMEM;
+ if (ocelot->ptp) {
+ for (int port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ ocelot_port->ts_stats = devm_kzalloc(ocelot->dev,
+ sizeof(*ocelot_port->ts_stats),
+ GFP_KERNEL);
+ if (!ocelot_port->ts_stats)
+ return -ENOMEM;
+
+ u64_stats_init(&ocelot_port->ts_stats->syncp);
+ }
+ }
+
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
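The ts_stats plumbing follows the standard u64_stats protocol: writers
bracket their updates with u64_stats_update_begin()/u64_stats_update_end(),
and readers copy a snapshot inside a fetch_begin()/fetch_retry() loop,
retrying if a writer raced them. The generic shape, reduced to a sketch:

	unsigned int start;
	u64 snapshot;

	u64_stats_update_begin(&stats->syncp);  /* writer side */
	stats->pkts++;
	u64_stats_update_end(&stats->syncp);

	do {                                    /* reader side */
		start = u64_stats_fetch_begin(&stats->syncp);
		snapshot = stats->pkts;
	} while (u64_stats_fetch_retry(&stats->syncp, start));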
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
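The added check works because size_add() from <linux/overflow.h> saturates
at SIZE_MAX instead of wrapping, so bounding the saturated sum by INT_MAX up
front guarantees the subsequent addition with sizeof(struct cmsg_bpf_event)
cannot overflow. In sketch form:

	#include <linux/overflow.h>

	/* size_add(SIZE_MAX, 1) == SIZE_MAX: saturates rather than wrapping to 0. */
	if (size_add(pkt_size, data_size) > INT_MAX)
		return -EINVAL; /* reject untrusted lengths before any arithmetic can wrap */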
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 98e098c09c03..abba165738a3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2779,7 +2779,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
break;
}
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ netdev->watchdog_timeo = secs_to_jiffies(5);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 1c61390677f7..04f00ea94230 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,8 +18,6 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_ASIC_TYPE_ELBA 2
-
#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
@@ -59,7 +57,6 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
cpumask_var_t *affinity_masks;
struct delayed_work doorbell_check_dwork;
- struct work_struct nb_work;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
struct ionic_vf *vfs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 9b7f78b6cdb1..a2d4336d2766 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -158,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
25000baseCR_Full);
copper_seen++;
break;
+ case IONIC_XCVR_PID_QSFP_50G_CR2_FC:
+ case IONIC_XCVR_PID_QSFP_50G_CR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full);
+ copper_seen++;
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_AOC:
case IONIC_XCVR_PID_SFP_10GBASE_CU:
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -196,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
+ case IONIC_XCVR_PID_QSFP_200G_AOC:
+ case IONIC_XCVR_PID_QSFP_200G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseSR4_Full);
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_SR:
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
@@ -929,6 +968,7 @@ static int ionic_get_module_info(struct net_device *netdev,
break;
case SFF8024_ID_QSFP_8436_8636:
case SFF8024_ID_QSFP28_8636:
+ case SFF8024_ID_QSFP_PLUS_CMIS:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9c85c0706c6e..830c8adbfbee 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1277,7 +1277,10 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3,
IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4,
IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5,
-
+ IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6,
+ IONIC_XCVR_PID_QSFP_50G_CR2 = 7,
+ IONIC_XCVR_PID_QSFP_200G_CR4 = 8,
+ IONIC_XCVR_PID_QSFP_400G_CR4 = 9,
/* Fiber */
IONIC_XCVR_PID_QSFP_100G_AOC = 50,
IONIC_XCVR_PID_QSFP_100G_ACC = 51,
@@ -1303,6 +1306,15 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
IONIC_XCVR_PID_SFP_10GBASE_T = 72,
IONIC_XCVR_PID_SFP_1000BASE_T = 73,
+ IONIC_XCVR_PID_QSFP_200G_AOC = 74,
+ IONIC_XCVR_PID_QSFP_200G_FR4 = 75,
+ IONIC_XCVR_PID_QSFP_200G_DR4 = 76,
+ IONIC_XCVR_PID_QSFP_200G_SR4 = 77,
+ IONIC_XCVR_PID_QSFP_200G_ACC = 78,
+ IONIC_XCVR_PID_QSFP_400G_FR4 = 79,
+ IONIC_XCVR_PID_QSFP_400G_DR4 = 80,
+ IONIC_XCVR_PID_QSFP_400G_SR4 = 81,
+ IONIC_XCVR_PID_QSFP_400G_VR4 = 82,
};
/**
@@ -1404,6 +1416,8 @@ struct ionic_xcvr_status {
*/
union ionic_port_config {
struct {
+#define IONIC_SPEED_400G 400000 /* 400G in Mbps */
+#define IONIC_SPEED_200G 200000 /* 200G in Mbps */
#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
@@ -3209,7 +3223,11 @@ union ionic_adminq_comp {
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
#define IONIC_DEV_CMD_DONE 0x00000001
-#define IONIC_ASIC_TYPE_CAPRI 0
+#define IONIC_ASIC_TYPE_NONE 0
+#define IONIC_ASIC_TYPE_CAPRI 1
+#define IONIC_ASIC_TYPE_ELBA 2
+#define IONIC_ASIC_TYPE_GIGLIO 3
+#define IONIC_ASIC_TYPE_SALINA 4
/**
* struct ionic_doorbell - Doorbell register layout
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 3d3f936779f7..7707a9e53c43 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3265,7 +3265,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
- le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
+ le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
@@ -3804,10 +3804,6 @@ err_out_adminq_deinit:
return err;
}
-static void ionic_lif_notify_work(struct work_struct *ws)
-{
-}
-
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
struct ionic_admin_ctx ctx = {
@@ -3858,8 +3854,6 @@ int ionic_lif_register(struct ionic_lif *lif)
ionic_lif_register_phc(lif);
- INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
-
lif->ionic->nb.notifier_call = ionic_lif_notify;
err = register_netdevice_notifier(&lif->ionic->nb);
@@ -3885,7 +3879,6 @@ void ionic_lif_unregister(struct ionic_lif *lif)
{
if (lif->ionic->nb.notifier_call) {
unregister_netdevice_notifier(&lif->ionic->nb);
- cancel_work_sync(&lif->ionic->nb_work);
lif->ionic->nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 0f817c3f92d8..daf1e82cb76b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
- case IONIC_RC_ENOSUPP:
return -EINVAL;
+ case IONIC_RC_ENOSUPP:
+ return -EOPNOTSUPP;
case IONIC_RC_EPERM:
return -EPERM;
case IONIC_RC_ENOENT:
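
The hunk above splits IONIC_RC_ENOSUPP out of the -EINVAL bucket so that commands the firmware does not support surface as -EOPNOTSUPP instead of looking like bad arguments. A minimal sketch of what callers can now distinguish; ionic_adminq_post_wait() is the driver's usual submission path, but this call site is hypothetical:

	struct ionic_admin_ctx ctx = { /* ... command setup ... */ };
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err == -EOPNOTSUPP)		/* firmware returned IONIC_RC_ENOSUPP */
		netdev_dbg(lif->netdev, "command not supported, skipping\n");
	else if (err == -EINVAL)	/* genuinely malformed command */
		netdev_err(lif->netdev, "bad adminq command\n");
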
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 9cff0a8ffb2c..3383ee1dad14 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2832,7 +2832,7 @@ netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2860,7 +2860,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2901,7 +2901,7 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2922,7 +2922,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2946,20 +2946,20 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_crb,
- .write = netxen_sysfs_write_crb,
+ .read_new = netxen_sysfs_read_crb,
+ .write_new = netxen_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_mem,
- .write = netxen_sysfs_write_mem,
+ .read_new = netxen_sysfs_read_mem,
+ .write_new = netxen_sysfs_write_mem,
};
static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3082,7 +3082,7 @@ out:
static const struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = 0644 },
.size = sizeof(struct netxen_dimm_cfg),
- .read = netxen_sysfs_read_dimm,
+ .read_new = netxen_sysfs_read_dimm,
};
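
The netxen conversion above follows the sysfs transition to const binary attributes: handlers gain a const struct bin_attribute parameter and are registered through the .read_new/.write_new members while the old non-const .read/.write fields are phased out. A minimal sketch of the pattern; the attribute and handler names are placeholders:

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *attr,
			    char *buf, loff_t offset, size_t size)
{
	/* copy at most "size" bytes of device state into "buf" */
	return 0;
}

static const struct bin_attribute bin_attr_example = {
	.attr = { .name = "example", .mode = 0444 },
	.size = 0,
	.read_new = example_read,
};
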
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 74125188beb8..c0f20464fd1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -264,7 +264,7 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -281,7 +281,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -310,7 +310,7 @@ static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -332,7 +332,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -396,7 +396,7 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -446,7 +446,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -539,7 +539,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -623,7 +623,7 @@ out:
static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -675,7 +675,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -722,7 +722,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -769,7 +769,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -804,7 +804,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -839,7 +839,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -868,7 +868,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -898,7 +898,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -938,7 +938,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1115,7 +1115,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1195,64 +1195,63 @@ static const struct device_attribute dev_attr_beacon = {
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_crb,
- .write = qlcnic_sysfs_write_crb,
+ .read_new = qlcnic_sysfs_read_crb,
+ .write_new = qlcnic_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_mem,
- .write = qlcnic_sysfs_write_mem,
+ .read_new = qlcnic_sysfs_read_mem,
+ .write_new = qlcnic_sysfs_write_mem,
};
static const struct bin_attribute bin_attr_npar_config = {
.attr = { .name = "npar_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_npar_config,
- .write = qlcnic_sysfs_write_npar_config,
+ .read_new = qlcnic_sysfs_read_npar_config,
+ .write_new = qlcnic_sysfs_write_npar_config,
};
static const struct bin_attribute bin_attr_pci_config = {
.attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pci_config,
- .write = NULL,
+ .read_new = qlcnic_sysfs_read_pci_config,
};
static const struct bin_attribute bin_attr_port_stats = {
.attr = { .name = "port_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_port_stats,
- .write = qlcnic_sysfs_clear_port_stats,
+ .read_new = qlcnic_sysfs_get_port_stats,
+ .write_new = qlcnic_sysfs_clear_port_stats,
};
static const struct bin_attribute bin_attr_esw_stats = {
.attr = { .name = "esw_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_esw_stats,
- .write = qlcnic_sysfs_clear_esw_stats,
+ .read_new = qlcnic_sysfs_get_esw_stats,
+ .write_new = qlcnic_sysfs_clear_esw_stats,
};
static const struct bin_attribute bin_attr_esw_config = {
.attr = { .name = "esw_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_esw_config,
- .write = qlcnic_sysfs_write_esw_config,
+ .read_new = qlcnic_sysfs_read_esw_config,
+ .write_new = qlcnic_sysfs_write_esw_config,
};
static const struct bin_attribute bin_attr_pm_config = {
.attr = { .name = "pm_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pm_config,
- .write = qlcnic_sysfs_write_pm_config,
+ .read_new = qlcnic_sysfs_read_pm_config,
+ .write_new = qlcnic_sysfs_write_pm_config,
};
static const struct bin_attribute bin_attr_flash = {
.attr = { .name = "flash", .mode = 0644 },
.size = 0,
- .read = qlcnic_83xx_sysfs_flash_read_handler,
- .write = qlcnic_83xx_sysfs_flash_write_handler,
+ .read_new = qlcnic_83xx_sysfs_flash_read_handler,
+ .write_new = qlcnic_83xx_sysfs_flash_write_handler,
};
#ifdef CONFIG_QLCNIC_HWMON
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index be4c9622618d..7a194a8ab989 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,7 +23,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_08,
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
+ /* support for RTL_GIGA_MAC_VER_11 has been removed */
/* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
/* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
@@ -71,6 +71,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_64,
RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
+ RTL_GIGA_MAC_VER_70,
+ RTL_GIGA_MAC_VER_71,
RTL_GIGA_MAC_NONE
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 739707a7b40f..5a5eba49c651 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -57,6 +56,8 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw"
+#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
+#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
@@ -104,7 +105,6 @@ static const struct {
[RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
[RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_14] = {"RTL8401" },
[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
@@ -141,8 +141,10 @@ static const struct {
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
[RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
- [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
- [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
+ [RTL_GIGA_MAC_VER_65] = {"RTL8125D", FIRMWARE_8125D_2},
+ [RTL_GIGA_MAC_VER_66] = {"RTL8125BP", FIRMWARE_8125BP_2},
+ [RTL_GIGA_MAC_VER_70] = {"RTL8126A", FIRMWARE_8126A_2},
+ [RTL_GIGA_MAC_VER_71] = {"RTL8126A", FIRMWARE_8126A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -623,7 +625,6 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -632,6 +633,7 @@ enum rtl_dash_type {
RTL_DASH_NONE,
RTL_DASH_DP,
RTL_DASH_EP,
+ RTL_DASH_25_BP,
};
struct rtl8169_private {
@@ -708,6 +710,8 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8125D_1);
+MODULE_FIRMWARE(FIRMWARE_8125D_2);
+MODULE_FIRMWARE(FIRMWARE_8125BP_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
@@ -1230,7 +1234,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1245,7 +1249,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1360,10 +1364,19 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
+static void rtl8125bp_driver_start(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_start(tp);
else
rtl8168ep_driver_start(tp);
}
@@ -1384,10 +1397,19 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
+static void rtl8125bp_driver_stop(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_stop(tp);
else
rtl8168ep_driver_stop(tp);
}
@@ -1410,6 +1432,7 @@ static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
case RTL_DASH_DP:
return r8168dp_check_dash(tp);
case RTL_DASH_EP:
+ case RTL_DASH_25_BP:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1424,6 +1447,8 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
return RTL_DASH_DP;
case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
return RTL_DASH_EP;
+ case RTL_GIGA_MAC_VER_66:
+ return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
}
@@ -1576,7 +1601,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_71:
r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
@@ -2049,7 +2074,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2257,10 +2282,14 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
enum mac_version ver;
} mac_info[] = {
/* 8126A family. */
- { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_71 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66 },
/* 8125D family. */
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_65 },
{ 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
/* 8125B family. */
@@ -2336,7 +2365,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168B family. */
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- /* This one is very old and rare, let's see if anybody complains.
+ /* This one is very old and rare, support has been removed.
* { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
*/
@@ -2528,7 +2557,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2660,7 +2689,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2903,7 +2932,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2917,7 +2946,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2943,8 +2972,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -2955,7 +2984,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2967,7 +2996,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2975,8 +3004,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3696,12 +3725,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3719,8 +3748,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3804,7 +3833,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
@@ -3840,8 +3868,10 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
- [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_71] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3858,12 +3888,14 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_64:
+ case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
break;
case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4095,7 +4127,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4252,7 +4284,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -4680,12 +4712,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status & LinkChg)
phy_mac_interrupt(tp->phydev);
- if (unlikely(status & RxFIFOOver &&
- tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(tp->dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- }
-
rtl_irq_disable(tp);
napi_schedule(&tp->napi);
out:
@@ -4723,8 +4749,6 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
- } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
- rtl_reset_work(tp);
}
}
@@ -5103,9 +5127,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
tp->irq_mask |= SYSErr | RxFIFOOver;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
- /* special workaround needed */
- tp->irq_mask |= RxFIFOOver;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5281,7 +5302,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
rtl_hw_init_8125(tp);
break;
default:
@@ -5300,7 +5321,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
- case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
@@ -5347,43 +5367,6 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
return false;
}
-static umode_t r8169_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
-static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct rtl8169_private *tp = dev_get_drvdata(dev);
- int val_raw;
-
- val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
- if (val_raw >= 512)
- val_raw -= 1024;
-
- *val = 1000 * val_raw / 2;
-
- return 0;
-}
-
-static const struct hwmon_ops r8169_hwmon_ops = {
- .is_visible = r8169_hwmon_is_visible,
- .read = r8169_hwmon_read,
-};
-
-static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info r8169_hwmon_chip_info = {
- .ops = &r8169_hwmon_ops,
- .info = r8169_hwmon_info,
-};
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5563,12 +5546,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* The temperature sensor is available from RTl8125B */
- if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
- /* ignore errors */
- devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
- &r8169_hwmon_chip_info,
- NULL);
rc = register_netdev(dev);
if (rc)
return rc;
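
For context on the renumbering above: the mac_info[] table is consulted first-match-wins against the masked XID, which is why the 8126A rows move to new version numbers while keeping their table position ahead of broader masks. A self-contained sketch of that lookup, with struct and field names assumed from the table layout shown in the hunk:

struct rtl_chip_info {			/* field names assumed */
	u16 mask;
	u16 val;
	int ver;
};

static int rtl_lookup_ver(const struct rtl_chip_info *tbl, size_t n, u16 xid)
{
	size_t i;

	/* first match wins, so more specific entries must come first */
	for (i = 0; i < n; i++)
		if ((xid & tbl[i].mask) == tbl[i].val)
			return tbl[i].ver;

	return -1;			/* the driver falls back to RTL_GIGA_MAC_NONE */
}
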
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 5307c6ff4e25..cf95e579c65d 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -276,15 +276,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, phy_reg_init);
}
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_write(phydev, 0x1f, 0x0001);
- phy_set_bits(phydev, 0x16, BIT(0));
- phy_write(phydev, 0x10, 0xf41b);
- phy_write(phydev, 0x1f, 0x0000);
-}
-
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1111,6 +1102,28 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
rtl8125_config_eee_phy(phydev);
}
+static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x8088);
+ phy_modify(phydev, 0x17, 0xff00, 0x9000);
+ phy_write(phydev, 0x16, 0x808f);
+ phy_modify(phydev, 0x17, 0xff00, 0x9000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1136,7 +1149,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
@@ -1172,8 +1184,10 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
- [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_65] = rtl8125d_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
+ [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_71] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index dbc3f92eebc4..2bbfcad613ab 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -13,6 +13,7 @@
#define RTASE_HW_VER_906X_7XA 0x00800000
#define RTASE_HW_VER_906X_7XC 0x04000000
#define RTASE_HW_VER_907XD_V1 0x04800000
+#define RTASE_HW_VER_907XD_VA 0x08000000
#define RTASE_RX_DMA_BURST_256 4
#define RTASE_TX_DMA_BURST_UNLIMITED 7
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index c42c0516656b..3bd11cb56294 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1725,6 +1725,7 @@ static int rtase_get_settings(struct net_device *dev,
cmd->base.speed = SPEED_5000;
break;
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
cmd->base.speed = SPEED_10000;
break;
}
@@ -1993,6 +1994,7 @@ static int rtase_check_mac_version_valid(struct rtase_private *tp)
case RTASE_HW_VER_906X_7XA:
case RTASE_HW_VER_906X_7XC:
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
ret = 0;
break;
}
@@ -2016,7 +2018,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
SET_NETDEV_DEV(dev, &pdev->dev);
ret = pci_enable_device(pdev);
- if (ret < 0)
+ if (ret)
goto err_out_free_dev;
/* make sure PCI base addr 1 is MMIO */
@@ -2032,7 +2034,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
ret = pci_request_regions(pdev, KBUILD_MODNAME);
- if (ret < 0)
+ if (ret)
goto err_out_disable;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -2108,7 +2110,7 @@ static int rtase_init_one(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
ret = rtase_init_board(pdev, &dev, &ioaddr);
- if (ret != 0)
+ if (ret)
return ret;
tp = netdev_priv(dev);
@@ -2118,7 +2120,7 @@ static int rtase_init_one(struct pci_dev *pdev,
/* identify chip attached to board */
ret = rtase_check_mac_version_valid(tp);
- if (ret != 0) {
+ if (ret) {
dev_err(&pdev->dev,
"unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
tp->hw_ver);
@@ -2129,7 +2131,7 @@ static int rtase_init_one(struct pci_dev *pdev,
rtase_init_hardware(tp);
ret = rtase_alloc_interrupt(pdev, tp);
- if (ret < 0) {
+ if (ret) {
dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
goto err_out_del_napi;
}
@@ -2174,7 +2176,7 @@ static int rtase_init_one(struct pci_dev *pdev,
netif_carrier_off(dev);
ret = register_netdev(dev);
- if (ret != 0)
+ if (ret)
goto err_out_free_dma;
netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac0f093f647a..bc395294a32d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2763,6 +2763,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 9ac6e2aad18f..84d09a8973b7 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -111,25 +111,35 @@ static void rswitch_top_init(struct rswitch_private *priv)
/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
+ u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
- /* For ETHA */
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ /* Start with empty configuration */
+ for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
+ /* Disable all port features */
+ iowrite32(0, priv->addr + FWPC0(i));
+ /* Disallow L3 forwarding and direct descriptor forwarding */
+ iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
+ priv->addr + FWPC1(i));
+ /* Disallow L2 forwarding */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
+ priv->addr + FWPC2(i));
+ /* Disallow port based forwarding */
iowrite32(0, priv->addr + FWPBFC(i));
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ /* For enabled ETHA ports, setup port based forwarding */
+ rswitch_for_each_enabled_port(priv, i) {
+ /* Port based forwarding from port i to GWCA port */
+ rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
+ FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
+ /* Within GWCA port, forward to Rx queue for port i */
iowrite32(priv->rdev[i]->rx_queue->index,
priv->addr + FWPBFCSDC(GWCA_INDEX, i));
- iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
}
- /* For GWCA */
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
- iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
- iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
- iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+ /* For GWCA port, allow direct descriptor forwarding */
+ rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
}
/* Gateway CPU agent block (GWCA) */
@@ -1159,9 +1169,9 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
- rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
- rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
+ FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
+ FIELD_PREP(MPIC_PSMHT, 0x06));
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
@@ -1190,42 +1200,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
-static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
- int phyad, int devad, int regad, int data)
+static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
+ unsigned int mmf, unsigned int pda,
+ unsigned int pra, unsigned int pop,
+ unsigned int prd)
{
- int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
u32 val;
int ret;
- if (devad == 0xffffffff)
- return -ENODEV;
-
- writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
+ val = MPSM_PSME |
+ FIELD_PREP(MPSM_MFF, mmf) |
+ FIELD_PREP(MPSM_PDA, pda) |
+ FIELD_PREP(MPSM_PRA, pra) |
+ FIELD_PREP(MPSM_POP, pop) |
+ FIELD_PREP(MPSM_PRD, prd);
+ iowrite32(val, etha->addr + MPSM);
- val = MPSM_PSME | MPSM_MFF_C45;
- iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
if (ret)
return ret;
- rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
-
if (read) {
- writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- if (ret)
- return ret;
-
- ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
-
- rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- } else {
- iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
- etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ val = ioread32(etha->addr + MPSM);
+ ret = FIELD_GET(MPSM_PRD, val);
}
return ret;
@@ -1235,16 +1232,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
int regad)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_READ_C45, 0);
}
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_WRITE, val);
+}
+
+static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_READ_C22, 0);
+}
+
+static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
+ int regad, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_WRITE, val);
}
/* Call of_node_put(port) after done */
@@ -1329,6 +1357,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->priv = rdev->etha;
mii_bus->read_c45 = rswitch_etha_mii_read_c45;
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
+ mii_bus->read = rswitch_etha_mii_read_c22;
+ mii_bus->write = rswitch_etha_mii_write_c22;
mii_bus->parent = &rdev->priv->pdev->dev;
mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
@@ -1549,7 +1579,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
unsigned int i;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
@@ -1924,9 +1954,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_get_params;
- if (rdev->priv->gwca.speed < rdev->etha->speed)
- rdev->priv->gwca.speed = rdev->etha->speed;
-
err = rswitch_rxdmac_alloc(ndev);
if (err < 0)
goto out_rxdmac;
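
The MDIO rework above replaces hand-rolled shift/mask arithmetic with GENMASK()/FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which derive the shift from the mask at compile time and catch out-of-range values. A minimal round-trip sketch mirroring the MPSM field layout:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_PRD	GENMASK(31, 16)	/* mirrors MPSM_PRD */
#define DEMO_POP	GENMASK(14, 13)	/* mirrors MPSM_POP */

static u16 demo_pack_and_extract(void)
{
	/* pack two fields into a register value, then pull one back out */
	u32 val = FIELD_PREP(DEMO_POP, 2) | FIELD_PREP(DEMO_PRD, 0xabcd);

	return FIELD_GET(DEMO_PRD, val);	/* == 0xabcd */
}
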
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index d8d4ed7d7f8b..532192cbca4b 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -12,6 +12,7 @@
#define RSWITCH_MAX_NUM_QUEUES 128
+#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
@@ -731,28 +732,21 @@ enum rswitch_etha_mode {
#define MPIC_LSC_100M 1
#define MPIC_LSC_1G 2
#define MPIC_LSC_2_5G 3
-
-#define MDIO_READ_C45 0x03
-#define MDIO_WRITE_C45 0x01
+#define MPIC_PSMCS GENMASK(22, 16)
+#define MPIC_PSMHT GENMASK(26, 24)
#define MPSM_PSME BIT(0)
-#define MPSM_MFF_C45 BIT(2)
-#define MPSM_PRD_SHIFT 16
-#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
-
-/* Completion flags */
-#define MMIS1_PAACS BIT(2) /* Address */
-#define MMIS1_PWACS BIT(1) /* Write */
-#define MMIS1_PRACS BIT(0) /* Read */
-#define MMIS1_CLEAR_FLAGS 0xf
-
-#define MPIC_PSMCS_SHIFT 16
-#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
-#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
-
-#define MPIC_PSMHT_SHIFT 24
-#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
-#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+#define MPSM_MFF BIT(2)
+#define MPSM_MMF_C22 0
+#define MPSM_MMF_C45 1
+#define MPSM_PDA GENMASK(7, 3)
+#define MPSM_PRA GENMASK(12, 8)
+#define MPSM_POP GENMASK(14, 13)
+#define MPSM_POP_ADDRESS 0
+#define MPSM_POP_WRITE 1
+#define MPSM_POP_READ_C22 2
+#define MPSM_POP_READ_C45 3
+#define MPSM_PRD GENMASK(31, 16)
#define MLVC_PLV BIT(16)
@@ -806,6 +800,7 @@ enum rswitch_gwca_mode {
#define CABPPFLC_INIT_VALUE 0x00800080
/* MFWD */
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
#define FWPC0_LTHTA BIT(0)
#define FWPC0_IP4UE BIT(3)
#define FWPC0_IP4TE BIT(4)
@@ -819,15 +814,15 @@ enum rswitch_gwca_mode {
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
-#define FWPC0(i) (FWPC00 + (i) * 0x10)
-#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
- FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
- FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
- FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWCP1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPC1_DDE BIT(0)
-#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPC2(i) (FWPC20 + (i) * 0x10)
+#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
@@ -984,7 +979,6 @@ struct rswitch_gwca {
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
- int speed;
};
#define NUM_QUEUES_PER_NDEV 2
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4cc7b501135f..ef374a8e05c3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -217,28 +217,4 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
(reg) != 0xa1c), \
page)
-/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
- * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
- * collector register.
- */
-static inline void _efx_writed_page_locked(struct efx_nic *efx,
- const efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
-{
- unsigned long flags __attribute__ ((unused));
-
- if (page == 0) {
- spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- spin_unlock_irqrestore(&efx->biu_lock, flags);
- } else {
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- }
-}
-#define efx_writed_page_locked(efx, value, reg, page) \
- _efx_writed_page_locked(efx, value, \
- reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
- page)
-
#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 620ba6ef3514..f70a7b7d6345 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -831,6 +831,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -838,6 +839,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index ab358fe13e1d..4cc83203e188 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -897,7 +897,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -989,7 +989,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
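
The sfc change above adopts tracked reference counting: netdev_hold()/netdev_put() pair each reference with a netdevice_tracker entry so leaked references can be attributed when CONFIG_NET_DEV_REFCNT_TRACKER is enabled. A minimal sketch of the pattern for any deferred-work structure; the struct is a stand-in for efx_async_filter_insertion:

#include <linux/netdevice.h>

struct deferred_req {
	struct net_device *net_dev;
	netdevice_tracker net_dev_tracker;
};

static void req_take_dev(struct deferred_req *req, struct net_device *dev)
{
	req->net_dev = dev;
	netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
}

static void req_release_dev(struct deferred_req *req)
{
	netdev_put(req->net_dev, &req->net_dev_tracker);
}
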
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 9785eff10607..2be3bad3c993 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -753,6 +753,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -760,6 +761,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 082e35c6caaa..2839d0e0a9c1 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -888,7 +888,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -980,7 +980,8 @@ int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6658536a4e17..4cc85a36a1ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -154,6 +154,18 @@ config DWMAC_RZN1
the stmmac device driver. This support can make use of a custom MII
converter PCS device.
+config DWMAC_S32
+ tristate "NXP S32G/S32R GMAC support"
+ default ARCH_S32
+ depends on OF && (ARCH_S32 || COMPILE_TEST)
+ help
+ Support for the Ethernet controller on NXP S32CC SoCs.
+
+ This selects NXP SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC Ethernet
+ controller on S32CC series SoCs, i.e. S32G2xx, S32G3xx and
+ S32R45.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 2389fd261344..b26f0e79c2b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
+obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1367fa5c9b8e..e25db747a81a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -257,6 +257,8 @@ struct stmmac_safety_stats {
#define CSR_F_150M 150000000
#define CSR_F_250M 250000000
#define CSR_F_300M 300000000
+#define CSR_F_500M 500000000
+#define CSR_F_800M 800000000
#define MAC_CSR_H_FRQ_MASK 0x20
@@ -543,18 +545,8 @@ struct dma_features {
#define STMMAC_VLAN_INSERT 0x2
#define STMMAC_VLAN_REPLACE 0x3
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-
struct mac_device_info;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern const struct stmmac_hwtimestamp dwmac1000_ptp;
-extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
-
-extern const struct ptp_clock_info stmmac_ptp_clock_ops;
-extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
-
struct mac_link {
u32 caps;
u32 speed_mask;
@@ -641,8 +633,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_mode_ops ring_mode_ops;
-extern const struct stmmac_mode_ops chain_mode_ops;
-extern const struct stmmac_desc_ops dwmac4_desc_ops;
-
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 83290e707df5..bd4eb187f8c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -181,24 +181,19 @@ static void dwc_qos_remove(struct platform_device *pdev)
static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct tegra_eqos *eqos = priv;
- unsigned long rate = 125000000;
bool needs_calibration = false;
+ long rate = 125000000;
u32 value;
int err;
switch (speed) {
case SPEED_1000:
- needs_calibration = true;
- rate = 125000000;
- break;
-
case SPEED_100:
needs_calibration = true;
- rate = 25000000;
- break;
+ fallthrough;
case SPEED_10:
- rate = 2500000;
+ rate = rgmii_clock(speed);
break;
default:
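
This and the following stmmac hunks converge on the rgmii_clock() helper. Its body is not part of this diff, but from the call sites it maps a link speed to the RGMII interface clock rate and returns a negative errno for anything else, roughly:

/* Not part of this diff: reconstructed from the call sites above.
 * The real helper may handle additional speeds (e.g. 2.5G).
 */
static inline long rgmii_clock(int speed)
{
	switch (speed) {
	case SPEED_10:
		return 2500000;		/* 2.5 MHz */
	case SPEED_100:
		return 25000000;	/* 25 MHz */
	case SPEED_1000:
		return 125000000;	/* 125 MHz */
	default:
		return -EINVAL;
	}
}
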
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 641f3cd019a3..20d3a202bb8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -36,6 +36,8 @@
#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
+#define MX93_GPR_CLK_SEL_OFFSET (4)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
@@ -108,13 +110,21 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
{
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
+ int val, ret;
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
case PHY_INTERFACE_MODE_RMII:
+ if (dwmac->rmii_refclk_ext) {
+ ret = regmap_clear_bits(dwmac->intf_regmap,
+ dwmac->intf_reg_off +
+ MX93_GPR_CLK_SEL_OFFSET,
+ MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ if (ret)
+ return ret;
+ }
val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -186,7 +196,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
plat_dat = dwmac->plat_dat;
@@ -196,17 +206,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
(plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
return;
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "invalid speed %u\n", speed);
return;
}
@@ -301,15 +302,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
* is required by i.MX8MP, i.MX93.
* is optional for i.MX8DXL.
*/
- dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ dwmac->intf_regmap =
+ syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
+ &dwmac->intf_reg_off);
if (IS_ERR(dwmac->intf_regmap))
return PTR_ERR(dwmac->intf_regmap);
-
- err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
- if (err) {
- dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
- return err;
- }
}
return err;
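
The parse_dt cleanup above relies on syscon_regmap_lookup_by_phandle_args(), which resolves the syscon phandle and reads its argument cells in a single call, replacing the separate of_property_read_u32_index() lookup. A minimal sketch using the same "intf_mode" property:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int demo_parse_intf_mode(struct device_node *np,
				struct regmap **map, u32 *reg_off)
{
	/* one argument cell: the register offset within the syscon */
	*map = syscon_regmap_lookup_by_phandle_args(np, "intf_mode",
						    1, reg_off);
	return PTR_ERR_OR_ZERO(*map);
}
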
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d94f0a150e93..ddee6154d40b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -31,27 +31,13 @@ struct intel_dwmac_data {
static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct intel_dwmac *dwmac = priv;
- unsigned long rate;
+ long rate;
int ret;
- rate = clk_get_rate(dwmac->tx_clk);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
-
- case SPEED_100:
- rate = 25000000;
- break;
-
- case SPEED_10:
- rate = 2500000;
- break;
-
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "Invalid speed\n");
- break;
+ return;
}
ret = clk_set_rate(dwmac->tx_clk, rate);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 901a3c1959fa..2a5b38723635 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -777,7 +777,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv)
netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);
- netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate);
+ netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate);
}
static int qcom_ethqos_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 8cb374668b74..a4dc89e23a68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1079,20 +1079,11 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
return;
}
@@ -1540,20 +1531,11 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
return;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
new file mode 100644
index 000000000000..9cc0e5817416
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP S32G/R GMAC glue layer
+ *
+ * Copyright 2019-2024 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */
+
+/* SoC PHY interface control register */
+#define PHY_INTF_SEL_MII 0x00
+#define PHY_INTF_SEL_SGMII 0x01
+#define PHY_INTF_SEL_RGMII 0x02
+#define PHY_INTF_SEL_RMII 0x08
+
+struct s32_priv_data {
+ void __iomem *ioaddr;
+ void __iomem *ctrl_sts;
+ struct device *dev;
+ phy_interface_t *intf_mode;
+ struct clk *tx_clk;
+ struct clk *rx_clk;
+};
+
+static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
+{
+ writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+
+ dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
+
+ return 0;
+}
+
+static int s32_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+ int ret;
+
+ /* Set initial TX interface clock */
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable tx clock\n");
+ return ret;
+ }
+ ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set tx clock\n");
+ goto err_tx_disable;
+ }
+
+ /* Set initial RX interface clock */
+ ret = clk_prepare_enable(gmac->rx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable rx clock\n");
+ goto err_tx_disable;
+ }
+ ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set rx clock\n");
+ goto err_txrx_disable;
+ }
+
+ /* Set interface mode */
+ ret = s32_gmac_write_phy_intf_select(gmac);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set PHY interface mode\n");
+ goto err_txrx_disable;
+ }
+
+ return 0;
+
+err_txrx_disable:
+ clk_disable_unprepare(gmac->rx_clk);
+err_tx_disable:
+ clk_disable_unprepare(gmac->tx_clk);
+ return ret;
+}
+
+static void s32_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+
+ clk_disable_unprepare(gmac->tx_clk);
+ clk_disable_unprepare(gmac->rx_clk);
+}
+
+static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+{
+ struct s32_priv_data *gmac = priv;
+ long tx_clk_rate;
+ int ret;
+
+ tx_clk_rate = rgmii_clock(speed);
+ if (tx_clk_rate < 0) {
+ dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed);
+ return;
+ }
+
+ dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate);
+ ret = clk_set_rate(gmac->tx_clk, tx_clk_rate);
+ if (ret)
+ dev_err(gmac->dev, "Can't set tx clock\n");
+}
+
+static int s32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+ struct stmmac_resources res;
+ struct s32_priv_data *gmac;
+ int ret;
+
+ gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->dev = &pdev->dev;
+
+ ret = stmmac_get_platform_resources(pdev, &res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /* PHY interface mode control reg */
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+
+ /* tx clock */
+ gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(gmac->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->tx_clk),
+ "tx clock not found\n");
+
+ /* rx clock */
+ gmac->rx_clk = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(gmac->rx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->rx_clk),
+ "rx clock not found\n");
+
+ gmac->intf_mode = &plat->phy_interface;
+ gmac->ioaddr = res.addr;
+
+ /* S32CC core feature set */
+ plat->has_gmac4 = true;
+ plat->pmt = 1;
+ plat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat->rx_fifo_size = 20480;
+ plat->tx_fifo_size = 20480;
+
+ plat->init = s32_gmac_init;
+ plat->exit = s32_gmac_exit;
+ plat->fix_mac_speed = s32_fix_mac_speed;
+
+ plat->bsp_priv = gmac;
+
+ return stmmac_pltfr_probe(pdev, plat, &res);
+}
+
+static const struct of_device_id s32_dwmac_match[] = {
+ { .compatible = "nxp,s32g2-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s32_dwmac_match);
+
+static struct platform_driver s32_dwmac_driver = {
+ .probe = s32_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "s32-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = s32_dwmac_match,
+ },
+};
+module_platform_driver(s32_dwmac_driver);
+
+MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>");
+MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver");
+MODULE_LICENSE("GPL");
+
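
For context on how s32_fix_mac_speed() above is reached: the stmmac core invokes the glue driver's callback from its link-up path, handing back the bsp_priv pointer registered at probe. A sketch of that call site (stmmac core code, not part of this diff):

    if (priv->plat->fix_mac_speed)
            priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);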
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 421666279dd3..0a0a363d3730 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -34,24 +34,13 @@ struct starfive_dwmac {
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct starfive_dwmac *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
- rate = clk_get_rate(dwmac->clk_tx);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "invalid speed %u\n", speed);
- break;
+ return;
}
err = clk_set_rate(dwmac->clk_tx, rate);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a6ff02d905a9..f25461c292fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,10 +21,7 @@
#include "stmmac_platform.h"
-#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
-#define DWMAC_25MHZ 25000000
-#define DWMAC_2_5MHZ 2500000
#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
iface == PHY_INTERFACE_MODE_RGMII_ID || \
@@ -140,7 +137,7 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
u32 reg = dwmac->ctrl_reg;
- u32 freq = 0;
+ long freq = 0;
if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
src = TX_RETIME_SRC_TXCLK;
@@ -153,19 +150,14 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
}
} else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
- if (spd == SPEED_1000) {
- freq = DWMAC_125MHZ;
- } else {
+ freq = rgmii_clock(spd);
+
+ if (spd != SPEED_1000 && freq > 0)
/* Switch to clkgen for these speeds */
src = TX_RETIME_SRC_CLKGEN;
- if (spd == SPEED_100)
- freq = DWMAC_25MHZ;
- else if (spd == SPEED_10)
- freq = DWMAC_2_5MHZ;
- }
}
- if (src == TX_RETIME_SRC_CLKGEN && freq)
+ if (src == TX_RETIME_SRC_CLKGEN && freq > 0)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
@@ -207,16 +199,11 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (res)
dwmac->clk_sel_reg = res->start;
- regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->ctrl_reg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
- return err;
- }
-
err = of_get_phy_mode(np, &dwmac->interface);
if (err && err != -ENODEV) {
dev_err(dev, "Can't get phy-mode\n");
@@ -321,7 +308,6 @@ static void sti_dwmac_remove(struct platform_device *pdev)
clk_disable_unprepare(dwmac->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int sti_dwmac_suspend(struct device *dev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
@@ -341,10 +327,9 @@ static int sti_dwmac_resume(struct device *dev)
return stmmac_resume(dev);
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
+ sti_dwmac_resume);
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
@@ -361,7 +346,7 @@ static struct platform_driver sti_dwmac_driver = {
.remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = &sti_dwmac_pm_ops,
+ .pm = pm_sleep_ptr(&sti_dwmac_pm_ops),
.of_match_table = sti_dwmac_match,
},
};
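
The SIMPLE_DEV_PM_OPS conversion above also drops the #ifdef CONFIG_PM_SLEEP guards around the suspend/resume handlers. A minimal sketch of the pattern, with hypothetical foo_* names:

    /* The ops are always compiled; pm_sleep_ptr() evaluates to NULL when
     * CONFIG_PM_SLEEP is off, so the unreferenced callbacks are dropped
     * by the compiler/linker without preprocessor guards.
     */
    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = pm_sleep_ptr(&foo_pm_ops),
            },
    };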
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1e8bac665cc9..1fcb74e9e3ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -419,16 +419,11 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
}
/* Get mode register */
- dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ dwmac->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->mode_reg);
if (IS_ERR(dwmac->regmap))
return PTR_ERR(dwmac->regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
- return err;
- }
-
if (dwmac->ops->is_mp2)
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c25781874aa7..9ed8620580a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -27,7 +27,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
- u32 clk_rate;
+ unsigned long clk_rate;
value |= GMAC_CORE_INIT;
@@ -420,10 +420,10 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
+static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- int value = et & STMMAC_ET_MAX;
+ u32 value = et & STMMAC_ET_MAX;
int regval;
/* Program LPI entry timer value into register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 1ce6f43d545a..806555976496 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -144,4 +144,7 @@
/* TDS3 use for both format (read and write back) */
#define RDES3_OWN BIT(31)
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a04a79003692..20027d3c25a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -493,4 +493,9 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+
#endif /* __STMMAC_DWXGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index a72d336a8350..31bdbab9a46c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -9,6 +9,8 @@
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "dwmac4_descs.h"
+#include "dwxgmac2.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -265,7 +267,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -288,7 +290,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 64f8ed67dcc4..0f200b72c225 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -363,7 +363,7 @@ struct stmmac_ops {
void (*set_eee_mode)(struct mac_device_info *hw,
bool en_tx_lpi_clockgating);
void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
+ void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -665,6 +665,15 @@ struct stmmac_regs_off {
u32 est_off;
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_hwtimestamp dwmac1000_ptp;
+
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
+
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
@@ -676,14 +685,6 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
-extern const struct stmmac_tc_ops dwxgmac_tc_ops;
-extern const struct stmmac_ops dwxgmac210_ops;
-extern const struct stmmac_ops dwxlgmac2_ops;
-extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
-extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
-extern const struct stmmac_mmc_ops dwmac_mmc_ops;
-extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
-extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 5d1ea3e07459..1cba39fb2c44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -139,4 +139,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fpe_fragment_cntr;
};
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+
#endif /* __MMC_H__ */
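
This header hunk follows the same pattern as the dwmac4_descs.h, dwxgmac2.h and stmmac_est.h hunks nearby: each ops table's extern declaration moves out of the catch-all hwif.h into the header owned by the defining code. Sketched with the mmc example (callback list elided):

    /* mmc.h declares the table ... */
    extern const struct stmmac_mmc_ops dwmac_mmc_ops;

    /* ... and the defining .c file includes mmc.h, so a mismatch
     * between declaration and definition is now a compile error.
     */
    #include "mmc.h"

    const struct stmmac_mmc_ops dwmac_mmc_ops = {
            /* ... callbacks ... */
    };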
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1d86439b8a14..f05cae103d83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -126,7 +126,7 @@ struct stmmac_rx_queue {
unsigned int cur_rx;
unsigned int dirty_rx;
unsigned int buf_alloc_num;
- u32 rx_zeroc_thresh;
+ unsigned int napi_skb_frag_size;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
unsigned int state_saved;
@@ -266,7 +266,6 @@ struct stmmac_priv {
int sph_cap;
u32 sarc_type;
- unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
@@ -307,11 +306,9 @@ struct stmmac_priv {
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
- int eee_enabled;
- int eee_active;
- int tx_lpi_timer;
- int tx_lpi_enabled;
- int eee_tw_timer;
+ u32 tx_lpi_timer;
+ bool eee_enabled;
+ bool eee_active;
bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
@@ -407,8 +404,6 @@ void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
-void stmmac_disable_eee_mode(struct stmmac_priv *priv);
-bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
@@ -418,14 +413,6 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
return !!priv->xdp_prog;
}
-static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
-{
- if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index 7a858c566e7e..d247fa383a6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -62,3 +62,5 @@
#define EST_SRWO BIT(0)
#define EST_GCL_DATA 0x00000034
+
+extern const struct stmmac_est_ops dwmac510_est_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1d77389ce953..918a32f8fda8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -654,7 +654,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
- if (priv->eee_enabled) {
+ if (priv->dma_cap.eee) {
int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
@@ -898,9 +898,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->tx_lpi_timer = priv->tx_lpi_timer;
- edata->tx_lpi_enabled = priv->tx_lpi_enabled;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -908,29 +905,11 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
- netdev_warn(priv->dev,
- "Setting EEE tx-lpi is not supported\n");
-
- if (!edata->eee_enabled)
- stmmac_disable_eee_mode(priv);
-
- ret = phylink_ethtool_set_eee(priv->phylink, edata);
- if (ret)
- return ret;
-
- if (edata->eee_enabled &&
- priv->tx_lpi_timer != edata->tx_lpi_timer) {
- priv->tx_lpi_timer = edata->tx_lpi_timer;
- stmmac_eee_init(priv);
- }
-
- return 0;
+ return phylink_ethtool_set_eee(priv->phylink, edata);
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -1227,43 +1206,6 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
-static int stmmac_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = priv->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int stmmac_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- priv->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
@@ -1390,8 +1332,6 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
- .get_tunable = stmmac_get_tunable,
- .set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
.get_mm = stmmac_get_mm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c81ea8cdfe6e..edbf8994455d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -77,7 +77,6 @@ module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -107,15 +106,13 @@ static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-#define STMMAC_RX_COPYBREAK 256
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER 1000
-static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, 0644);
+static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
@@ -197,8 +194,6 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
- if (eee_timer < 0)
- eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -301,7 +296,7 @@ static void stmmac_global_err(struct stmmac_priv *priv)
*/
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
- u32 clk_rate;
+ unsigned long clk_rate;
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
@@ -325,6 +320,10 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_150_250M;
else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
+ else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
+ priv->clk_csr = STMMAC_CSR_300_500M;
+ else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
+ priv->clk_csr = STMMAC_CSR_500_800M;
}
if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
@@ -391,23 +390,17 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
{
- int tx_lpi_timer;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
+}
- /* Clear/set the SW EEE timer flag based on LPI ET enablement */
- priv->eee_sw_timer_en = en ? 0 : 1;
- tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
- stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
+{
+ stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
}
-/**
- * stmmac_enable_eee_mode - check and enter in LPI mode
- * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode in case of
- * EEE.
- */
-static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -417,29 +410,43 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
+ return true; /* still unfinished work */
+ }
+
+ return false;
+}
+
+static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
+{
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+}
+
+/**
+ * stmmac_try_to_start_sw_lpi - check whether LPI mode can be entered
+ * @priv: driver private structure
+ * Description: if the TX path is idle, enter LPI mode; otherwise re-arm
+ * the software LPI timer and try again later.
+ */
+static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
+{
+ if (stmmac_eee_tx_busy(priv)) {
+ stmmac_restart_sw_lpi_timer(priv);
+ return;
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
- return 0;
}
/**
- * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * stmmac_stop_sw_lpi - stop transmitting LPI
* @priv: driver private structure
- * Description: this function is to exit and disable EEE in case of
- * LPI state is true. This is called by the xmit.
+ * Description: When using software-controlled LPI, stop transmitting LPI state.
*/
-void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
- if (!priv->eee_sw_timer_en) {
- stmmac_lpi_entry_timer_config(priv, 0);
- return;
- }
-
stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
@@ -456,25 +463,27 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ stmmac_try_to_start_sw_lpi(priv);
}
/**
* stmmac_eee_init - init EEE
* @priv: driver private structure
+ * @active: indicates whether EEE should be enabled.
* Description:
* if the GMAC supports the EEE (from the HW cap reg) and the phy device
* can also manage EEE, this function enable the LPI state and start related
* timer.
*/
-bool stmmac_eee_init(struct stmmac_priv *priv)
+static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
{
- int eee_tw_timer = priv->eee_tw_timer;
+ priv->eee_active = active;
/* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee)
- return false;
+ if (!priv->dma_cap.eee) {
+ priv->eee_enabled = false;
+ return;
+ }
mutex_lock(&priv->lock);
@@ -482,22 +491,24 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if (!priv->eee_active) {
if (priv->eee_enabled) {
netdev_dbg(priv->dev, "disable EEE\n");
- stmmac_lpi_entry_timer_config(priv, 0);
+ priv->eee_sw_timer_en = false;
+ stmmac_disable_hw_lpi_timer(priv);
del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0,
+ STMMAC_DEFAULT_TWT_LS);
if (priv->hw->xpcs)
xpcs_config_eee(priv->hw->xpcs,
priv->plat->mult_fact_100ns,
false);
}
+ priv->eee_enabled = false;
mutex_unlock(&priv->lock);
- return false;
+ return;
}
if (priv->eee_active && !priv->eee_enabled) {
- timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- eee_tw_timer);
+ STMMAC_DEFAULT_TWT_LS);
if (priv->hw->xpcs)
xpcs_config_eee(priv->hw->xpcs,
priv->plat->mult_fact_100ns,
@@ -505,18 +516,22 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
}
if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ /* Use hardware LPI mode */
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
- stmmac_lpi_entry_timer_config(priv, 1);
+ priv->eee_sw_timer_en = false;
+ stmmac_enable_hw_lpi_timer(priv);
} else {
- stmmac_lpi_entry_timer_config(priv, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(priv->tx_lpi_timer));
+ /* Use software LPI mode */
+ priv->eee_sw_timer_en = true;
+ stmmac_disable_hw_lpi_timer(priv);
+ stmmac_restart_sw_lpi_timer(priv);
}
+ priv->eee_enabled = true;
+
mutex_unlock(&priv->lock);
netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
- return true;
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -973,10 +988,8 @@ static void stmmac_mac_link_down(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- priv->tx_lpi_enabled = false;
- priv->eee_enabled = stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
+ if (priv->dma_cap.eee)
+ stmmac_set_eee_pls(priv, priv->hw, false);
if (stmmac_fpe_supported(priv))
stmmac_fpe_link_state_handle(priv, false);
@@ -1083,14 +1096,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
- if (phy && priv->dma_cap.eee) {
- priv->eee_active =
- phy_init_eee(phy, !(priv->plat->flags &
- STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
- priv->eee_enabled = stmmac_eee_init(priv);
- priv->tx_lpi_enabled = priv->eee_enabled;
+ if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
- }
if (stmmac_fpe_supported(priv))
stmmac_fpe_link_state_handle(priv, true);
@@ -1099,12 +1106,32 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_hwtstamp_correct_latency(priv, priv);
}
+static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_eee_init(priv, false);
+}
+
+static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ priv->tx_lpi_timer = timer;
+ stmmac_eee_init(priv, true);
+
+ return 0;
+}
+
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
+ .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
};
/**
@@ -1177,15 +1204,27 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
- if (priv->dma_cap.eee)
- phy_support_eee(phydev);
-
ret = phylink_connect_phy(priv->phylink, phydev);
} else {
fwnode_handle_put(phy_fwnode);
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
+ if (ret == 0) {
+ struct ethtool_keee eee;
+
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is
+ * a chance that userspace could change the eee_timer setting
+ * via sysfs before the first open. Thus, preserve existing
+ * behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
+ }
+ }
+
if (!priv->plat->pmt) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -1202,6 +1241,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
struct stmmac_mdio_bus_data *mdio_bus_data;
int mode = priv->plat->phy_interface;
struct fwnode_handle *fwnode;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
priv->phylink_config.dev = &priv->dev->dev;
@@ -1211,6 +1251,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
/* Stmmac always requires an RX clock for hardware initialization */
priv->phylink_config.mac_requires_rxc = true;
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
+ priv->phylink_config.eee_rx_clk_stop_enable = true;
+
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
priv->phylink_config.default_an_inband =
@@ -1223,8 +1266,27 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
- xpcs_get_interfaces(priv->hw->xpcs,
- priv->phylink_config.supported_interfaces);
+ pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
+ else
+ pcs = priv->hw->phylink_pcs;
+
+ if (pcs)
+ phy_interface_or(priv->phylink_config.supported_interfaces,
+ priv->phylink_config.supported_interfaces,
+ pcs->supported_interfaces);
+
+ if (priv->dma_cap.eee) {
+ /* Assume all supported interfaces also support LPI */
+ memcpy(priv->phylink_config.lpi_interfaces,
+ priv->phylink_config.supported_interfaces,
+ sizeof(priv->phylink_config.lpi_interfaces));
+
+ /* All full duplex speeds above 100Mbps are supported */
+ priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
+ MAC_100FD;
+ priv->phylink_config.lpi_timer_default = eee_timer * 1000;
+ priv->phylink_config.eee_enabled_default = true;
+ }
fwnode = priv->plat->port_node;
if (!fwnode)
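
The lpi_capabilities expression a few lines up is compact enough to deserve unpacking; a worked sketch, assuming the MAC_* capability bits keep their usual speed ordering:

    /* With MAC_1000FD = BIT(n) and capability bits ordered by speed:
     *
     *   MAC_1000FD - 1     = every bit below 1000FD
     *   ~(MAC_1000FD - 1)  = 1000FD plus every faster capability
     *   ... | MAC_100FD    = additionally allow 100 Mb/s full duplex
     *
     * so LPI is advertised for 100FD and all full-duplex speeds of
     * 1 Gb/s and above, matching the comment in the hunk.
     */
    priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;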
@@ -1307,6 +1369,14 @@ static void stmmac_display_rings(struct stmmac_priv *priv,
stmmac_display_tx_rings(priv, dma_conf);
}
+static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return NET_SKB_PAD;
+}
+
static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
@@ -2003,22 +2073,26 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
+ unsigned int dma_buf_sz_pad, num_pages;
unsigned int napi_id;
int ret;
+ dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
+
rx_q->queue_index = queue;
rx_q->priv_data = priv;
+ rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = dma_conf->dma_rx_size;
- num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
+ pp_params.order = order_base_2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
- pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+ pp_params.max_len = dma_conf->dma_buf_sz;
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
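
A worked example of the sizing above, with hypothetical but typical numbers, since napi_build_skb() needs the buffer to carry its own headroom and skb_shared_info tail:

    /* Assume a 4 KiB page, 1536-byte DMA buffer, no XDP:
     *
     *   stmmac_rx_offset()                       = NET_SKB_PAD (e.g. 64)
     *   dma_conf->dma_buf_sz                     = 1536
     *   SKB_DATA_ALIGN(sizeof(skb_shared_info))  ~ 320..448
     *
     *   dma_buf_sz_pad     ~ 64 + 1536 + 448 = 2048
     *   num_pages          = DIV_ROUND_UP(2048, 4096) = 1
     *   napi_skb_frag_size = 1 * PAGE_SIZE = 4096
     */

Note the switch from ilog2() to order_base_2(): ilog2() rounds down, so a three-page requirement would have been truncated to an order-1 (two-page) allocation, while order_base_2() correctly rounds up to order 2.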
@@ -2757,11 +2831,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
xmits = budget;
}
- if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
- priv->eee_sw_timer_en) {
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
+ stmmac_restart_sw_lpi_timer(priv);
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
@@ -3436,12 +3507,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
else if (ptp_register)
stmmac_ptp_register(priv);
- priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
-
- /* Convert the timer from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
if (priv->use_riwt) {
u32 queue;
@@ -3908,6 +3973,10 @@ static int __stmmac_open(struct net_device *dev,
u32 chan;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
ret = pm_runtime_resume_and_get(priv->device);
if (ret < 0)
return ret;
@@ -3923,8 +3992,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
@@ -4024,11 +4091,6 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
- }
-
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@@ -4117,11 +4179,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
- if (priv->dma_cap.addr64 <= 32)
- desc->des0 = cpu_to_le32(curr_addr);
- else
- stmmac_set_desc_addr(priv, desc, curr_addr);
-
+ stmmac_set_desc_addr(priv, desc, curr_addr);
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -4167,17 +4225,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
* First Descriptor
* --------
* | DES0 |---> buffer1 = L2/L3/L4 header
- * | DES1 |---> TCP Payload (can continue on next descr...)
- * | DES2 |---> buffer 1 and 2 len
+ * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
+ * | | width is 32-bit, but we never use it.
+ * | | Also can be used as the most-significant 8-bits or 16-bits of
+ * | | buffer1 address pointer if the DMA AXI address width is 40-bit
+ * | | or 48-bit, and we always use it.
+ * | DES2 |---> buffer1 len
* | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
* --------
+ * --------
+ * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
+ * | DES3 |
+ * --------
* |
* ...
* |
* --------
- * | DES0 | --| Split TCP Payload on Buffers 1 and 2
- * | DES1 | --|
- * | DES2 | --> buffer 1 and 2 len
+ * | DES0 |---> buffer1 = Split TCP Payload
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
* | DES3 |
* --------
*
@@ -4187,15 +4255,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int tmp_pay_len = 0, first_tx, nfrags;
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
u32 pay_len, mss, queue;
- dma_addr_t tso_des, des;
+ int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
+ dma_addr_t des;
bool set_ic;
- int i;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4280,24 +4347,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- if (priv->dma_cap.addr64 <= 32) {
- first->des0 = cpu_to_le32(des);
-
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
-
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- tso_des = des;
- } else {
- stmmac_set_desc_addr(priv, first, des);
- tmp_pay_len = pay_len;
- tso_des = des + proto_hdr_len;
- pay_len = 0;
- }
-
- stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
+ stmmac_set_desc_addr(priv, first, des);
+ stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
+ (nfrags == 0), queue);
/* In case two or more DMA transmit descriptors are allocated for this
* non-paged SKB data, the DMA buffer address should be saved to
@@ -4401,11 +4453,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Complete the first descriptor before granting the DMA */
- stmmac_prepare_tso_tx_desc(priv, first, 1,
- proto_hdr_len,
- pay_len,
- 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
+ tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -4492,7 +4542,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- stmmac_disable_eee_mode(priv);
+ stmmac_stop_sw_lpi(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
@@ -5473,7 +5523,7 @@ read_again:
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
@@ -5491,10 +5541,6 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page) + buf->page_offset);
- if (buf->sec_page)
- prefetch(page_address(buf->sec_page));
-
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5516,6 +5562,8 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
+ net_prefetch(page_address(buf->page) +
+ buf->page_offset);
xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
@@ -5569,22 +5617,26 @@ read_again:
}
if (!skb) {
+ unsigned int head_pad_len;
+
/* XDP program may expand or reduce tail */
buf1_len = ctx.xdp.data_end - ctx.xdp.data;
- skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ skb = napi_build_skb(page_address(buf->page),
+ rx_q->napi_skb_frag_size);
if (!skb) {
+ page_pool_recycle_direct(rx_q->page_pool,
+ buf->page);
rx_dropped++;
count++;
goto drain_data;
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+ head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+ skb_reserve(skb, head_pad_len);
skb_put(skb, buf1_len);
-
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
@@ -5592,9 +5644,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5604,9 +5653,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -7407,6 +7453,8 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7714,7 +7762,7 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- if (priv->eee_enabled) {
+ if (priv->eee_sw_timer_en) {
priv->tx_path_in_lpi_mode = false;
del_timer_sync(&priv->eee_ctrl_timer);
}
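
One subtlety in the stmmac_main.c changes: timer_setup() for eee_ctrl_timer moves into stmmac_dvr_probe(), so the del_timer_sync() calls scattered through the EEE paths always operate on an initialised timer, even if EEE never came up. The hazard being avoided, sketched with hypothetical foo_* names:

    struct foo {
            struct timer_list timer;
    };

    static void foo_timer_fn(struct timer_list *t) { /* ... */ }

    static int foo_probe(struct foo *p)
    {
            /* Initialise unconditionally at probe time ... */
            timer_setup(&p->timer, foo_timer_fn, 0);
            return 0;
    }

    static void foo_teardown(struct foo *p)
    {
            /* ... so this is safe even if the timer was never armed.
             * del_timer_sync() on a timer_list that never went through
             * timer_setup() is a bug that debugobjects will flag.
             */
            del_timer_sync(&p->timer);
    }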
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dc9884130b91..d0e61aa1a495 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -626,7 +626,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
- dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate);
}
plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4cc70480ce0f..3fe0e3a80e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -104,4 +104,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time);
void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv);
+extern const struct ptp_clock_info stmmac_ptp_clock_ops;
+extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 6a79e6a111ed..694d6ee14381 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1284,14 +1284,3 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.query_caps = tc_query_caps,
.setup_mqprio = tc_setup_dwmac510_mqprio,
};
-
-const struct stmmac_tc_ops dwxgmac_tc_ops = {
- .init = tc_init,
- .setup_cls_u32 = tc_setup_cls_u32,
- .setup_cbs = tc_setup_cbs,
- .setup_cls = tc_setup_cls,
- .setup_taprio = tc_setup_taprio,
- .setup_etf = tc_setup_etf,
- .query_caps = tc_query_caps,
- .setup_mqprio = tc_setup_dwmac510_mqprio,
-};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
index 896dc987d4ef..77ce8cfbe976 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -4,7 +4,6 @@
#ifndef _STMMAC_XDP_H_
#define _STMMAC_XDP_H_
-#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index df6d35d41b97..d7459866d24c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3303,7 +3303,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
addr &= PAGE_MASK;
pp = &rp->rxhash[h];
for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
- if (p->index == addr) {
+ if (p->private == addr) {
*link = pp;
goto found;
}
@@ -3318,7 +3318,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
unsigned int h = niu_hash_rxaddr(rp, base);
- page->index = base;
+ page->private = base;
niu_next_page(page) = rp->rxhash[h];
rp->rxhash[h] = page;
}
@@ -3400,11 +3400,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
RCR_ENTRY_PKTBUFSZ_SHIFT];
- if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ if ((page->private + PAGE_SIZE) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
rp->rbr_refill_pending++;
@@ -3469,11 +3469,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
- if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ if ((page->private + rp->rbr_block_size) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
rp->rbr_refill_pending++;
} else
@@ -3538,11 +3538,11 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
page = rp->rxhash[i];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
np->ops->unmap_page(np->device, base, PAGE_SIZE,
DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
@@ -6460,7 +6460,7 @@ static void niu_reset_buffers(struct niu *np)
page = rp->rxhash[j];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
base = base >> RBR_DESCR_ADDR_SHIFT;
rp->rbr[k++] = cpu_to_le32(base);
page = next;
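
The niu changes above stop overloading page->index, which is reserved for pagecache offsets, and keep the DMA base address in page->private instead. The same stores can also be written with the mm accessors; a sketch:

    set_page_private(page, base);   /* page->private = base;       */
    base = page_private(page);      /* read it back                */
    set_page_private(page, 0);      /* clear before __free_page()  */

Either spelling is fine on 64-bit targets such as niu's, where an unsigned long can hold the u64 DMA address.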
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 721d8ed3f302..5e0e4c9ecbb0 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -199,7 +199,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 5465bf872734..b663271e79f7 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -32,6 +32,7 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
+#include <net/dsa.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -497,35 +498,62 @@ static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+ struct page *page,
+ bool allow_direct);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
-static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
- int id, port;
+ int port;
- for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
+ flow = &rx_chn->flows[id];
+ napi_disable(&flow->napi_rx);
+ hrtimer_cancel(&flow->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
+ am65_cpsw_nuss_rx_cleanup, !!id);
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- rxq = &common->ports[port].xdp_rxq[id];
+ rxq = &common->ports[port].xdp_rxq[id];
- if (xdp_rxq_info_is_reg(rxq))
- xdp_rxq_info_unreg(rxq);
- }
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
- if (flow->page_pool) {
- page_pool_destroy(flow->page_pool);
- flow->page_pool = NULL;
- }
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
}
}
-static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ int id;
+
+ reinit_completion(&common->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
+ for (id = common->rx_ch_num_flows - 1; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+}
+
+static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct page_pool_params pp_params = {
@@ -540,45 +568,162 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
struct page_pool *pool;
- int id, port, ret;
+ struct page *page;
+ int port, ret, i;
+
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ return ret;
+ }
+
+ flow->page_pool = pool;
+
+ /* sharing one page pool across ndevs is safe because their RX
+ * handlers never run concurrently
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ /* FIXME should we BUG here? */
+ continue;
+
+ rxq = &common->ports[port].xdp_rxq[id];
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ id);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, page, id);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ id, ret);
+ am65_cpsw_put_page(flow, page, false);
+ goto err;
+ }
+ }
+
+ napi_enable(&flow->napi_rx);
+ return 0;
+
+err:
+ am65_cpsw_destroy_rxq(common, id);
+ return ret;
+}
+
+static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
- pp_params.napi = &flow->napi_rx;
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool)) {
- ret = PTR_ERR(pool);
+ ret = am65_cpsw_create_rxq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create rxq %d: %d\n",
+ id, ret);
goto err;
}
+ }
- flow->page_pool = pool;
+ ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+ goto err;
+ }
- /* using same page pool is allowed as no running rx handlers
- * simultaneously for both ndevs
- */
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+ return 0;
+
+err:
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ return ret;
+}
+
+static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+
+ napi_disable(&tx_chn->napi_tx);
+ hrtimer_cancel(&tx_chn->tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn,
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chn->tx_chn);
+}
+
+static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
+ int id;
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
- rxq = &common->ports[port].xdp_rxq[id];
+ for (id = 0; id < common->tx_ch_num; id++)
+ k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false);
- ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
- id, flow->napi_rx.napi_id);
- if (ret)
- goto err;
+ id = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "tx teardown timeout\n");
- ret = xdp_rxq_info_reg_mem_model(rxq,
- MEM_TYPE_PAGE_POOL,
- pool);
- if (ret)
- goto err;
+ for (id = common->tx_ch_num - 1; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+}
+
+static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+ int ret;
+
+ ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn);
+ if (ret)
+ return ret;
+
+ napi_enable(&tx_chn->napi_tx);
+
+ return 0;
+}
+
+static int am65_cpsw_create_txqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
+
+ for (id = 0; id < common->tx_ch_num; id++) {
+ ret = am65_cpsw_create_txq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create txq %d: %d\n",
+ id, ret);
+ goto err;
}
}
return 0;
err:
- am65_cpsw_destroy_xdp_rxqs(common);
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+
return ret;
}
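
The create/destroy split above gives every error path the same shape: unwind only what was created, newest first. The core pattern, sketched generically with hypothetical create_one()/destroy_one() names standing in for the per-queue helpers:

    static int create_all(struct am65_cpsw_common *common, int count)
    {
            int id, ret;

            for (id = 0; id < count; id++) {
                    ret = create_one(common, id);
                    if (ret)
                            goto err;
            }
            return 0;

    err:
            /* 'id' is the index that failed; unwind from id - 1 down. */
            for (--id; id >= 0; id--)
                    destroy_one(common, id);
            return ret;
    }

This also retires the old fail path's quirk, visible further down: the removed loop tested 'i' while incrementing 'flow_idx'.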
@@ -642,7 +787,6 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}
@@ -717,12 +861,8 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int port_idx, i, ret, tx, flow_idx;
- struct am65_cpsw_rx_flow *flow;
u32 val, port_mask;
- struct page *page;
+ int port_idx, ret;
if (common->usage_count)
return 0;
@@ -762,7 +902,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -782,151 +922,38 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- ret = am65_cpsw_create_xdp_rxqs(common);
- if (ret) {
- dev_err(common->dev, "Failed to create XDP rx queues\n");
+ ret = am65_cpsw_create_rxqs(common);
+ if (ret)
return ret;
- }
-
- for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
- page = page_pool_dev_alloc_pages(flow->page_pool);
- if (!page) {
- dev_err(common->dev, "cannot allocate page in flow %d\n",
- flow_idx);
- ret = -ENOMEM;
- goto fail_rx;
- }
-
- ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit page to rx channel flow %d, error %d\n",
- flow_idx, ret);
- am65_cpsw_put_page(flow, page, false);
- goto fail_rx;
- }
- }
- }
-
- ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
- goto fail_rx;
- }
- for (i = 0; i < common->rx_ch_num_flows ; i++) {
- napi_enable(&rx_chn->flows[i].napi_rx);
- if (rx_chn->flows[i].irq_disabled) {
- rx_chn->flows[i].irq_disabled = false;
- enable_irq(rx_chn->flows[i].irq);
- }
- }
-
- for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
- tx, ret);
- tx--;
- goto fail_tx;
- }
- napi_enable(&tx_chn[tx].napi_tx);
- }
+ ret = am65_cpsw_create_txqs(common);
+ if (ret)
+ goto cleanup_rx;
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
-fail_tx:
- while (tx >= 0) {
- napi_disable(&tx_chn[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
- tx--;
- }
-
- for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- if (!flow->irq_disabled) {
- disable_irq(flow->irq);
- flow->irq_disabled = true;
- }
- napi_disable(&flow->napi_rx);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
-fail_rx:
- for (i = 0; i < common->rx_ch_num_flows; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
-
- am65_cpsw_destroy_xdp_rxqs(common);
+cleanup_rx:
+ am65_cpsw_destroy_rxqs(common);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int i;
-
if (common->usage_count != 1)
return 0;
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
- /* shutdown tx channels */
- atomic_set(&common->tdown_cnt, common->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- reinit_completion(&common->tdown_complete);
-
- for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
-
- i = wait_for_completion_timeout(&common->tdown_complete,
- msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&tx_chn[i].napi_tx);
- hrtimer_cancel(&tx_chn[i].tx_hrtimer);
- }
-
- for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
- am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
- }
-
- reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
-
- if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
- i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "rx teardown timeout\n");
- }
-
- for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
- napi_disable(&rx_chn->flows[i].napi_rx);
- hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
+ am65_cpsw_destroy_txqs(common);
+ am65_cpsw_destroy_rxqs(common);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
- am65_cpsw_destroy_xdp_rxqs(common);
-
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
@@ -1014,6 +1041,15 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
common->usage_count++;
+ /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes.
+ * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port.
+ */
+ if (netdev_uses_dsa(ndev)) {
+ reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ reg &= ~AM65_CPSW_CTL_VLAN_AWARE;
+ writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL);
+ }
+
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
am65_cpsw_port_enable_dscp_map(port);
@@ -2242,13 +2278,11 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
struct device *dev = common->dev;
int i;
- devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
-
common->tx_ch_rate_msk = 0;
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- if (tx_chn->irq)
+ if (tx_chn->irq > 0)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
@@ -2265,8 +2299,6 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll);
hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
@@ -2279,9 +2311,21 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
tx_chn->id, tx_chn->irq, ret);
goto err;
}
+
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
}
+ return 0;
+
err:
+	for (--i; i >= 0; i--) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ netif_napi_del(&tx_chn->napi_tx);
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+ }
+
return ret;
}
@@ -2362,12 +2406,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ return 0;
+
err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
- return i;
- }
+ am65_cpsw_nuss_free_tx_chns(common);
return ret;
}
@@ -2395,7 +2437,6 @@ static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
rx_chn = &common->rx_chns;
flows = rx_chn->flows;
- devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
for (i = 0; i < common->rx_ch_num_flows; i++) {
if (!(flows[i].irq < 0))
@@ -2494,7 +2535,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
i, &rx_flow_cfg);
if (ret) {
dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
- goto err;
+ goto err_flow;
}
if (!i)
fdqring_id =
@@ -2506,14 +2547,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "Failed to get rx dma irq %d\n",
flow->irq);
ret = flow->irq;
- goto err;
+ goto err_flow;
}
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
- netif_napi_add(common->dma_ndev, &flow->napi_rx,
- am65_cpsw_nuss_rx_poll);
hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
@@ -2526,20 +2565,28 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
flow->irq = -EINVAL;
- goto err;
+ goto err_flow;
}
+
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
}
/* setup classifier to route priorities to flows */
cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
-err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
- return i;
+ return 0;
+
+err_flow:
+	for (--i; i >= 0; i--) {
+ flow = &rx_chn->flows[i];
+ netif_napi_del(&flow->napi_rx);
+ devm_free_irq(dev, flow->irq, flow);
}
+err:
+ am65_cpsw_nuss_free_rx_chns(common);
+
return ret;
}
@@ -2559,20 +2606,15 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
{
u32 mac_lo, mac_hi, offset;
struct regmap *syscon;
- int ret;
- syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse",
+ 1, &offset);
if (IS_ERR(syscon)) {
if (PTR_ERR(syscon) == -ENODEV)
return 0;
return PTR_ERR(syscon);
}
- ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
- &offset);
- if (ret)
- return ret;
-
regmap_read(syscon, offset, &mac_lo);
regmap_read(syscon, offset + 4, &mac_hi);
@@ -3349,7 +3391,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
if (ret)
- return ret;
+ goto err_remove_tx;
/* The DMA Channels are not guaranteed to be in a clean state.
* Reset and disable them to ensure that they are back to the
@@ -3370,7 +3412,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
- return ret;
+ goto err_remove_rx;
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -3401,6 +3443,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+err_remove_rx:
+ am65_cpsw_nuss_remove_rx_chns(common);
+err_remove_tx:
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3420,6 +3466,8 @@ int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3678,6 +3726,8 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+ am65_cpsw_nuss_remove_tx_chns(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
@@ -3739,8 +3789,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
if (ret)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
- if (ret)
+ if (ret) {
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
+ }
/* If RX IRQ was disabled before suspend, keep it disabled */
for (i = 0; i < common->rx_ch_num_flows; i++) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4ef8cf6ea135..0cb6fa6e5b7d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -635,6 +635,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -684,7 +686,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -1225,7 +1227,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 52e4e350b734..5cc72a91f220 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
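+		/* 'index' is the flipped word position; idx2 keeps the
+		 * raw word number so the shift amount stays correct
+		 */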
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 21d55a180ef6..bdc4db0d169c 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -434,18 +434,6 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
int cpsw_nway_reset(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a98bcc5eb566..cec0a90659d9 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -554,7 +554,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -778,6 +778,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -1209,7 +1211,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 1f448290b9f4..f2fc55d9295d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -497,7 +497,6 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index d76fe6d05e10..00ed97860547 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -561,61 +561,134 @@ const struct icss_iep_clockops prueth_iep_clockops = {
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ int port_mask;
+ u8 vlan_id;
- port_mask |= icssg_fdb_lookup(emac, addr, 0);
- icssg_fdb_add_del(emac, addr, 0, port_mask, true);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
+
+ port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
return 0;
}
static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
int other_port_mask;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
- other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
+ port_mask = BIT(emac->port_id);
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
- icssg_fdb_add_del(emac, addr, 0, port_mask, false);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
if (other_port_mask) {
- icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
- icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
+ icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, other_port_mask,
+ other_port_mask, true);
}
return 0;
}
-static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
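+/* Add or remove an FDB entry spanning all three ports; when adding,
+ * also join the port to the VLAN.
+ */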
+static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
+ const u8 *addr, u8 vid, bool add)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
-
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ icssg_fdb_add_del(emac, addr, vid,
ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, true);
+ ICSSG_FDB_ENTRY_BLOCK, add);
+
+ if (add)
+ icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
+ BIT(emac->port_id), add);
+}
+
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ true);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
+ }
- icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
- BIT(emac->port_id), true);
return 0;
}
static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ false);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
+ }
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, false);
+ return 0;
+}
+
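+/* Sync the multicast list of a VLAN upper device into the per-VID
+ * address list and program the FDB/VLAN tables via the add/del callbacks.
+ */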
+static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
+ void *args)
+{
+ struct prueth_emac *emac = args;
+
+ if (!vdev || !vid)
+ return 0;
+
+ netif_addr_lock_bh(vdev);
+ __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
+ vdev->addr_len);
+ netif_addr_unlock_bh(vdev);
+
+ if (emac->prueth->is_hsr_offload_mode)
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
return 0;
}
@@ -857,12 +930,22 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
return;
}
- if (emac->prueth->is_hsr_offload_mode)
+ if (emac->prueth->is_hsr_offload_mode) {
__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
icssg_prueth_hsr_del_mcast);
- else
+ if (rtnl_trylock()) {
+ vlan_for_each(emac->prueth->hsr_dev,
+ icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ } else {
__dev_mc_sync(ndev, icssg_prueth_add_mcast,
icssg_prueth_del_mcast);
+ if (rtnl_trylock()) {
+ vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ }
}
/**
@@ -907,19 +990,19 @@ static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
- port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
- untag_mask = 0;
+ if (prueth->is_hsr_offload_mode)
+ port_mask |= BIT(PRUETH_PORT_HOST);
- netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ __hw_addr_init(&emac->vlan_mcast_list[vid]);
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
- icssg_set_pvid(emac->prueth, vid, emac->port_id);
- }
return 0;
}
@@ -928,18 +1011,16 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
+ if (prueth->is_hsr_offload_mode)
port_mask = BIT(PRUETH_PORT_HOST);
- untag_mask = 0;
- netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- }
return 0;
}
@@ -1254,7 +1335,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
prueth->br_members & BIT(PRUETH_PORT_MII1)) {
prueth->is_switch_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
emac->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
}
@@ -1312,7 +1393,7 @@ static int prueth_hsr_port_link(struct net_device *ndev)
NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
return -EOPNOTSUPP;
prueth->is_hsr_offload_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
emac0->port_vlan = prueth->default_vlan;
emac1->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 5473315ea204..329b46e9ee53 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -83,6 +83,12 @@
#define ICSS_CMD_ADD_FILTER 0x7
#define ICSS_CMD_ADD_MAC 0x8
+/* VLAN filtering related macros */
+#define PRUETH_DFLT_VLAN_HSR 1
+#define PRUETH_DFLT_VLAN_SW 1
+#define PRUETH_DFLT_VLAN_MAC 0
+#define MAX_VLAN_ID 256
+
/* In switch mode there are 3 real ports i.e. 3 mac addrs.
* however Linux sees only the host side port. The other 2 ports
* are the switch ports.
@@ -200,6 +206,8 @@ struct prueth_emac {
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
unsigned long rx_pace_timeout_ns;
+
+ struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
};
/**
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 3dc86397c367..64a19ff39562 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -1031,8 +1031,6 @@ static int prueth_probe(struct platform_device *pdev)
(unsigned long)prueth->msmcram.va);
prueth->msmcram.size = msmc_ram_size;
memset_io(prueth->msmcram.va, 0, msmc_ram_size);
- dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
- prueth->msmcram.va, prueth->msmcram.size);
prueth->iep0 = icss_iep_get_idx(np, 0);
if (IS_ERR(prueth->iep0)) {
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index dd4a07c97eee..5aa93144a4f5 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
if (ret < 0)
goto out_free_tmp_vptr_1;
- napi_disable(&vptr->napi);
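+	/* the locked NAPI variants require the netdev instance lock */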
+ netdev_lock(dev);
+ napi_disable_locked(&vptr->napi);
spin_lock_irqsave(&vptr->lock, flags);
@@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
velocity_give_many_rx_descs(vptr);
- napi_enable(&vptr->napi);
+ napi_enable_locked(&vptr->napi);
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
spin_unlock_irqrestore(&vptr->lock, flags);
+ netdev_unlock(dev);
velocity_free_rings(tmp_vptr);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index d64b8abcf018..a3f4f3e42587 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -120,6 +120,9 @@
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+/* Constant to convert delay counts to microseconds */
+#define XAXIDMA_DELAY_SCALE (125ULL * USEC_PER_SEC)
+
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..9e7fa012e4fa 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -238,11 +238,8 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
/* 1 Timeout Interval = 125 * (clock period of SG clock) */
result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- (u64)125000000);
- if (result > 255)
- result = 255;
-
- return result;
+ XAXIDMA_DELAY_SCALE);
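+	/* clamp to the maximum value the hardware delay field can hold */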
+ return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
}
/**
@@ -2056,14 +2053,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EBUSY;
}
- if (ecoalesce->rx_max_coalesced_frames)
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- if (ecoalesce->rx_coalesce_usecs)
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- if (ecoalesce->tx_max_coalesced_frames)
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- if (ecoalesce->tx_coalesce_usecs)
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
+ if (!ecoalesce->rx_max_coalesced_frames ||
+ !ecoalesce->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG(extack, "frames must be non-zero");
+ return -EINVAL;
+ }
+
+ if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+ !ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->tx_max_coalesced_frames > 1 &&
+ !ecoalesce->tx_coalesce_usecs)) {
+ NL_SET_ERR_MSG(extack,
+ "usecs must be non-zero when frames is greater than one");
+ return -EINVAL;
+ }
+
+ lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
+ lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
@@ -2331,11 +2345,12 @@ static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
}
static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_get_state(pcs_phy, state);
+ phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}
static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index bc658bc60885..642155cb8315 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -235,7 +235,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
if (!tun_dst) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
/* Update tunnel dst according to Geneve options. */
@@ -322,7 +322,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
len = skb->len;
err = gro_cells_receive(&geneve->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(geneve->dev, len);
+ dev_dstats_rx_add(geneve->dev, len);
return;
drop:
@@ -387,14 +387,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely((!geneve->cfg.inner_proto_inherit &&
inner_proto != htons(ETH_P_TEB)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
@@ -1023,7 +1023,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
dev_kfree_skb(skb);
- DEV_STATS_INC(dev, tx_dropped);
+ dev_dstats_tx_dropped(dev);
return NETDEV_TX_OK;
}
} else {
@@ -1202,7 +1202,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89a996ad8cd0..d64740bf44ed 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -23,6 +23,8 @@
#include <net/net_namespace.h>
#include <net/protocol.h>
+#include <net/inet_dscp.h>
+#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
@@ -350,7 +352,7 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = ip_sock_rt_tos(sk);
+ fl4->flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk)));
fl4->flowi4_scope = ip_sock_rt_scope(sk);
fl4->flowi4_proto = sk->sk_protocol;
@@ -1524,8 +1526,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(dev_net(dev), gtp_net_id);
- list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ gn = net_generic(src_net, gtp_net_id);
+ list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
@@ -1551,7 +1553,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
- list_del_rcu(&gtp->list);
+ list_del(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -2271,16 +2273,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
struct pdp_ctx *pctx;
- struct gtp_net *gn;
-
- gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ for_each_netdev_rcu(net, dev) {
+ if (dev->rtnl_link_ops != &gtp_link_ops)
+ continue;
+
+ gtp = netdev_priv(dev);
+
if (last_gtp && last_gtp != gtp)
continue;
else
@@ -2475,9 +2480,14 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct gtp_dev *gtp, *gtp_next;
+ struct net_device *dev;
+
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &gtp_link_ops)
+ gtp_dellink(dev, dev_to_kill);
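+
+		/* also remove devices recorded on this netns' list but
+		 * registered in another netns
+		 */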
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
}
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e690b95b1bbb..234db693cefa 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -464,7 +464,7 @@ struct nvsp_1_message_send_receive_buffer_complete {
* LargeOffset SmallOffset
*/
- struct nvsp_1_receive_buffer_section sections[1];
+ struct nvsp_1_receive_buffer_section sections[];
} __packed;
/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9afb08dbc350..d6f5b9ea3109 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -866,7 +866,8 @@ static void netvsc_send_completion(struct net_device *ndev,
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
- sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+ struct_size_t(struct nvsp_1_message_send_receive_buffer_complete,
+ sections, 1)) {
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
msglen);
return;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index ee2c3cf4df36..da3a97a65507 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -799,6 +799,12 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
+
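+	/* forward events that ipvlan uppers must also handle */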
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ call_netdevice_notifiers(event, ipvlan->dev);
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1993b90b1a5f..c8840c3b9a1b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -264,13 +264,12 @@ static int __init blackhole_netdev_init(void)
if (!blackhole_netdev)
return -ENOMEM;
- rtnl_lock();
+ rtnl_net_lock(&init_net);
dev_init_scheduler(blackhole_netdev);
dev_activate(blackhole_netdev);
- rtnl_unlock();
+ rtnl_net_unlock(&init_net);
blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
- dev_net_set(blackhole_netdev, &init_net);
return 0;
}
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index d2b3f5a59141..e3dcdeacc12c 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -177,8 +177,7 @@ static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
return mcli;
err:
if (mcli) {
- if (mcli->client)
- i2c_unregister_device(mcli->client);
+ i2c_unregister_device(mcli->client);
kfree(mcli);
}
return ERR_PTR(rc);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 2beb83154d39..cb53dccbde1a 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -17,37 +17,20 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
{
struct cavium_mdiobus *bus;
struct mii_bus *mii_bus;
- struct resource *res_mem;
- resource_size_t mdio_phys;
- resource_size_t regsize;
union cvmx_smix_en smi_en;
- int err = -ENOENT;
+ int err;
mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus));
if (!mii_bus)
return -ENOMEM;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "found no memory resource\n");
- return -ENXIO;
- }
-
bus = mii_bus->priv;
bus->mii_bus = mii_bus;
- mdio_phys = res_mem->start;
- regsize = resource_size(res_mem);
- if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize,
- res_mem->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- return -ENXIO;
- }
-
- bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize);
- if (!bus->register_base) {
+ bus->register_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bus->register_base)) {
dev_err(&pdev->dev, "dev_ioremap failed\n");
- return -ENOMEM;
+ return PTR_ERR(bus->register_base);
}
smi_en.u64 = 0;
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 22680f47385d..37bc3131d31a 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -213,6 +213,9 @@ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
lp_advertising = 0;
}
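+	/* without link, the latched PHY speed is not meaningful */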
+ if (!(bmsr & BMSR_LSTATUS))
+ cmd->base.speed = SPEED_UNKNOWN;
+
mii->full_duplex = cmd->base.duplex;
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4ea44a2f48f7..86ab4a42769a 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -36,6 +36,7 @@
#include <linux/inet.h>
#include <linux/configfs.h>
#include <linux/etherdevice.h>
+#include <linux/u64_stats_sync.h>
#include <linux/utsname.h>
#include <linux/rtnetlink.h>
@@ -90,6 +91,12 @@ static DEFINE_MUTEX(target_cleanup_list_lock);
*/
static struct console netconsole_ext;
+struct netconsole_target_stats {
+ u64_stats_t xmit_drop_count;
+ u64_stats_t enomem_count;
+ struct u64_stats_sync syncp;
+};
+
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
@@ -97,6 +104,7 @@ static struct console netconsole_ext;
* @userdata_group: Links to the userdata configfs hierarchy
* @userdata_complete: Cached, formatted string of append
* @userdata_length: String length of userdata_complete
+ * @stats: Packet send stats for the target. Used for debugging.
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -124,6 +132,7 @@ struct netconsole_target {
char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
size_t userdata_length;
#endif
+ struct netconsole_target_stats stats;
bool enabled;
bool extended;
bool release;
@@ -262,6 +271,7 @@ static void netconsole_process_cleanups_core(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | transmit_errors
* | userdata/
* | <key>/
* | value
@@ -371,6 +381,21 @@ static ssize_t remote_mac_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac);
}
+static ssize_t transmit_errors_show(struct config_item *item, char *buf)
+{
+ struct netconsole_target *nt = to_target(item);
+ u64 xmit_drop_count, enomem_count;
+ unsigned int start;
+
+ do {
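+	/* snapshot both counters consistently via the stats sync */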
+ start = u64_stats_fetch_begin(&nt->stats.syncp);
+ xmit_drop_count = u64_stats_read(&nt->stats.xmit_drop_count);
+ enomem_count = u64_stats_read(&nt->stats.enomem_count);
+ } while (u64_stats_fetch_retry(&nt->stats.syncp, start));
+
+ return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count);
+}
+
/*
* This one is special -- targets created through the configfs interface
* are not enabled (and the corresponding netpoll activated) by default.
@@ -705,7 +730,7 @@ static void update_userdata(struct netconsole_target *nt)
struct userdatum *udm_item;
struct config_item *item;
- if (child_count >= MAX_USERDATA_ITEMS)
+ if (WARN_ON_ONCE(child_count >= MAX_USERDATA_ITEMS))
break;
child_count++;
@@ -842,6 +867,7 @@ CONFIGFS_ATTR(, remote_ip);
CONFIGFS_ATTR_RO(, local_mac);
CONFIGFS_ATTR(, remote_mac);
CONFIGFS_ATTR(, release);
+CONFIGFS_ATTR_RO(, transmit_errors);
static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_enabled,
@@ -854,6 +880,7 @@ static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_remote_ip,
&attr_local_mac,
&attr_remote_mac,
+ &attr_transmit_errors,
NULL,
};
@@ -1058,6 +1085,33 @@ static struct notifier_block netconsole_netdev_notifier = {
.notifier_call = netconsole_netdev_event,
};
+/**
+ * send_udp - Wrapper for netpoll_send_udp that counts errors
+ * @nt: target to send message to
+ * @msg: message to send
+ * @len: length of message
+ *
+ * Calls netpoll_send_udp and classifies the return value. If an error
+ * occurred it increments statistics in nt->stats accordingly.
+ * Errors are only counted when CONFIG_NETCONSOLE_DYNAMIC is enabled.
+ */
+static void send_udp(struct netconsole_target *nt, const char *msg, int len)
+{
+ int result = netpoll_send_udp(&nt->np, msg, len);
+
+ if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) {
+ if (result == NET_XMIT_DROP) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.xmit_drop_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ } else if (result == -ENOMEM) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.enomem_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ }
+ }
+}
+
static void send_msg_no_fragmentation(struct netconsole_target *nt,
const char *msg,
int msg_len,
@@ -1085,7 +1139,7 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt,
MAX_PRINT_CHUNK - msg_len,
"%s", userdata);
- netpoll_send_udp(&nt->np, buf, msg_len);
+ send_udp(nt, buf, msg_len);
}
static void append_release(char *buf)
@@ -1178,7 +1232,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
this_offset += this_chunk;
}
- netpoll_send_udp(&nt->np, buf, this_header + this_offset);
+ send_udp(nt, buf, this_header + this_offset);
offset += this_offset;
}
}
@@ -1288,7 +1342,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
tmp = msg;
for (left = len; left;) {
frag = min(left, MAX_PRINT_CHUNK);
- netpoll_send_udp(&nt->np, tmp, frag);
+ send_udp(nt, tmp, frag);
tmp += frag;
left -= frag;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 5fe1eaef99b5..3b23f3d3ca2b 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -2,8 +2,8 @@
// Copyright (c) 2020 Facebook
#include <linux/debugfs.h>
-#include <linux/ethtool.h>
#include <linux/random.h>
+#include <net/netdev_queues.h>
#include "netdevsim.h"
@@ -72,6 +72,10 @@ static void nsim_get_ringparam(struct net_device *dev,
struct netdevsim *ns = netdev_priv(dev);
memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring));
+ kernel_ring->hds_thresh_max = NSIM_HDS_THRESHOLD_MAX;
+
+ if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
}
static int nsim_set_ringparam(struct net_device *dev,
@@ -103,10 +107,10 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct netdevsim *ns = netdev_priv(dev);
int err;
- mutex_lock(&dev->lock);
+ netdev_lock(dev);
err = netif_set_real_num_queues(dev, ch->combined_count,
ch->combined_count);
- mutex_unlock(&dev->lock);
+ netdev_unlock(dev);
if (err)
return err;
@@ -161,6 +165,8 @@ static int nsim_get_ts_info(struct net_device *dev,
static const struct ethtool_ops nsim_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_pause_stats = nsim_get_pause_stats,
.get_pauseparam = nsim_get_pauseparam,
.set_pauseparam = nsim_set_pauseparam,
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e068a9761c09..42f247cbdcee 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -15,11 +15,13 @@
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/net_shaper.h>
@@ -29,6 +31,8 @@
#include "netdevsim.h"
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
+
#define NSIM_RING_SIZE 256
static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
@@ -54,6 +58,7 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
+ struct netdev_config *cfg;
struct nsim_rq *rq;
int rxq;
@@ -69,7 +74,14 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq >= peer_dev->num_rx_queues)
rxq = rxq % peer_dev->num_rx_queues;
- rq = &peer_ns->rq[rxq];
+ rq = peer_ns->rq[rxq];
+
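+	/* linearize unless HDS is enabled and the packet reaches the threshold */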
+ cfg = peer_dev->cfg;
+ if (skb_is_nonlinear(skb) &&
+ (cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED ||
+ (cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ cfg->hds_thresh > len)))
+ skb_linearize(skb);
skb_tx_timestamp(skb);
if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
@@ -359,25 +371,24 @@ static int nsim_poll(struct napi_struct *napi, int budget)
return done;
}
-static int nsim_create_page_pool(struct nsim_rq *rq)
+static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi)
{
- struct page_pool_params p = {
+ struct page_pool_params params = {
.order = 0,
.pool_size = NSIM_RING_SIZE,
.nid = NUMA_NO_NODE,
- .dev = &rq->napi.dev->dev,
- .napi = &rq->napi,
+ .dev = &napi->dev->dev,
+ .napi = napi,
.dma_dir = DMA_BIDIRECTIONAL,
- .netdev = rq->napi.dev,
+ .netdev = napi->dev,
};
+ struct page_pool *pool;
- rq->page_pool = page_pool_create(&p);
- if (IS_ERR(rq->page_pool)) {
- int err = PTR_ERR(rq->page_pool);
+ pool = page_pool_create(&params);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- rq->page_pool = NULL;
- return err;
- }
+ *p = pool;
return 0;
}
@@ -388,15 +399,15 @@ static int nsim_init_napi(struct netdevsim *ns)
int err, i;
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- netif_napi_add(dev, &rq->napi, nsim_poll);
+ netif_napi_add_config(dev, &rq->napi, nsim_poll, i);
}
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- err = nsim_create_page_pool(rq);
+ err = nsim_create_page_pool(&rq->page_pool, &rq->napi);
if (err)
goto err_pp_destroy;
}
@@ -405,12 +416,12 @@ static int nsim_init_napi(struct netdevsim *ns)
err_pp_destroy:
while (i--) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
for (i = 0; i < dev->num_rx_queues; i++)
- __netif_napi_del(&ns->rq[i].napi);
+ __netif_napi_del(&ns->rq[i]->napi);
return err;
}
@@ -421,7 +432,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
napi_enable(&rq->napi);
@@ -448,7 +459,7 @@ static void nsim_del_napi(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
napi_disable(&rq->napi);
__netif_napi_del(&rq->napi);
@@ -456,8 +467,8 @@ static void nsim_del_napi(struct netdevsim *ns)
synchronize_net();
for (i = 0; i < dev->num_rx_queues; i++) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
}
@@ -595,6 +606,182 @@ static const struct netdev_stat_ops nsim_stat_ops = {
.get_base_stats = nsim_get_base_stats,
};
+static struct nsim_rq *nsim_queue_alloc(void)
+{
+ struct nsim_rq *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL_ACCOUNT);
+ if (!rq)
+ return NULL;
+
+ skb_queue_head_init(&rq->skb_queue);
+ return rq;
+}
+
+static void nsim_queue_free(struct nsim_rq *rq)
+{
+ skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
+ kfree(rq);
+}
+
+/* Queue reset mode is controlled by ns->rq_reset_mode.
+ * - normal - new NAPI new pool (old NAPI enabled when new added)
+ * - mode 1 - allocate new pool (NAPI is only disabled / enabled)
+ * - mode 2 - new NAPI new pool (old NAPI removed before new added)
+ * - mode 3 - new NAPI new pool (old NAPI disabled when new added)
+ */
+struct nsim_queue_mem {
+ struct nsim_rq *rq;
+ struct page_pool *pp;
+};
+
+static int
+nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ if (ns->rq_reset_mode > 3)
+ return -EINVAL;
+
+ if (ns->rq_reset_mode == 1)
+ return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+
+ qmem->rq = nsim_queue_alloc();
+ if (!qmem->rq)
+ return -ENOMEM;
+
+ err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi);
+ if (err)
+ goto err_free;
+
+ if (!ns->rq_reset_mode)
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+
+ return 0;
+
+err_free:
+ nsim_queue_free(qmem->rq);
+ return err;
+}
+
+static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ page_pool_destroy(qmem->pp);
+ if (qmem->rq) {
+ if (!ns->rq_reset_mode)
+ netif_napi_del(&qmem->rq->napi);
+ page_pool_destroy(qmem->rq->page_pool);
+ nsim_queue_free(qmem->rq);
+ }
+}
+
+static int
+nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->rq_reset_mode == 1) {
+ ns->rq[idx]->page_pool = qmem->pp;
+ napi_enable(&ns->rq[idx]->napi);
+ return 0;
+ }
+
+ /* netif_napi_add()/_del() should normally be called from alloc/free,
+ * here we want to test various call orders.
+ */
+ if (ns->rq_reset_mode == 2) {
+ netif_napi_del(&ns->rq[idx]->napi);
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ } else if (ns->rq_reset_mode == 3) {
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ netif_napi_del(&ns->rq[idx]->napi);
+ }
+
+ ns->rq[idx] = qmem->rq;
+ napi_enable(&ns->rq[idx]->napi);
+
+ return 0;
+}
+
+static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ napi_disable(&ns->rq[idx]->napi);
+
+ if (ns->rq_reset_mode == 1) {
+ qmem->pp = ns->rq[idx]->page_pool;
+ page_pool_disable_direct_recycling(qmem->pp);
+ } else {
+ qmem->rq = ns->rq[idx];
+ }
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops nsim_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct nsim_queue_mem),
+ .ndo_queue_mem_alloc = nsim_queue_mem_alloc,
+ .ndo_queue_mem_free = nsim_queue_mem_free,
+ .ndo_queue_start = nsim_queue_start,
+ .ndo_queue_stop = nsim_queue_stop,
+};
+
+static ssize_t
+nsim_qreset_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ unsigned int queue, mode;
+ char buf[32];
+ ssize_t ret;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+ if (copy_from_user(buf, data, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ ret = sscanf(buf, "%u %u", &queue, &mode);
+ if (ret != 2)
+ return -EINVAL;
+
+ rtnl_lock();
+ if (!netif_running(ns->netdev)) {
+ ret = -ENETDOWN;
+ goto exit_unlock;
+ }
+
+ if (queue >= ns->netdev->real_num_rx_queues) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ns->rq_reset_mode = mode;
+ ret = netdev_rx_queue_restart(ns->netdev, queue);
+ ns->rq_reset_mode = 0;
+ if (ret)
+ goto exit_unlock;
+
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+ return ret;
+}
+
+static const struct file_operations nsim_qreset_fops = {
+ .open = simple_open,
+ .write = nsim_qreset_write,
+ .owner = THIS_MODULE,
+};
+
static ssize_t
nsim_pp_hold_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
@@ -628,7 +815,7 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
if (!netif_running(ns->netdev) && val) {
ret = -ENETDOWN;
} else if (val) {
- ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool);
+ ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool);
if (!ns->page)
ret = -ENOMEM;
} else {
@@ -677,27 +864,35 @@ static int nsim_queue_init(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
int i;
- ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq),
- GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ ns->rq = kcalloc(dev->num_rx_queues, sizeof(*ns->rq),
+ GFP_KERNEL_ACCOUNT);
if (!ns->rq)
return -ENOMEM;
- for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_head_init(&ns->rq[i].skb_queue);
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ ns->rq[i] = nsim_queue_alloc();
+ if (!ns->rq[i])
+ goto err_free_prev;
+ }
return 0;
+
+err_free_prev:
+ while (i--)
+ kfree(ns->rq[i]);
+ kfree(ns->rq);
+ return -ENOMEM;
}
-static void nsim_queue_free(struct netdevsim *ns)
+static void nsim_queue_uninit(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
int i;
for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_purge_reason(&ns->rq[i].skb_queue,
- SKB_DROP_REASON_QUEUE_PURGE);
+ nsim_queue_free(ns->rq[i]);
- kvfree(ns->rq);
+ kfree(ns->rq);
ns->rq = NULL;
}
@@ -713,6 +908,7 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
ns->phc = phc;
ns->netdev->netdev_ops = &nsim_netdev_ops;
ns->netdev->stat_ops = &nsim_stat_ops;
+ ns->netdev->queue_mgmt_ops = &nsim_queue_mgmt_ops;
err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
if (err)
@@ -741,7 +937,7 @@ err_ipsec_teardown:
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
err_rq_destroy:
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
@@ -798,6 +994,9 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir,
ns, &nsim_pp_hold_fops);
+ ns->qr_dfs = debugfs_create_file("queue_reset", 0200,
+ nsim_dev_port->ddir, ns,
+ &nsim_qreset_fops);
return ns;
@@ -811,6 +1010,7 @@ void nsim_destroy(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
struct netdevsim *peer;
+ debugfs_remove(ns->qr_dfs);
debugfs_remove(ns->pp_dfs);
rtnl_lock();
@@ -823,7 +1023,7 @@ void nsim_destroy(struct netdevsim *ns)
nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index bf02efa10956..dcf073bc4802 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
@@ -36,6 +37,8 @@
#define NSIM_IPSEC_VALID BIT(31)
#define NSIM_UDP_TUNNEL_N_PORTS 4
+#define NSIM_HDS_THRESHOLD_MAX 1024
+
struct nsim_sa {
struct xfrm_state *xs;
__be32 ipaddr[4];
@@ -101,7 +104,9 @@ struct netdevsim {
struct nsim_dev *nsim_dev;
struct nsim_dev_port *nsim_dev_port;
struct mock_phc *phc;
- struct nsim_rq *rq;
+ struct nsim_rq **rq;
+
+ int rq_reset_mode;
u64 tx_packets;
u64 tx_bytes;
@@ -134,6 +139,7 @@ struct netdevsim {
struct page *page;
struct dentry *pp_dfs;
+ struct dentry *qr_dfs;
struct nsim_ethtool ethtool;
struct netdevsim __rcu *peer;
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index c1d881dc6409..1e1b00756be7 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -338,6 +338,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
enum netkit_mode mode = NETKIT_L3;
unsigned char ifname_assign_type;
+ u16 headroom = 0, tailroom = 0;
struct ifinfomsg *ifmp = NULL;
struct net_device *peer;
char ifname[IFNAMSIZ];
@@ -371,6 +372,10 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
if (err < 0)
return err;
}
+ if (data[IFLA_NETKIT_HEADROOM])
+ headroom = nla_get_u16(data[IFLA_NETKIT_HEADROOM]);
+ if (data[IFLA_NETKIT_TAILROOM])
+ tailroom = nla_get_u16(data[IFLA_NETKIT_TAILROOM]);
}
if (ifmp && tbp[IFLA_IFNAME]) {
@@ -390,6 +395,14 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
return PTR_ERR(peer);
netif_inherit_tso_max(peer, dev);
+ if (headroom) {
+ peer->needed_headroom = headroom;
+ dev->needed_headroom = headroom;
+ }
+ if (tailroom) {
+ peer->needed_tailroom = tailroom;
+ dev->needed_tailroom = tailroom;
+ }
if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
@@ -401,6 +414,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
nk->policy = policy_peer;
nk->scrub = scrub_peer;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(peer);
@@ -426,6 +440,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
nk->policy = policy_prim;
nk->scrub = scrub_prim;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(dev);
@@ -850,7 +865,18 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
struct net_device *peer = rtnl_dereference(nk->peer);
enum netkit_action policy;
struct nlattr *attr;
- int err;
+ int err, i;
+ static const struct {
+ u32 attr;
+ char *name;
+ } fixed_params[] = {
+ { IFLA_NETKIT_MODE, "operating mode" },
+ { IFLA_NETKIT_SCRUB, "scrubbing" },
+ { IFLA_NETKIT_PEER_SCRUB, "peer scrubbing" },
+ { IFLA_NETKIT_PEER_INFO, "peer info" },
+ { IFLA_NETKIT_HEADROOM, "headroom" },
+ { IFLA_NETKIT_TAILROOM, "tailroom" },
+ };
if (!nk->primary) {
NL_SET_ERR_MSG(extack,
@@ -858,28 +884,14 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return -EACCES;
}
- if (data[IFLA_NETKIT_MODE]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE],
- "netkit link operating mode cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_INFO]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
- "netkit peer info cannot be changed after device creation");
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(fixed_params); i++) {
+ attr = data[fixed_params[i].attr];
+ if (attr) {
+ NL_SET_ERR_MSG_ATTR_FMT(extack, attr,
+ "netkit link %s cannot be changed after device creation",
+ fixed_params[i].name);
+ return -EACCES;
+ }
}
if (data[IFLA_NETKIT_POLICY]) {
@@ -914,6 +926,8 @@ static size_t netkit_get_size(const struct net_device *dev)
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_HEADROOM */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_TAILROOM */
0;
}
@@ -930,6 +944,10 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_HEADROOM, dev->needed_headroom))
+ return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_TAILROOM, dev->needed_tailroom))
+ return -EMSGSIZE;
if (peer) {
nk = netkit_priv(peer);
@@ -947,6 +965,8 @@ static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
[IFLA_NETKIT_MODE] = NLA_POLICY_MAX(NLA_U32, NETKIT_L3),
[IFLA_NETKIT_POLICY] = { .type = NLA_U32 },
[IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 },
+ [IFLA_NETKIT_HEADROOM] = { .type = NLA_U16 },
+ [IFLA_NETKIT_TAILROOM] = { .type = NLA_U16 },
[IFLA_NETKIT_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PEER_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT,
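A rough userspace sketch of setting the new headroom/tailroom attributes when creating a netkit pair over rtnetlink, using libnl-3. The IFLA_NETKIT_HEADROOM/TAILROOM constants come from this patch; the device name, sizes, and trimmed error handling are illustrative only:

	#include <errno.h>
	#include <sys/socket.h>
	#include <netlink/netlink.h>
	#include <netlink/msg.h>
	#include <netlink/attr.h>
	#include <linux/rtnetlink.h>
	#include <linux/if_link.h>

	static int netkit_create_with_room(struct nl_sock *sk)
	{
		struct ifinfomsg ifi = { .ifi_family = AF_UNSPEC };
		struct nlattr *linkinfo, *data;
		struct nl_msg *msg;

		msg = nlmsg_alloc_simple(RTM_NEWLINK, NLM_F_REQUEST |
					 NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK);
		if (!msg)
			return -ENOMEM;

		nlmsg_append(msg, &ifi, sizeof(ifi), NLMSG_ALIGNTO);
		nla_put_string(msg, IFLA_IFNAME, "nk0");

		linkinfo = nla_nest_start(msg, IFLA_LINKINFO);
		nla_put_string(msg, IFLA_INFO_KIND, "netkit");
		data = nla_nest_start(msg, IFLA_INFO_DATA);
		/* u16 attributes from this patch; the kernel applies them
		 * to both the primary device and its peer */
		nla_put_u16(msg, IFLA_NETKIT_HEADROOM, 64);
		nla_put_u16(msg, IFLA_NETKIT_TAILROOM, 32);
		nla_nest_end(msg, data);
		nla_nest_end(msg, linkinfo);

		return nl_send_sync(sk, msg);	/* sends, awaits ACK, frees msg */
	}

As the table-driven check in netkit_change_link above enforces, both attributes are create-time only and are rejected on later RTM_NEWLINK changes.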
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index b79aedad855b..e46f588cae7d 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -35,6 +35,27 @@ enum sgmii_speed {
#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
#define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs)
+static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ return LINK_INBAND_ENABLE;
+
+ default:
+ return 0;
+ }
+}
+
static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
struct phylink_link_state *state)
{
@@ -79,7 +100,7 @@ static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
state->duplex = DUPLEX_FULL;
}
-static void lynx_pcs_get_state(struct phylink_pcs *pcs,
+static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
@@ -88,7 +109,7 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_mii_c22_pcs_get_state(lynx->mdio, state);
+ phylink_mii_c22_pcs_get_state(lynx->mdio, neg_mode, state);
break;
case PHY_INTERFACE_MODE_2500BASEX:
lynx_pcs_get_state_2500basex(lynx->mdio, state);
@@ -306,15 +327,26 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
+ .pcs_inband_caps = lynx_pcs_inband_caps,
.pcs_get_state = lynx_pcs_get_state,
.pcs_config = lynx_pcs_config,
.pcs_an_restart = lynx_pcs_an_restart,
.pcs_link_up = lynx_pcs_link_up,
};
+static const phy_interface_t lynx_interfaces[] = {
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+};
+
static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
{
struct lynx_pcs *lynx;
+ int i;
lynx = kzalloc(sizeof(*lynx), GFP_KERNEL);
if (!lynx)
@@ -326,6 +358,9 @@ static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
lynx->pcs.neg_mode = true;
lynx->pcs.poll = true;
+ for (i = 0; i < ARRAY_SIZE(lynx_interfaces); i++)
+ __set_bit(lynx_interfaces[i], lynx->pcs.supported_interfaces);
+
return lynx_to_phylink_pcs(lynx);
}
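The new pcs_inband_caps operation lets the caller discover, per interface mode, whether a PCS supports running with inband signalling enabled, disabled, or both. A simplified sketch of how a consumer might check a requested mode against the reported mask (illustrative only; the real phylink logic is more involved):

	static int example_validate_inband(struct phylink_pcs *pcs,
					   phy_interface_t interface,
					   bool want_inband)
	{
		unsigned int caps = 0;

		if (pcs->ops->pcs_inband_caps)
			caps = pcs->ops->pcs_inband_caps(pcs, interface);

		/* No capability information: accept whatever was requested. */
		if (!caps)
			return 0;

		if (want_inband && !(caps & LINK_INBAND_ENABLE))
			return -EOPNOTSUPP;
		if (!want_inband && !(caps & LINK_INBAND_DISABLE))
			return -EOPNOTSUPP;

		return 0;
	}

Returning 0 from the op (the default case in the implementations above) therefore means "unknown", not "nothing supported".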
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 4f63abe638c4..7d6261dee534 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -88,7 +88,24 @@ static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs)
return container_of(pcs, struct mtk_pcs_lynxi, pcs);
}
+static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
+}
+
static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
@@ -98,7 +115,8 @@ static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
- phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
+ phylink_mii_c22_pcs_decode_state(state, neg_mode,
+ FIELD_GET(SGMII_BMSR, bm),
FIELD_GET(SGMII_LPA, adv));
}
@@ -241,6 +259,7 @@ static void mtk_pcs_lynxi_disable(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = {
+ .pcs_inband_caps = mtk_pcs_lynxi_inband_caps,
.pcs_get_state = mtk_pcs_lynxi_get_state,
.pcs_config = mtk_pcs_lynxi_config,
.pcs_an_restart = mtk_pcs_lynxi_restart_an,
@@ -290,6 +309,10 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
mpcs->pcs.poll = true;
mpcs->interface = PHY_INTERFACE_MODE_NA;
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mpcs->pcs.supported_interfaces);
+
return &mpcs->pcs;
}
EXPORT_SYMBOL(mtk_pcs_lynxi_create);
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 7246a910728d..1faa37f0e7b9 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -567,14 +567,40 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
return 0;
}
-void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
+static unsigned int xpcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ const struct dw_xpcs_compat *compat;
+
+ compat = xpcs_find_compat(xpcs, interface);
+ if (!compat)
+ return 0;
+
+ switch (compat->an_mode) {
+ case DW_AN_C73:
+ return LINK_INBAND_ENABLE;
+
+ case DW_AN_C37_SGMII:
+ case DW_AN_C37_1000BASEX:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case DW_10GBASER:
+ case DW_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
+}
+
+static void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
{
const struct dw_xpcs_compat *compat;
for (compat = xpcs->desc->compat; compat->supported; compat++)
__set_bit(compat->interface, interfaces);
}
-EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
@@ -684,7 +710,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
if (ret < 0)
return ret;
- mask = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ val = 0;
+ mask = DW_VR_MII_DIG_CTRL1_2G5_EN | DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+
if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
val = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
@@ -1004,6 +1032,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
}
static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int lpa, bmsr;
@@ -1032,7 +1061,7 @@ static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
}
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
return 0;
@@ -1060,7 +1089,7 @@ static int xpcs_get_state_2500basex(struct dw_xpcs *xpcs,
return 0;
}
-static void xpcs_get_state(struct phylink_pcs *pcs,
+static void xpcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
@@ -1088,7 +1117,7 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
"xpcs_get_state_c37_sgmii", ERR_PTR(ret));
break;
case DW_AN_C37_1000BASEX:
- ret = xpcs_get_state_c37_1000basex(xpcs, state);
+ ret = xpcs_get_state_c37_1000basex(xpcs, neg_mode, state);
if (ret)
dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
"xpcs_get_state_c37_1000basex", ERR_PTR(ret));
@@ -1306,6 +1335,7 @@ static const struct dw_xpcs_desc xpcs_desc_list[] = {
static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_validate = xpcs_validate,
+ .pcs_inband_caps = xpcs_inband_caps,
.pcs_pre_config = xpcs_pre_config,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
@@ -1418,6 +1448,8 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev)
if (ret)
goto out_clear_clks;
+ xpcs_get_interfaces(xpcs, xpcs->pcs.supported_interfaces);
+
if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
xpcs->pcs.poll = false;
else
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 69434fd13f96..68d0d9e92a22 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -206,8 +206,8 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(dev_net(dev), pfcp_net_id);
- list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+ pn = net_generic(net, pfcp_net_id);
+ list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
@@ -224,7 +224,7 @@ static void pfcp_dellink(struct net_device *dev, struct list_head *head)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
- list_del_rcu(&pfcp->list);
+ list_del(&pfcp->list);
unregister_netdevice_queue(dev, head);
}
@@ -247,11 +247,16 @@ static int __net_init pfcp_net_init(struct net *net)
static void __net_exit pfcp_net_exit(struct net *net)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
- struct pfcp_dev *pfcp;
+ struct pfcp_dev *pfcp, *pfcp_next;
+ struct net_device *dev;
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &pfcp_link_ops)
+ pfcp_dellink(dev, &list);
+
+ list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
pfcp_dellink(pfcp->dev, &list);
unregister_netdevice_many(&list);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 15828f4710a9..41c15a2c2037 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -287,8 +287,15 @@ config MICROCHIP_PHY
config MICROCHIP_T1_PHY
tristate "Microchip T1 PHYs"
+ select MICROCHIP_PHY_RDS_PTP if NETWORK_PHY_TIMESTAMPING
+ depends on PTP_1588_CLOCK_OPTIONAL
help
- Supports the LAN87XX PHYs.
+ Supports the LAN8XXX PHYs.
+
+config MICROCHIP_PHY_RDS_PTP
+ tristate
+ help
+ Currently supports the LAN887X T1 PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
@@ -343,10 +350,7 @@ config QSEMI_PHY
help
Currently supports the qs6612
-config REALTEK_PHY
- tristate "Realtek PHYs"
- help
- Supports the Realtek 821x PHY.
+source "drivers/net/phy/realtek/Kconfig"
config RENESAS_PHY
tristate "Renesas PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e6145153e837..c8dac6e92278 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
+obj-$(CONFIG_MICROCHIP_PHY_RDS_PTP) += microchip_rds_ptp.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROCHIP_T1S_PHY) += microchip_t1s.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
@@ -94,7 +95,7 @@ obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
-obj-$(CONFIG_REALTEK_PHY) += realtek.o
+obj-$(CONFIG_REALTEK_PHY) += realtek/
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index a2a862bae2ed..7fa713ca8d45 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -1038,7 +1038,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) },
{ }
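This constification pattern repeats across the PHY drivers below: the ID tables are only consumed by MODULE_DEVICE_TABLE(), which emits module alias data at build time and never writes to the table, so the tables can safely live in rodata. The usual pairing looks like this (the PHY ID below is a placeholder):

	static const struct mdio_device_id example_tbl[] = {
		{ PHY_ID_MATCH_MODEL(0x00112233) },	/* placeholder PHY ID */
		{ }					/* sentinel */
	};
	MODULE_DEVICE_TABLE(mdio, example_tbl);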
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 85f910e2d4fb..6bb469429b9d 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -340,7 +340,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1110) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN2111) },
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index 8d076b9609fd..e9fd24cb7270 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -1075,7 +1075,7 @@ static struct phy_driver en8811h_driver[] = {
module_phy_driver(en8811h_driver);
-static struct mdio_device_id __maybe_unused en8811h_tbl[] = {
+static const struct mdio_device_id __maybe_unused en8811h_tbl[] = {
{ PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 930b15fa6ce9..75b5fe65500a 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -111,7 +111,7 @@ static struct phy_driver am79c_drivers[] = {
module_phy_driver(am79c_drivers);
-static struct mdio_device_id __maybe_unused amd_tbl[] = {
+static const struct mdio_device_id __maybe_unused amd_tbl[] = {
{ PHY_ID_AC101L, 0xfffffff0 },
{ PHY_ID_AM79C874, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index bb56a66d2a48..e42ace4e682a 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -1200,7 +1200,7 @@ static struct phy_driver aqr_driver[] = {
module_phy_driver(aqr_driver);
-static struct mdio_device_id __maybe_unused aqr_tbl[] = {
+static const struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index eb74a8cf8df1..694df1401aa2 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -121,7 +121,7 @@ static struct phy_driver asix_driver[] = {
module_phy_driver(asix_driver);
-static struct mdio_device_id __maybe_unused asix_tbl[] = {
+static const struct mdio_device_id __maybe_unused asix_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
{ PHY_ID_ASIX_AX88796B, 0xfffffff0 },
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index da8f7cb41b44..15cbef8202bc 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -278,7 +278,7 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
}
};
-static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
{ PHY_ID_BCM_OMEGA, 0xfffffff0, },
{ }
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 2eea3d09b1e6..7969345f6b35 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -883,7 +883,7 @@ static struct phy_driver bcm54140_drivers[] = {
};
module_phy_driver(bcm54140_drivers);
-static struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
{ PHY_ID_BCM54140, BCM54140_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 0eb33be824f1..b46a736a3130 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -93,7 +93,7 @@ static struct phy_driver bcm63xx_driver[] = {
module_phy_driver(bcm63xx_driver);
-static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
{ 0x00406000, 0xfffffc00 },
{ 0x002bdc00, 0xfffffc00 },
{ }
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 97638ba7ae85..00e8fa14aa77 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -929,7 +929,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_16NM_EPHY(PHY_ID_BCM7712, "Broadcom BCM7712"),
};
-static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM72116, 0xfffffff0, },
{ PHY_ID_BCM72165, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index 97da3aee4942..d7f7cc44c532 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -235,11 +235,21 @@ static int bcm84881_read_status(struct phy_device *phydev)
return genphy_c45_read_mdix(phydev);
}
+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide an SGMII
+ * or 802.3z control word, so inband will not work.
+ */
+static unsigned int bcm84881_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static struct phy_driver bcm84881_drivers[] = {
{
.phy_id = 0xae025150,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM84881",
+ .inband_caps = bcm84881_inband_caps,
.config_init = bcm84881_config_init,
.probe = bcm84881_probe,
.get_features = bcm84881_get_features,
@@ -252,7 +262,7 @@ static struct phy_driver bcm84881_drivers[] = {
module_phy_driver(bcm84881_drivers);
/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
-static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
{ 0xae025150, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index ddded162c44c..22edb7e4c1a1 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -1717,7 +1717,7 @@ static struct phy_driver broadcom_drivers[] = {
module_phy_driver(broadcom_drivers);
-static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+static const struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
{ PHY_ID_BCM54210E, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index ef5f412e101f..d87cf8b94cf8 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -145,7 +145,7 @@ static struct phy_driver cis820x_driver[] = {
module_phy_driver(cis820x_driver);
-static struct mdio_device_id __maybe_unused cicada_tbl[] = {
+static const struct mdio_device_id __maybe_unused cicada_tbl[] = {
{ 0x000fc410, 0x000ffff0 },
{ 0x000fc440, 0x000fffc0 },
{ }
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 40514a94e6ff..3b65f37f1c57 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -87,7 +87,7 @@ static struct phy_driver cortina_driver[] = {
module_phy_driver(cortina_driver);
-static struct mdio_device_id __maybe_unused cortina_tbl[] = {
+static const struct mdio_device_id __maybe_unused cortina_tbl[] = {
{ PHY_ID_CS4340, 0xffffffff},
{},
};
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 4ac4bce1bf32..fa3692508f16 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -209,7 +209,7 @@ static struct phy_driver dm91xx_driver[] = {
module_phy_driver(dm91xx_driver);
-static struct mdio_device_id __maybe_unused davicom_tbl[] = {
+static const struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
{ 0x0181b8b0, 0x0ffffff0 },
{ 0x0181b8a0, 0x0ffffff0 },
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 075d2beea716..85e231451093 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1548,7 +1548,7 @@ MODULE_LICENSE("GPL");
module_init(dp83640_init);
module_exit(dp83640_exit);
-static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83640_tbl[] = {
{ DP83640_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index cf8b6d0bfaa9..6599feca1967 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -22,8 +22,6 @@
#define DP83826C_PHY_ID 0x2000a130
#define DP83826NC_PHY_ID 0x2000a110
-#define DP83822_DEVADDR 0x1f
-
#define MII_DP83822_CTRL_2 0x0a
#define MII_DP83822_PHYSTS 0x10
#define MII_DP83822_PHYSCR 0x11
@@ -32,6 +30,10 @@
#define MII_DP83822_FCSCR 0x14
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
+#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LEDCFG1 0x460
+#define MII_DP83822_IOCTRL1 0x462
+#define MII_DP83822_IOCTRL2 0x463
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
@@ -106,6 +108,50 @@
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
+/* MLEDCR bits */
+#define DP83822_MLEDCR_CFG GENMASK(6, 3)
+#define DP83822_MLEDCR_ROUTE GENMASK(1, 0)
+#define DP83822_MLEDCR_ROUTE_LED_0 DP83822_MLEDCR_ROUTE
+
+/* LEDCFG1 bits */
+#define DP83822_LEDCFG1_LED1_CTRL GENMASK(11, 8)
+#define DP83822_LEDCFG1_LED3_CTRL GENMASK(7, 4)
+
+/* IOCTRL1 bits */
+#define DP83822_IOCTRL1_GPIO3_CTRL GENMASK(10, 8)
+#define DP83822_IOCTRL1_GPIO3_CTRL_LED3 BIT(0)
+#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+
+/* IOCTRL2 bits */
+#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
+#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF GENMASK(1, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_MLED BIT(0)
+
+#define DP83822_CLK_SRC_MAC_IF 0x0
+#define DP83822_CLK_SRC_XI 0x1
+#define DP83822_CLK_SRC_INT_REF 0x2
+#define DP83822_CLK_SRC_RMII_MASTER_MODE_REF 0x4
+#define DP83822_CLK_SRC_FREE_RUNNING 0x6
+#define DP83822_CLK_SRC_RECOVERED 0x7
+
+#define DP83822_LED_FN_LINK 0x0 /* Link established */
+#define DP83822_LED_FN_RX_TX 0x1 /* Receive or Transmit activity */
+#define DP83822_LED_FN_TX 0x2 /* Transmit activity */
+#define DP83822_LED_FN_RX 0x3 /* Receive activity */
+#define DP83822_LED_FN_COLLISION 0x4 /* Collision detected */
+#define DP83822_LED_FN_LINK_100_BTX 0x5 /* 100 BTX link established */
+#define DP83822_LED_FN_LINK_10_BT 0x6 /* 10BT link established */
+#define DP83822_LED_FN_FULL_DUPLEX 0x7 /* Full duplex */
+#define DP83822_LED_FN_LINK_RX_TX 0x8 /* Link established, blink for rx or tx activity */
+#define DP83822_LED_FN_ACTIVE_STRETCH 0x9 /* Active Stretch Signal */
+#define DP83822_LED_FN_MII_LINK 0xa /* MII LINK (100BT+FD) */
+#define DP83822_LED_FN_LPI_MODE 0xb /* LPI Mode (EEE) */
+#define DP83822_LED_FN_RX_TX_ERR 0xc /* TX/RX MII Error */
+#define DP83822_LED_FN_LINK_LOST 0xd /* Link Lost */
+#define DP83822_LED_FN_PRBS_ERR 0xe /* Blink for PRBS error */
+
/* SOR1 mode */
#define DP83822_STRAP_MODE1 0
#define DP83822_STRAP_MODE2 BIT(0)
@@ -134,6 +180,13 @@
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
+#define DP83822_MAX_LED_PINS 4
+
+#define DP83822_LED_INDEX_LED_0 0
+#define DP83822_LED_INDEX_LED_1_GPIO1 1
+#define DP83822_LED_INDEX_COL_GPIO2 2
+#define DP83822_LED_INDEX_RX_D3_GPIO3 3
+
struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
@@ -141,6 +194,9 @@ struct dp83822_private {
u8 cfg_dac_minus;
u8 cfg_dac_plus;
struct ethtool_wolinfo wol;
+ bool set_gpio2_clk_out;
+ u32 gpio2_clk_out;
+ bool led_pin_enable[DP83822_MAX_LED_PINS];
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -159,14 +215,14 @@ static int dp83822_config_wol(struct phy_device *phydev,
/* MAC addresses start with byte 5, but are stored in mac[0].
* 822 PHYs store bytes 4|5, 2|3, 0|1
*/
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA1,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA1,
(mac[1] << 8) | mac[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA2,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA2,
(mac[3] << 8) | mac[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA3,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA3,
(mac[5] << 8) | mac[4]);
- value = phy_read_mmd(phydev, DP83822_DEVADDR,
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG);
if (wol->wolopts & WAKE_MAGIC)
value |= DP83822_WOL_MAGIC_EN;
@@ -174,13 +230,13 @@ static int dp83822_config_wol(struct phy_device *phydev,
value &= ~DP83822_WOL_MAGIC_EN;
if (wol->wolopts & WAKE_MAGICSECURE) {
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1,
(wol->sopass[1] << 8) | wol->sopass[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2,
(wol->sopass[3] << 8) | wol->sopass[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3,
(wol->sopass[5] << 8) | wol->sopass[4]);
value |= DP83822_WOL_SECURE_ON;
@@ -194,10 +250,10 @@ static int dp83822_config_wol(struct phy_device *phydev,
value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
DP83822_WOL_CLR_INDICATION;
- return phy_write_mmd(phydev, DP83822_DEVADDR,
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG, value);
} else {
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG,
DP83822_WOL_EN |
DP83822_WOL_MAGIC_EN |
@@ -226,23 +282,23 @@ static void dp83822_get_wol(struct phy_device *phydev,
wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE);
wol->wolopts = 0;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (value & DP83822_WOL_MAGIC_EN)
wol->wolopts |= WAKE_MAGIC;
if (value & DP83822_WOL_SECURE_ON) {
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1);
wol->sopass[0] = (sopass_val & 0xff);
wol->sopass[1] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2);
wol->sopass[2] = (sopass_val & 0xff);
wol->sopass[3] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3);
wol->sopass[4] = (sopass_val & 0xff);
wol->sopass[5] = (sopass_val >> 8);
@@ -405,6 +461,48 @@ static int dp83822_read_status(struct phy_device *phydev)
return 0;
}
+static int dp83822_config_init_leds(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int ret;
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR,
+ DP83822_MLEDCR_ROUTE,
+ FIELD_PREP(DP83822_MLEDCR_ROUTE,
+ DP83822_MLEDCR_ROUTE_LED_0));
+ if (ret)
+ return ret;
+ } else if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_MLED));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_1_GPIO1]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO1_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO1_CTRL,
+ DP83822_IOCTRL1_GPIO1_CTRL_LED_1));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO3_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO3_CTRL,
+ DP83822_IOCTRL1_GPIO3_CTRL_LED3));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
@@ -415,6 +513,19 @@ static int dp83822_config_init(struct phy_device *phydev)
int err = 0;
int bmcr;
+ if (dp83822->set_gpio2_clk_out)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL |
+ DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF) |
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ dp83822->gpio2_clk_out));
+
+ err = dp83822_config_init_leds(phydev);
+ if (err)
+ return err;
+
if (phy_interface_is_rgmii(phydev)) {
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
true);
@@ -430,18 +541,18 @@ static int dp83822_config_init(struct phy_device *phydev)
if (tx_int_delay <= 0)
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
if (err)
return err;
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
return err;
} else {
- err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
@@ -496,7 +607,7 @@ static int dp83822_config_init(struct phy_device *phydev)
return err;
if (dp83822->fx_signal_det_low) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_GENCFG,
DP83822_SIG_DET_LOW);
if (err)
@@ -514,10 +625,10 @@ static int dp8382x_config_rmii_mode(struct phy_device *phydev)
if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
if (strcmp(of_val, "master") == 0) {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else if (strcmp(of_val, "slave") == 0) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else {
phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
@@ -539,7 +650,7 @@ static int dp83826_config_init(struct phy_device *phydev)
int ret;
if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -548,7 +659,7 @@ static int dp83826_config_init(struct phy_device *phydev)
if (ret)
return ret;
} else {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -560,7 +671,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG1, mask, val);
if (ret)
return ret;
@@ -568,7 +679,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -577,7 +688,7 @@ static int dp83826_config_init(struct phy_device *phydev)
val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -609,10 +720,66 @@ static int dp83822_phy_reset(struct phy_device *phydev)
}
#ifdef CONFIG_OF_MDIO
+static int dp83822_of_init_leds(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device_node *leds;
+ u32 index;
+ int err;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node_scoped(leds, led) {
+ err = of_property_read_u32(led, "reg", &index);
+ if (err) {
+ of_node_put(leds);
+ return err;
+ }
+
+ if (index <= DP83822_LED_INDEX_RX_D3_GPIO3) {
+ dp83822->led_pin_enable[index] = true;
+ } else {
+ of_node_put(leds);
+ return -EINVAL;
+ }
+ }
+
+ of_node_put(leds);
+ /* LED_0 and COL(GPIO2) use the MLED function. MLED can be routed to
+ * only one of these two pins at a time.
+ */
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0] &&
+ dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ phydev_err(phydev, "LED_0 and COL(GPIO2) cannot be used as LED output at the same time\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2] &&
+ dp83822->set_gpio2_clk_out) {
+ phydev_err(phydev, "COL(GPIO2) cannot be used as LED output, already used as clock output\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3] &&
+ phydev->interface != PHY_INTERFACE_MODE_RMII) {
+ phydev_err(phydev, "RX_D3 can only be used as LED output when in RMII mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dp83822_of_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only be enabled if FX_EN
@@ -625,7 +792,30 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->fx_enabled = device_property_present(dev,
"ti,fiber-mode");
- return 0;
+ if (!device_property_read_string(dev, "ti,gpio2-clk-out", &of_val)) {
+ if (strcmp(of_val, "mac-if") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_MAC_IF;
+ } else if (strcmp(of_val, "xi") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_XI;
+ } else if (strcmp(of_val, "int-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_INT_REF;
+ } else if (strcmp(of_val, "rmii-master-mode-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RMII_MASTER_MODE_REF;
+ } else if (strcmp(of_val, "free-running") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_FREE_RUNNING;
+ } else if (strcmp(of_val, "recovered") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RECOVERED;
+ } else {
+ phydev_err(phydev,
+ "Invalid value for ti,gpio2-clk-out property (%s)\n",
+ of_val);
+ return -EINVAL;
+ }
+
+ dp83822->set_gpio2_clk_out = true;
+ }
+
+ return dp83822_of_init_leds(phydev);
}
static int dp83826_to_dac_minus_one_regval(int percent)
@@ -673,7 +863,7 @@ static int dp83822_read_straps(struct phy_device *phydev)
int fx_enabled, fx_sd_enable;
int val;
- val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_SOR1);
if (val < 0)
return val;
@@ -723,7 +913,9 @@ static int dp83822_probe(struct phy_device *phydev)
if (ret)
return ret;
- dp83822_of_init(phydev);
+ ret = dp83822_of_init(phydev);
+ if (ret)
+ return ret;
if (dp83822->fx_enabled)
phydev->port = PORT_FIBRE;
@@ -748,7 +940,7 @@ static int dp83822_suspend(struct phy_device *phydev)
{
int value;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (!(value & DP83822_WOL_EN))
genphy_suspend(phydev);
@@ -762,14 +954,138 @@ static int dp83822_resume(struct phy_device *phydev)
genphy_resume(phydev);
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, value |
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG, value |
DP83822_WOL_CLR_INDICATION);
return 0;
}
+static int dp83822_led_mode(u8 index, unsigned long rules)
+{
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return DP83822_LED_FN_LINK;
+ case BIT(TRIGGER_NETDEV_LINK_10):
+ return DP83822_LED_FN_LINK_10_BT;
+ case BIT(TRIGGER_NETDEV_LINK_100):
+ return DP83822_LED_FN_LINK_100_BTX;
+ case BIT(TRIGGER_NETDEV_FULL_DUPLEX):
+ return DP83822_LED_FN_FULL_DUPLEX;
+ case BIT(TRIGGER_NETDEV_TX):
+ return DP83822_LED_FN_TX;
+ case BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX_TX;
+ case BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR):
+ return DP83822_LED_FN_RX_TX_ERR;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_LINK_RX_TX;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83822_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ return 0;
+}
+
+static int dp83822_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_MLEDCR, DP83822_MLEDCR_CFG,
+ FIELD_PREP(DP83822_MLEDCR_CFG, mode));
+ else if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED1_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED1_CTRL,
+ mode));
+ else
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED3_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED3_CTRL,
+ mode));
+}
+
+static int dp83822_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2) {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR);
+ if (val < 0)
+ return val;
+
+ val = FIELD_GET(DP83822_MLEDCR_CFG, val);
+ } else {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LEDCFG1);
+ if (val < 0)
+ return val;
+
+ if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ val = FIELD_GET(DP83822_LEDCFG1_LED1_CTRL, val);
+ else
+ val = FIELD_GET(DP83822_LEDCFG1_LED3_CTRL, val);
+ }
+
+ switch (val) {
+ case DP83822_LED_FN_LINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ case DP83822_LED_FN_LINK_10_BT:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case DP83822_LED_FN_LINK_100_BTX:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case DP83822_LED_FN_FULL_DUPLEX:
+ *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+ break;
+ case DP83822_LED_FN_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case DP83822_LED_FN_RX:
+ *rules = BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX_ERR:
+ *rules = BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR);
+ break;
+ case DP83822_LED_FN_LINK_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
#define DP83822_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -785,6 +1101,9 @@ static int dp83822_resume(struct phy_device *phydev)
.handle_interrupt = dp83822_handle_interrupt, \
.suspend = dp83822_suspend, \
.resume = dp83822_resume, \
+ .led_hw_is_supported = dp83822_led_hw_is_supported, \
+ .led_hw_control_set = dp83822_led_hw_control_set, \
+ .led_hw_control_get = dp83822_led_hw_control_get, \
}
#define DP83825_PHY_DRIVER(_id, _name) \
@@ -830,7 +1149,7 @@ static struct phy_driver dp83822_driver[] = {
};
module_phy_driver(dp83822_driver);
-static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
{ DP83826C_PHY_ID, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 351411f0aa6f..d88b1999d596 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -123,7 +123,7 @@ static int dp83848_config_init(struct phy_device *phydev)
return 0;
}
-static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TI_DP83620_PHY_ID, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 4120385c5a79..c1451df430ac 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1210,7 +1210,7 @@ static struct phy_driver dp83867_driver[] = {
};
module_phy_driver(dp83867_driver);
-static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83867_tbl[] = {
{ DP83867_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index b6b38caf9c0e..a62cd838a9ea 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -928,7 +928,7 @@ static struct phy_driver dp83869_driver[] = {
};
module_phy_driver(dp83869_driver);
-static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83869_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
{ PHY_ID_MATCH_MODEL(DP83561_PHY_ID) },
{ }
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 7ea32fb77190..e480c2a07450 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -403,7 +403,7 @@ static struct phy_driver dp83811_driver[] = {
};
module_phy_driver(dp83811_driver);
-static struct mdio_device_id __maybe_unused dp83811_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83811_tbl[] = {
{ DP83TC811_PHY_ID, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 92aa3a2b9744..a42af9c168ec 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -34,6 +34,29 @@
#define DP83TD510E_CTRL_HW_RESET BIT(15)
#define DP83TD510E_CTRL_SW_RESET BIT(14)
+/*
+ * DP83TD510E_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). These registers store
+ * 32-bit or 16-bit counters for TX and RX statistics and must be read in
+ * sequence to ensure the counters are cleared correctly.
+ *
+ * - DP83TD510E_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TD510E_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_6: Contains RX error packet count.
+ *
+ * Keeping the register names as defined in the datasheet helps maintain
+ * clarity and alignment with the documentation.
+ */
+#define DP83TD510E_PKT_STAT_1 0x12b
+#define DP83TD510E_PKT_STAT_2 0x12c
+#define DP83TD510E_PKT_STAT_3 0x12d
+#define DP83TD510E_PKT_STAT_4 0x12e
+#define DP83TD510E_PKT_STAT_5 0x12f
+#define DP83TD510E_PKT_STAT_6 0x130
+
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
@@ -58,8 +81,16 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+struct dp83td510_stats {
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
struct dp83td510_priv {
bool alcd_test_active;
+ struct dp83td510_stats stats;
};
/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
@@ -177,6 +208,85 @@ struct dp83td510_priv {
#define DP83TD510E_ALCD_COMPLETE BIT(15)
#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+/**
+ * dp83td510_update_stats - Update the PHY statistics for the DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_update_stats(struct phy_device *phydev)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* The DP83TD510E_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TD510E_PKT_STAT_1 to DP83TD510E_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TD510E_PKT_STAT_4 to DP83TD510E_PKT_STAT_6
+ *
+ * Registers in each group are cleared only after reading them in a
+ * plain sequence (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from the sequence, such as reading 1, 2, 1, 2, 3, will
+ * prevent the group from being cleared. Additionally, the counters
+ * for a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static void dp83td510_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -599,13 +709,15 @@ static struct phy_driver dp83td510_driver[] = {
.get_sqi_max = dp83td510_get_sqi_max,
.cable_test_start = dp83td510_cable_test_start,
.cable_test_get_status = dp83td510_cable_test_get_status,
+ .get_phy_stats = dp83td510_get_phy_stats,
+ .update_stats = dp83td510_update_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83td510_driver);
-static struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) },
{ }
};
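The update_stats/get_phy_stats split above follows a common pattern for clear-on-read hardware counters: a periodic update_stats() folds each narrow hardware delta into a 64-bit software total before the 16/32-bit counter can wrap, while get_phy_stats() only reports the cached totals. A condensed sketch of one accumulation step (the register address reuses the PKT_STAT_6 value from above purely for illustration):

	static int example_poll_rx_errors(struct phy_device *phydev, u64 *total)
	{
		int ret;

		/* Reading latches and clears the hardware counter, so each
		 * successful read returns the delta since the previous poll.
		 */
		ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, 0x130);
		if (ret < 0)
			return ret;

		*total += ret;
		return 0;
	}

This is also why the driver flushes the counters via update_stats() before any operation that resets the PHY, such as the cable test path in the dp83tg720 changes below.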
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 0ef4d7dba065..050f4537d140 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -51,6 +51,9 @@
/* Register 0x0405: Unknown Register */
#define DP83TG720S_UNKNOWN_0405 0x405
+#define DP83TG720S_LINK_QUAL_3 0x547
+#define DP83TG720S_LINK_LOSS_CNT_MASK GENMASK(15, 10)
+
/* Register 0x0576: TDR Master Link Down Control */
#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576
@@ -60,6 +63,29 @@
/* In RGMII mode, Enable or disable the internal delay for TXD */
#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+/*
+ * DP83TG720S_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). These registers store
+ * 32-bit or 16-bit counters for TX and RX statistics and must be read in
+ * sequence to ensure the counters are cleared correctly.
+ *
+ * - DP83TG720S_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TG720S_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_6: Contains RX error packet count.
+ *
+ * Keeping the register names as defined in the datasheet helps maintain
+ * clarity and alignment with the documentation.
+ */
+#define DP83TG720S_PKT_STAT_1 0x639
+#define DP83TG720S_PKT_STAT_2 0x63a
+#define DP83TG720S_PKT_STAT_3 0x63b
+#define DP83TG720S_PKT_STAT_4 0x63c
+#define DP83TG720S_PKT_STAT_5 0x63d
+#define DP83TG720S_PKT_STAT_6 0x63e
+
/* Register 0x083F: Unknown Register */
#define DP83TG720S_UNKNOWN_083F 0x83f
@@ -69,6 +95,113 @@
#define DP83TG720_SQI_MAX 7
+struct dp83tg720_stats {
+ u64 link_loss_cnt;
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
+struct dp83tg720_priv {
+ struct dp83tg720_stats stats;
+};
+
+/**
+ * dp83tg720_update_stats - Update the PHY statistics for the DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83tg720_update_stats(struct phy_device *phydev)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* Read the link loss count */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LINK_QUAL_3);
+ if (ret < 0)
+ return ret;
+ /* link_loss_cnt */
+ count = FIELD_GET(DP83TG720S_LINK_LOSS_CNT_MASK, ret);
+ priv->stats.link_loss_cnt += count;
+
+ /* The DP83TG720S_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TG720S_PKT_STAT_1 to DP83TG720S_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TG720S_PKT_STAT_4 to DP83TG720S_PKT_STAT_6
+ *
+ * Registers in each group are cleared only after reading them in a
+ * plain sequence (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from the sequence, such as reading 1, 2, 1, 2, 3, will
+ * prevent the group from being cleared. Additionally, the counters
+ * for a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static void dp83tg720_get_link_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ link_stats->link_down_events = priv->stats.link_loss_cnt;
+}
+
+static void dp83tg720_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
/**
* dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY.
* @phydev: Pointer to the phy_device structure.
@@ -182,6 +315,11 @@ static int dp83tg720_cable_test_get_status(struct phy_device *phydev,
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
return phy_init_hw(phydev);
}
@@ -217,6 +355,11 @@ static int dp83tg720_read_status(struct phy_device *phydev)
phy_sts = phy_read(phydev, DP83TG720S_MII_REG_10);
phydev->link = !!(phy_sts & DP83TG720S_LINK_STATUS);
if (!phydev->link) {
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
/* According to the "DP83TC81x, DP83TG72x Software
* Implementation Guide", the PHY needs to be reset after a
* link loss or if no link is created after at least 100ms.
@@ -341,12 +484,27 @@ static int dp83tg720_config_init(struct phy_device *phydev)
return genphy_c45_pma_baset1_read_master_slave(phydev);
}
+static int dp83tg720_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct dp83tg720_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver dp83tg720_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
.name = "TI DP83TG720S",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = dp83tg720_probe,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
@@ -355,13 +513,16 @@ static struct phy_driver dp83tg720_driver[] = {
.get_sqi_max = dp83tg720_get_sqi_max,
.cable_test_start = dp83tg720_cable_test_start,
.cable_test_get_status = dp83tg720_cable_test_get_status,
+ .get_link_stats = dp83tg720_get_link_stats,
+ .get_phy_stats = dp83tg720_get_phy_stats,
+ .update_stats = dp83tg720_update_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83tg720_driver);
-static struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index be1b71d7cab7..6cd8d77586fd 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -94,7 +94,7 @@ static struct phy_driver et1011c_driver[] = { {
module_phy_driver(et1011c_driver);
-static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
+static const struct mdio_device_id __maybe_unused et1011c_tbl[] = {
{ 0x0282f014, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index ee438b71a0b4..bbcc7d2b54cd 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -623,7 +623,7 @@ static struct phy_driver icplus_driver[] = {
module_phy_driver(icplus_driver);
-static struct mdio_device_id __maybe_unused icplus_tbl[] = {
+static const struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ PHY_ID_MATCH_MODEL(IP175C_PHY_ID) },
{ PHY_ID_MATCH_MODEL(IP1001_PHY_ID) },
{ PHY_ID_MATCH_EXACT(IP101A_PHY_ID) },
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index e6ed2413e514..a44771e8acdc 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -691,7 +691,7 @@ static struct phy_driver xway_gphy[] = {
};
module_phy_driver(xway_gphy);
-static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
{ PHY_ID_PHY11G_1_3, 0xffffffff },
{ PHY_ID_PHY22F_1_3, 0xffffffff },
{ PHY_ID_PHY11G_1_4, 0xffffffff },
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index e3bf827b7959..5251a61c8b0f 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -348,7 +348,7 @@ static struct phy_driver lxt97x_driver[] = {
module_phy_driver(lxt97x_driver);
-static struct mdio_device_id __maybe_unused lxt_tbl[] = {
+static const struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
{ 0x001378e0, 0xfffffff0 },
{ 0x00137a10, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 5107f58338af..4494b3e39ce2 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -939,7 +939,7 @@ static struct phy_driver mv88q2xxx_driver[] = {
module_phy_driver(mv88q2xxx_driver);
-static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK },
{ /*sentinel*/ }
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index 0b777cdd7078..fad2f54c1eac 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -613,7 +613,7 @@ static struct phy_driver mv2222_drivers[] = {
};
module_phy_driver(mv2222_drivers);
-static struct mdio_device_id __maybe_unused mv2222_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv2222_tbl[] = {
{ MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cd50cd6a7f75..44e1927de499 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -717,6 +717,48 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
return genphy_check_and_restart_aneg(phydev, changed);
}
+static unsigned int m88e1111_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ /* In 1000base-X and SGMII modes, the inband mode can be changed
+ * through the Fibre page BMCR ANENABLE bit.
+ */
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+
+ return 0;
+}
+
+static int m88e1111_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ u16 extsr, bmcr;
+ int err;
+
+ if (phydev->interface != PHY_INTERFACE_MODE_1000BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII)
+ return -EINVAL;
+
+ if (modes == LINK_INBAND_BYPASS)
+ extsr = MII_M1111_HWCFG_SERIAL_AN_BYPASS;
+ else
+ extsr = 0;
+
+ if (modes == LINK_INBAND_DISABLE)
+ bmcr = 0;
+ else
+ bmcr = BMCR_ANENABLE;
+
+ err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS, extsr);
+ if (err < 0)
+ return err;
+
+ return phy_modify_paged(phydev, MII_MARVELL_FIBER_PAGE, MII_BMCR,
+ BMCR_ANENABLE, bmcr);
+}
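For reference, the mode-to-register mapping the function above implements — a summary, not new code, assuming the bypass bit and the fibre-page BMCR behave as the comment in m88e1111_inband_caps() describes:

	/*
	 * requested mode        EXT_SR SERIAL_AN_BYPASS  fibre BMCR_ANENABLE
	 * LINK_INBAND_DISABLE   cleared                  cleared
	 * LINK_INBAND_ENABLE    cleared                  set
	 * LINK_INBAND_BYPASS    set                      set
	 */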
+
static int m88e1111_config_aneg(struct phy_device *phydev)
{
int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
@@ -1508,7 +1550,6 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
@@ -1518,8 +1559,7 @@ static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
/* According to the Marvell data sheet EEE must be disabled for
* Fast Link Down detection to work properly
*/
- ret = genphy_c45_ethtool_get_eee(phydev, &eee);
- if (!ret && eee.eee_enabled) {
+ if (phydev->eee_cfg.eee_enabled) {
phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
return -EBUSY;
}
@@ -3677,6 +3717,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1112",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1112_config_init,
.config_aneg = marvell_config_aneg,
.config_intr = marvell_config_intr,
@@ -3698,6 +3740,8 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -3721,6 +3765,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1111 (Finisar)",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -4143,7 +4189,7 @@ static struct phy_driver marvell_drivers[] = {
module_phy_driver(marvell_drivers);
-static struct mdio_device_id __maybe_unused marvell_tbl[] = {
+static const struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 6642eb642d4b..623bdb8466b8 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -1484,7 +1484,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
-static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv3310_tbl[] = {
{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index 38dc898eaf7b..bdf99b327029 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -1356,7 +1356,7 @@ static struct phy_driver mtk_socphy_driver[] = {
module_phy_driver(mtk_socphy_driver);
-static struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988) },
{ }
diff --git a/drivers/net/phy/mediatek/mtk-ge.c b/drivers/net/phy/mediatek/mtk-ge.c
index ed2617bc20f4..b517ca8573e7 100644
--- a/drivers/net/phy/mediatek/mtk-ge.c
+++ b/drivers/net/phy/mediatek/mtk-ge.c
@@ -93,7 +93,7 @@ static struct phy_driver mtk_gephy_driver[] = {
module_phy_driver(mtk_gephy_driver);
-static struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
+static const struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7530) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7531) },
{ }
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index bb9b33b6bce2..962ebbbc1348 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -221,7 +221,7 @@ static struct phy_driver meson_gxl_phy[] = {
},
};
-static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
+static const struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
{ PHY_ID_MATCH_VENDOR(0x01814400) },
{ PHY_ID_MATCH_VENDOR(0x01803301) },
{ }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index eeb33eb181ac..9c0b1c229af6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2006,7 +2006,7 @@ static int ksz9477_config_init(struct phy_device *phydev)
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
- linkmode_fill(phydev->eee_broken_modes);
+ phy_disable_eee(phydev);
return kszphy_config_init(phydev);
}
@@ -5689,7 +5689,7 @@ MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused micrel_tbl[] = {
+static const struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 691969a4910f..0e17cc458efd 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -548,7 +548,7 @@ static struct phy_driver microchip_phy_driver[] = {
module_phy_driver(microchip_phy_driver);
-static struct mdio_device_id __maybe_unused microchip_tbl[] = {
+static const struct mdio_device_id __maybe_unused microchip_tbl[] = {
{ 0x0007c132, 0xfffffff2 },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX) },
{ }
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
new file mode 100644
index 000000000000..3e6bf10cdeed
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024 Microchip Technology
+
+#include "microchip_rds_ptp.h"
+
+static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_read_mmd(phydev, PTP_MMD(clock), addr);
+}
+
+static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 mask, u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
+}
+
+static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_get_pulsewidth(struct phy_device *phydev,
+ struct ptp_perout_request *perout_request,
+ int *pulse_width)
+{
+ struct timespec64 ts_period;
+ s64 ts_on_nsec, period_nsec;
+ struct timespec64 ts_on;
+ static const s64 sup_on_nsecs[] = {
+ 100, /* 100ns */
+ 500, /* 500ns */
+ 1000, /* 1us */
+ 5000, /* 5us */
+ 10000, /* 10us */
+ 50000, /* 50us */
+ 100000, /* 100us */
+ 500000, /* 500us */
+ 1000000, /* 1ms */
+ 5000000, /* 5ms */
+ 10000000, /* 10ms */
+ 50000000, /* 50ms */
+ 100000000, /* 100ms */
+ 200000000, /* 200ms */
+ };
+
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ ts_on_nsec = timespec64_to_ns(&ts_on);
+ period_nsec = timespec64_to_ns(&ts_period);
+
+ if (period_nsec < 200) {
+ phydev_warn(phydev, "perout period small, minimum is 200ns\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ts_on_nsec > sup_on_nsecs[ARRAY_SIZE(sup_on_nsecs) - 1]) {
+ phydev_warn(phydev, "perout on-time too long, maximum is 200ms\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(sup_on_nsecs); i++) {
+ if (ts_on_nsec <= sup_on_nsecs[i]) {
+ *pulse_width = i;
+ break;
+ }
+ }
+
+ phydev_info(phydev, "pulse width is %d\n", *pulse_width);
+ return 0;
+}
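A worked example of the selection above: a requested on-time of 3 us (3000 ns) scans the table until it reaches the 5000 ns entry at index 3, so *pulse_width becomes 3 and the hardware emits the nearest supported pulse width at or above the request.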
+
+static int mchp_general_event_config(struct mchp_rds_ptp_clock *clock,
+ int pulse_width)
+{
+ int general_config;
+
+ general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK);
+ if (general_config < 0)
+ return general_config;
+
+ general_config &= ~MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK;
+ general_config |= MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(pulse_width);
+ general_config &= ~MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
+ general_config |= MCHP_RDS_PTP_GEN_CFG_POLARITY;
+
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK, general_config);
+}
+
+static int mchp_set_clock_reload(struct mchp_rds_ptp_clock *clock,
+ s64 period_sec, u32 period_nsec)
+{
+ int rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(period_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(period_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(period_nsec));
+ if (rc < 0)
+ return rc;
+
+ return mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(period_nsec) & 0x3fff);
+}
+
+static int mchp_set_clock_target(struct mchp_rds_ptp_clock *clock,
+ s64 start_sec, u32 start_nsec)
+{
+ int rc;
+
+ /* Set the start time */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(start_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(start_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(start_nsec));
+ if (rc < 0)
+ return rc;
+
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(start_nsec) & 0x3fff);
+}
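Both helpers split wide values across 16-bit MMD registers, masking the nanoseconds high word to 14 bits since a nanoseconds value fits in 30 bits. A short illustration of the split (the value is an example):

	u32 ns = 999999999;			/* 0x3b9ac9ff, fits in 30 bits */
	u16 ns_lo = lower_16_bits(ns);		/* 0xc9ff, bits 15:0 */
	u16 ns_hi = upper_16_bits(ns) & 0x3fff;	/* 0x3b9a, bits 29:16 */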
+
+static int mchp_rds_ptp_perout_off(struct mchp_rds_ptp_clock *clock)
+{
+ u16 general_config;
+ int rc;
+
+ /* Set the target too far in the future, effectively disabling it */
+ rc = mchp_set_clock_target(clock, 0xFFFFFFFF, 0);
+ if (rc < 0)
+ return rc;
+
+ general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK);
+ general_config |= MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK, general_config);
+ if (rc < 0)
+ return rc;
+
+ clock->mchp_rds_ptp_event = -1;
+
+ return 0;
+}
+
+static bool mchp_get_event(struct mchp_rds_ptp_clock *clock, int pin)
+{
+ if (clock->mchp_rds_ptp_event < 0 && pin == clock->event_pin) {
+ clock->mchp_rds_ptp_event = pin;
+ return true;
+ }
+
+ return false;
+}
+
+static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
+ struct ptp_perout_request *perout, int on)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(ptpci,
+ struct mchp_rds_ptp_clock,
+ caps);
+ struct phy_device *phydev = clock->phydev;
+ int ret, event_pin, pulsewidth;
+
+ /* Reject requests with unsupported flags */
+ if (perout->flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
+ perout->index);
+ if (event_pin != clock->event_pin)
+ return -EINVAL;
+
+ if (!on)
+ return mchp_rds_ptp_perout_off(clock);
+
+ if (!mchp_get_event(clock, event_pin))
+ return -EINVAL;
+
+ ret = mchp_get_pulsewidth(phydev, perout, &pulsewidth);
+ if (ret < 0)
+ return ret;
+
+ /* Configure to pulse every period */
+ ret = mchp_general_event_config(clock, pulsewidth);
+ if (ret < 0)
+ return ret;
+
+ ret = mchp_set_clock_target(clock, perout->start.sec,
+ perout->start.nsec);
+ if (ret < 0)
+ return ret;
+
+ return mchp_set_clock_reload(clock, perout->period.sec,
+ perout->period.nsec);
+}
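The handler above is reached through the standard PTP character-device interface. A hedged userspace sketch that would exercise it; the device path and the pin index 3 (taken from the LAN887x initialisation later in this patch) are assumptions of the example:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	static int enable_perout(const char *dev)	/* e.g. "/dev/ptp1" */
	{
		struct ptp_perout_request req;
		struct ptp_pin_desc pin;
		int fd = open(dev, O_RDWR);

		if (fd < 0)
			return -1;

		/* route channel 0 of the periodic output to pin 3 first */
		memset(&pin, 0, sizeof(pin));
		pin.index = 3;
		pin.func = PTP_PF_PEROUT;
		pin.chan = 0;
		if (ioctl(fd, PTP_PIN_SETFUNC, &pin) < 0)
			return -1;

		/* 1 PPS with a 100 us high time via PTP_PEROUT_DUTY_CYCLE */
		memset(&req, 0, sizeof(req));
		req.index = 0;
		req.period.sec = 1;
		req.flags = PTP_PEROUT_DUTY_CYCLE;
		req.on.nsec = 100000;

		return ioctl(fd, PTP_PEROUT_REQUEST2, &req);
	}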
+
+static int mchp_rds_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *request, int on)
+{
+ switch (request->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return mchp_rds_ptp_perout(ptpci, &request->perout, on);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mchp_rds_ptpci_verify(struct ptp_clock_info *ptpci, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(ptpci,
+ struct mchp_rds_ptp_clock,
+ caps);
+
+ if (!(pin == clock->event_pin && chan == 0))
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
+ enum mchp_rds_ptp_fifo_dir dir)
+{
+ int rc;
+
+ if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
+ skb_queue_purge(&clock->tx_queue);
+ else
+ skb_queue_purge(&clock->rx_queue);
+
+ for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
+ rc = mchp_rds_phy_read_mmd(clock,
+ dir == MCHP_RDS_PTP_EGRESS_FIFO ?
+ MCHP_RDS_PTP_TX_MSG_HDR2 :
+ MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return rc;
+ }
+ return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+}
+
+static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
+ bool enable)
+{
+ /* Enable or disable ptp interrupts */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
+ MCHP_RDS_PTP_PORT,
+ enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
+}
+
+static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ switch (clock->hwts_tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (ptp_msg_is_sync(skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&clock->tx_queue, skb);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ skb_push(skb, ETH_HLEN);
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ skb_pull_inline(skb, ETH_HLEN);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->rx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->rx_queue);
+
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+ }
+
+ return rc;
+}
+
+static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ unsigned long flags;
+
+ /* If we failed to match the timestamp with an skb, keep the
+ * timestamp on the list until the matching frame arrives
+ */
+ if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &clock->rx_ts_list);
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+}
+
+static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
+ struct sk_buff *skb)
+{
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ unsigned long flags;
+ u16 skb_sig;
+
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ return;
+
+ /* Iterate over all RX timestamps and match it with the received skbs */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ /* Check if we found the signature we were looking for. */
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+
+ rx_ts_var = rx_ts;
+
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ if (rx_ts_var) {
+ list_del(&rx_ts_var->list);
+ kfree(rx_ts_var);
+ } else {
+ skb_queue_tail(&clock->rx_queue, skb);
+ }
+}
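Taken together, mchp_rds_ptp_match_rx_ts() and mchp_rds_ptp_match_rx_skb() form a rendezvous between frames and timestamps that may arrive in either order: a timestamp without a matching frame waits on rx_ts_list, a frame without a matching timestamp waits on rx_queue, and whichever side arrives second completes delivery via netif_rx().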
+
+static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
+ type == PTP_CLASS_NONE)
+ return false;
+
+ if ((type & clock->version) == 0 || (type & clock->layer) == 0)
+ return false;
+
+ /* If a match occurs here, the skb is delivered to the application
+ * right away. Otherwise the skb is queued and delivery is handled
+ * from the interrupt handler once the timestamp arrives. Either
+ * way the skb reaches the application, so do not return false
+ * just because the skb did not match here.
+ */
+ mchp_rds_ptp_match_rx_skb(clock, skb);
+
+ return true;
+}
+
+static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct mchp_rds_ptp_clock *clock =
+ container_of(mii_ts, struct mchp_rds_ptp_clock,
+ mii_ts);
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
+ int txcfg = 0, rxcfg = 0;
+ unsigned long flags;
+ int rc;
+
+ clock->hwts_tx_type = config->tx_type;
+ clock->rx_filter = config->rx_filter;
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clock->layer = 0;
+ clock->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Set up frame parsing and enable timestamping of PTP frames */
+ if (clock->layer & PTP_CLASS_L2) {
+ rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ }
+ if (clock->layer & PTP_CLASS_L4) {
+ rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ }
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, rxcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, txcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ /* Insert the TX timestamp into SYNC frames only in one-step mode */
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+ else
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+
+ if (rc < 0)
+ return rc;
+
+ /* In case of multiple starts and stops, these need to be cleared */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ /* Now enable the timestamping interrupts */
+ rc = mchp_rds_ptp_config_intr(clock,
+ config->rx_filter != HWTSTAMP_FILTER_NONE);
+
+ return rc < 0 ? rc : 0;
+}
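The callback above is driven by the standard SIOCSHWTSTAMP path. A minimal userspace sketch that would select two-step TX timestamping and V2 event RX filtering — sock is any open AF_INET socket, and the interface name is an assumption of the example:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	static int request_hwtstamp(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		/* the kernel routes this to the PHY's hwtstamp callback */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}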
+
+static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ info->phc_index = ptp_clock_index(clock->ptp_clock);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ struct timespec64 ts;
+ bool add = true;
+ int rc = 0;
+ u32 nsec;
+ s32 sec;
+
+ /* The HW allows the time to be adjusted by up to 15 sec, but limit it
+ * to 10 sec here. The reason is that an adjustment of 14 sec and
+ * 999999999 nsec gets 8 ns added to compensate for the normal
+ * increment, which would push the value beyond 15 sec. Limiting the
+ * adjustment avoids this corner case.
+ */
+ if (delta > 10000000000LL || delta < -10000000000LL) {
+ /* The time adjustment is too big, so fall back to setting the time */
+ u64 now;
+
+ info->gettime64(info, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ info->settime64(info, &ts);
+ return 0;
+ }
+ sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
+ if (delta < 0 && nsec != 0) {
+ /* The nsec part cannot be stepped downwards, so borrow one more
+ * second and add the complement to the nanoseconds so the value
+ * rolls over
+ */
+ sec--;
+ nsec = NSEC_PER_SEC - nsec;
+ }
+
+ /* Calculate the adjustments and the direction */
+ if (delta < 0)
+ add = false;
+
+ if (nsec > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nsec += 8;
+
+ if (nsec >= NSEC_PER_SEC) {
+ /* carry into seconds */
+ sec++;
+ nsec -= NSEC_PER_SEC;
+ }
+ }
+
+ mutex_lock(&clock->ptp_lock);
+ if (sec) {
+ sec = abs(sec);
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, sec);
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ ((add ?
+ MCHP_RDS_PTP_STEP_ADJ_HI_DIR :
+ 0) | ((sec >> 16) &
+ GENMASK(13, 0))));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
+ if (rc < 0)
+ goto out_unlock;
+ }
+
+ if (nsec) {
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK,
+ nsec & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ (nsec >> 16) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
+ }
+
+ mutex_unlock(&clock->ptp_lock);
+ info->gettime64(info, &ts);
+ mutex_lock(&clock->ptp_lock);
+
+ /* A target update is required for pulse generation on events that
+ * are enabled
+ */
+ if (clock->mchp_rds_ptp_event >= 0)
+ mchp_set_clock_target(clock,
+ ts.tv_sec + MCHP_RDS_PTP_BUFFER_TIME, 0);
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
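A worked example of the borrow above, in the sub-second range where it applies: delta = -300 ms gives sec = 0 and nsec = 300000000; after the borrow, sec = -1 and nsec = 700000000, so the hardware subtracts one second and step-adds 0.7 s plus the 8 ns compensation, a net step of about -0.3 s.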
+
+static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
+ long scaled_ppm)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ u16 rate_lo, rate_hi;
+ bool faster = true;
+ u32 rate;
+ int rc;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ faster = false;
+ }
+
+ rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+
+ rate_lo = rate & GENMASK(15, 0);
+ rate_hi = (rate >> 16) & GENMASK(13, 0);
+
+ if (faster)
+ rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK, rate_hi);
+ if (rc < 0)
+ goto error;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, rate_lo);
+ if (rc > 0)
+ rc = 0;
+error:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
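A worked example of the conversion above: scaled_ppm carries ppm scaled by 2^16, so a 1 ppm request arrives as 65536; upper_16_bits() yields 1 and lower_16_bits() yields 0, giving rate = MCHP_RDS_PTP_1PPM_FORMAT = 17179, which matches the per-ppm register value derived in the header.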
+
+static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
+ struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ time64_t secs;
+ int rc = 0;
+ s64 nsecs;
+
+ mutex_lock(&clock->ptp_lock);
+ /* Set read bit to 1 to save current values of 1588 local time counter
+ * into PTP LTC seconds and nanoseconds registers.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Get LTC clock values */
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+ secs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs = (rc & GENMASK(13, 0));
+ nsecs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs |= rc;
+
+ set_normalized_timespec64(ts, secs, nsecs);
+
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ int rc;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_nsec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Set load bit to 1 to write PTP LTC seconds and nanoseconds
+ * registers to 1588 local time counter.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
+ u32 seconds, u32 nsec, u16 seq_id)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->tx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->tx_queue);
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->tx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
+ skb_complete_tx_timestamp(skb, &shhwtstamps);
+ }
+}
+
+static struct mchp_rds_ptp_rx_ts *
+mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ struct phy_device *phydev = clock->phydev;
+ struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
+ u32 sec, nsec;
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
+ phydev_err(phydev, "RX Timestamp is not valid!\n");
+ goto error;
+ }
+ nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ nsec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+
+ rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return NULL;
+
+ rx_ts->seconds = sec;
+ rx_ts->nsec = nsec;
+ rx_ts->seq_id = rc;
+
+error:
+ return rx_ts;
+}
+
+static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ struct mchp_rds_ptp_rx_ts *rx_ts;
+
+ rx_ts = mchp_rds_ptp_get_rx_ts(clock);
+ if (rx_ts)
+ mchp_rds_ptp_match_rx_ts(clock, rx_ts);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
+}
+
+static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
+ u32 *sec, u32 *nsec, u16 *seq)
+{
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
+ return false;
+ *nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *nsec = *nsec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = *sec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+
+ *seq = rc;
+
+ return true;
+}
+
+static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ u32 sec, nsec;
+ u16 seq;
+
+ if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
+ mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
+}
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool clear)
+{
+ if (clear)
+ return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+ else
+ return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+ int irq_sts;
+
+ /* To handle rogue interrupt scenarios */
+ if (!clock)
+ return IRQ_NONE;
+
+ do {
+ irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+ if (irq_sts < 0)
+ return IRQ_NONE;
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
+ mchp_rds_ptp_process_rx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
+ mchp_rds_ptp_process_tx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_EGRESS_FIFO);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_INGRESS_FIFO);
+ } while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);
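The handler loops until MCHP_RDS_PTP_INT_STS reads back clear, so timestamps that land while an earlier batch is being drained are still serviced before the handler returns, rather than being left pending until the next interrupt.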
+
+static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
+{
+ int rc;
+
+ /* Disable PTP */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_DIS);
+ if (rc < 0)
+ return rc;
+
+ /* Disable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ /* Clear PTP interrupt status registers */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_HARDRESET);
+ if (rc < 0)
+ return rc;
+
+ /* Predictor enable */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_LATENCY_SETTING);
+ if (rc < 0)
+ return rc;
+
+ /* Configure PTP operational mode */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_OP_MODE_STANDALONE);
+ if (rc < 0)
+ return rc;
+
+ /* Reference clock configuration */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_REF_CLK_CFG_SET);
+ if (rc < 0)
+ return rc;
+
+ /* Classifier configurations */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ /* Enable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
+ if (rc < 0)
+ return rc;
+
+ /* Enable PTP */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_EN);
+}
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base_addr,
+ u16 port_base_addr)
+{
+ struct mchp_rds_ptp_clock *clock;
+ int rc;
+
+ clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->port_base_addr = port_base_addr;
+ clock->clk_base_addr = clk_base_addr;
+ clock->mmd = mmd;
+
+ mutex_init(&clock->ptp_lock);
+ clock->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
+ MCHP_RDS_PTP_N_PIN,
+ sizeof(*clock->pin_config),
+ GFP_KERNEL);
+ if (!clock->pin_config)
+ return ERR_PTR(-ENOMEM);
+
+ for (int i = 0; i < MCHP_RDS_PTP_N_PIN; ++i) {
+ struct ptp_pin_desc *p = &clock->pin_config[i];
+
+ memset(p, 0, sizeof(*p));
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+ /* Register PTP clock */
+ clock->caps.owner = THIS_MODULE;
+ snprintf(clock->caps.name, sizeof(clock->caps.name), "%s",
+ phydev->drv->name);
+ clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
+ clock->caps.n_ext_ts = 0;
+ clock->caps.pps = 0;
+ clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
+ clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
+ clock->caps.pin_config = clock->pin_config;
+ clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
+ clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
+ clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
+ clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
+ clock->caps.enable = mchp_rds_ptpci_enable;
+ clock->caps.verify = mchp_rds_ptpci_verify;
+ clock->caps.getcrosststamp = NULL;
+ clock->ptp_clock = ptp_clock_register(&clock->caps,
+ &phydev->mdio.dev);
+ if (IS_ERR(clock->ptp_clock))
+ return ERR_CAST(clock->ptp_clock);
+
+ /* Check if PHC support is missing at the configuration level */
+ if (!clock->ptp_clock)
+ return NULL;
+
+ /* Initialize the SW */
+ skb_queue_head_init(&clock->tx_queue);
+ skb_queue_head_init(&clock->rx_queue);
+ INIT_LIST_HEAD(&clock->rx_ts_list);
+ spin_lock_init(&clock->rx_ts_lock);
+
+ clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
+ clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
+ clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp;
+ clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;
+
+ phydev->mii_ts = &clock->mii_ts;
+
+ clock->mchp_rds_ptp_event = -1;
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
+ clock->phydev = phydev;
+
+ rc = mchp_rds_ptp_init(clock);
+ if (rc < 0)
+ return ERR_PTR(rc);
+
+ return clock;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
+MODULE_AUTHOR("Divya Koppera");
diff --git a/drivers/net/phy/microchip_rds_ptp.h b/drivers/net/phy/microchip_rds_ptp.h
new file mode 100644
index 000000000000..25af68337b94
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2024 Microchip Technology
+ */
+
+#ifndef _MICROCHIP_RDS_PTP_H
+#define _MICROCHIP_RDS_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#define MCHP_RDS_PTP_CMD_CTL 0x0
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC BIT(6)
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC BIT(5)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD BIT(4)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_READ BIT(3)
+#define MCHP_RDS_PTP_CMD_CTL_EN BIT(1)
+#define MCHP_RDS_PTP_CMD_CTL_DIS BIT(0)
+
+#define MCHP_RDS_PTP_REF_CLK_CFG 0x2
+#define MCHP_RDS_PTP_REF_CLK_SRC_250MHZ 0x0
+#define MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE BIT(9)
+#define MCHP_RDS_PTP_REF_CLK_PERIOD 4
+#define MCHP_RDS_PTP_REF_CLK_CFG_SET (MCHP_RDS_PTP_REF_CLK_SRC_250MHZ |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD)
+
+#define MCHP_RDS_PTP_LTC_SEC_HI 0x5
+#define MCHP_RDS_PTP_LTC_SEC_MID 0x6
+#define MCHP_RDS_PTP_LTC_SEC_LO 0x7
+#define MCHP_RDS_PTP_LTC_NS_HI 0x8
+#define MCHP_RDS_PTP_LTC_NS_LO 0x9
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI 0xc
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_LO 0xd
+#define MCHP_RDS_PTP_STEP_ADJ_HI 0x12
+#define MCHP_RDS_PTP_STEP_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_STEP_ADJ_LO 0x13
+#define MCHP_RDS_PTP_LTC_READ_SEC_HI 0x29
+#define MCHP_RDS_PTP_LTC_READ_SEC_MID 0x2a
+#define MCHP_RDS_PTP_LTC_READ_SEC_LO 0x2b
+#define MCHP_RDS_PTP_LTC_READ_NS_HI 0x2c
+#define MCHP_RDS_PTP_LTC_READ_NS_LO 0x2d
+#define MCHP_RDS_PTP_OP_MODE 0x41
+#define MCHP_RDS_PTP_OP_MODE_DIS 0
+#define MCHP_RDS_PTP_OP_MODE_STANDALONE 1
+#define MCHP_RDS_PTP_LATENCY_CORRECTION_CTL 0x44
+#define MCHP_RDS_PTP_PREDICTOR_EN BIT(6)
+#define MCHP_RDS_PTP_TX_PRED_DIS BIT(1)
+#define MCHP_RDS_PTP_RX_PRED_DIS BIT(0)
+#define MCHP_RDS_PTP_LATENCY_SETTING (MCHP_RDS_PTP_PREDICTOR_EN | \
+ MCHP_RDS_PTP_TX_PRED_DIS | \
+ MCHP_RDS_PTP_RX_PRED_DIS)
+
+#define MCHP_RDS_PTP_INT_EN 0x0
+#define MCHP_RDS_PTP_INT_STS 0x01
+#define MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN BIT(3)
+#define MCHP_RDS_PTP_INT_TX_TS_EN BIT(2)
+#define MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN BIT(1)
+#define MCHP_RDS_PTP_INT_RX_TS_EN BIT(0)
+#define MCHP_RDS_PTP_INT_ALL_MSK (MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN | \
+ MCHP_RDS_PTP_INT_TX_TS_EN | \
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN |\
+ MCHP_RDS_PTP_INT_RX_TS_EN)
+
+#define MCHP_RDS_PTP_CAP_INFO 0x2e
+#define MCHP_RDS_PTP_TX_TS_CNT(v) (((v) & GENMASK(11, 8)) >> 8)
+#define MCHP_RDS_PTP_RX_TS_CNT(v) ((v) & GENMASK(3, 0))
+
+#define MCHP_RDS_PTP_RX_PARSE_CONFIG 0x42
+#define MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN 0x44
+#define MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN 0x45
+
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG 0x4e
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_RX_VERSION 0x48
+#define MCHP_RDS_PTP_RX_TIMESTAMP_EN 0x4d
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI 0x54
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_LO 0x55
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_HI 0x56
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_LO 0x57
+#define MCHP_RDS_PTP_RX_MSG_HDR2 0x59
+
+#define MCHP_RDS_PTP_TX_PARSE_CONFIG 0x82
+#define MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN BIT(0)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN BIT(1)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN BIT(2)
+
+#define MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN 0x84
+#define MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN 0x85
+
+#define MCHP_RDS_PTP_TX_VERSION 0x88
+#define MCHP_RDS_PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
+#define MCHP_RDS_PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_EN 0x8d
+#define MCHP_RDS_PTP_TIMESTAMP_EN_SYNC BIT(0)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_DREQ BIT(1)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ BIT(2)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDRES BIT(3)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_ALL (MCHP_RDS_PTP_TIMESTAMP_EN_SYNC |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_DREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDRES)
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG 0x8e
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_TX_MOD 0x8f
+#define MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT BIT(12)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI 0x94
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_LO 0x95
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_HI 0x96
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_LO 0x97
+#define MCHP_RDS_PTP_TX_MSG_HDR2 0x99
+
+#define MCHP_RDS_PTP_TSU_GEN_CONFIG 0xc0
+#define MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN BIT(0)
+
+#define MCHP_RDS_PTP_TSU_HARD_RESET 0xc1
+#define MCHP_RDS_PTP_TSU_HARDRESET BIT(0)
+
+#define MCHP_RDS_PTP_CLK_TRGT_SEC_HI 0x15
+#define MCHP_RDS_PTP_CLK_TRGT_SEC_LO 0x16
+#define MCHP_RDS_PTP_CLK_TRGT_NS_HI 0x17
+#define MCHP_RDS_PTP_CLK_TRGT_NS_LO 0x18
+
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI 0x19
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO 0x1a
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI 0x1b
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO 0x1c
+
+#define MCHP_RDS_PTP_GEN_CFG 0x01
+#define MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK GENMASK(11, 8)
+
+#define MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(value) (((value) & 0xF) << 4)
+#define MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD BIT(0)
+#define MCHP_RDS_PTP_GEN_CFG_POLARITY BIT(1)
+
+/* Represents a 1 ppm adjustment in 2^32 format, where each nsec
+ * holds 4 clock cycles at 250MHz.
+ * The value is calculated as follows: (1/1000000)/((2^-32)/4)
+ */
+#define MCHP_RDS_PTP_1PPM_FORMAT 17179
+#define MCHP_RDS_PTP_FIFO_SIZE 8
+#define MCHP_RDS_PTP_MAX_ADJ 31249999
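Checking MCHP_RDS_PTP_1PPM_FORMAT above: (1/1000000)/((2^-32)/4) = 4 * 2^32 / 10^6 = 17179869184 / 10^6, approximately 17179.87, truncated to 17179 — so one LSB of the rate-adjust registers corresponds to roughly 1/17180 of a ppm.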
+
+#define MCHP_RDS_PTP_BUFFER_TIME 2
+#define MCHP_RDS_PTP_N_PIN 4
+#define MCHP_RDS_PTP_N_PEROUT 1
+
+#define BASE_CLK(p) ((p)->clk_base_addr)
+#define BASE_PORT(p) ((p)->port_base_addr)
+#define PTP_MMD(p) ((p)->mmd)
+
+enum mchp_rds_ptp_base {
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_CLOCK
+};
+
+enum mchp_rds_ptp_fifo_dir {
+ MCHP_RDS_PTP_INGRESS_FIFO,
+ MCHP_RDS_PTP_EGRESS_FIFO
+};
+
+struct mchp_rds_ptp_clock {
+ struct mii_timestamper mii_ts;
+ struct phy_device *phydev;
+ struct ptp_clock *ptp_clock;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+ struct list_head rx_ts_list;
+
+ struct ptp_clock_info caps;
+
+ /* Lock for Rx ts fifo */
+ spinlock_t rx_ts_lock;
+ int hwts_tx_type;
+
+ enum hwtstamp_rx_filters rx_filter;
+ int layer;
+ int version;
+ u16 port_base_addr;
+ u16 clk_base_addr;
+
+ /* Lock for phc */
+ struct mutex ptp_lock;
+ u8 mmd;
+ int mchp_rds_ptp_event;
+ int event_pin;
+ struct ptp_pin_desc *pin_config;
+};
+
+struct mchp_rds_ptp_rx_ts {
+ struct list_head list;
+ u32 seconds;
+ u32 nsec;
+ u16 seq_id;
+};
+
+#if IS_ENABLED(CONFIG_MICROCHIP_PHY_RDS_PTP)
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base, u16 port_base);
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool clear);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock);
+
+#else
+
+static inline struct mchp_rds_ptp_clock *
+mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd, u16 clk_base,
+ u16 port_base)
+{
+ return NULL;
+}
+
+static inline int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool clear)
+{
+ return 0;
+}
+
+static inline irqreturn_t
+mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+ return IRQ_NONE;
+}
+
+#endif /* CONFIG_MICROCHIP_PHY_RDS_PTP */
+
+#endif /* _MICROCHIP_RDS_PTP_H */
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index b17bf6708003..62b36a318100 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -10,11 +10,15 @@
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/bitfield.h>
+#include "microchip_rds_ptp.h"
#define PHY_ID_LAN87XX 0x0007c150
#define PHY_ID_LAN937X 0x0007c180
#define PHY_ID_LAN887X 0x0007c1f0
+#define MCHP_RDS_PTP_LTC_BASE_ADDR 0xe000
+#define MCHP_RDS_PTP_PORT_BASE_ADDR (MCHP_RDS_PTP_LTC_BASE_ADDR + 0x800)
+
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
@@ -229,10 +233,14 @@
#define LAN887X_INT_STS 0xf000
#define LAN887X_INT_MSK 0xf001
+#define LAN887X_INT_MSK_P1588_MOD_INT_MSK BIT(3)
#define LAN887X_INT_MSK_T1_PHY_INT_MSK BIT(2)
#define LAN887X_INT_MSK_LINK_UP_MSK BIT(1)
#define LAN887X_INT_MSK_LINK_DOWN_MSK BIT(0)
+#define LAN887X_MX_CHIP_TOP_REG_CONTROL1 0xF002
+#define LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN BIT(8)
+
#define LAN887X_MX_CHIP_TOP_LINK_MSK (LAN887X_INT_MSK_LINK_UP_MSK |\
LAN887X_INT_MSK_LINK_DOWN_MSK)
@@ -319,6 +327,8 @@ struct lan887x_regwr_map {
struct lan887x_priv {
u64 stats[ARRAY_SIZE(lan887x_hw_stats)];
+ struct mchp_rds_ptp_clock *clock;
+ bool init_done;
};
static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
@@ -1269,8 +1279,28 @@ static int lan887x_get_features(struct phy_device *phydev)
static int lan887x_phy_init(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int ret;
+ if (!priv->init_done && phy_interrupt_is_valid(phydev)) {
+ priv->clock = mchp_rds_ptp_probe(phydev, MDIO_MMD_VEND1,
+ MCHP_RDS_PTP_LTC_BASE_ADDR,
+ MCHP_RDS_PTP_PORT_BASE_ADDR);
+ if (IS_ERR(priv->clock))
+ return PTR_ERR(priv->clock);
+
+ /* Enable pin mux for EVT */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN);
+
+ /* Initialize pin numbers specific to PEROUT */
+ priv->clock->event_pin = 3;
+
+ priv->init_done = true;
+ }
+
/* Clear loopback */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
LAN887X_MIS_CFG_REG2,
@@ -1470,6 +1500,7 @@ static int lan887x_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
+ priv->init_done = false;
phydev->priv = priv;
return lan887x_phy_setup(phydev);
@@ -1518,6 +1549,7 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
static int lan887x_config_intr(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -1537,12 +1569,24 @@ static int lan887x_config_intr(struct phy_device *phydev)
rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
}
+ if (rc < 0)
+ return rc;
- return rc < 0 ? rc : 0;
+ if (phy_is_default_hwtstamp(phydev)) {
+ return mchp_rds_ptp_top_config_intr(priv->clock,
+ LAN887X_INT_MSK,
+ LAN887X_INT_MSK_P1588_MOD_INT_MSK,
+ (phydev->interrupts ==
+ PHY_INTERRUPT_ENABLED));
+ }
+
+ return 0;
}
static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
+ irqreturn_t rc = IRQ_NONE;
int irq_status;
irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
@@ -1553,10 +1597,13 @@ static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
if (irq_status & LAN887X_MX_CHIP_TOP_LINK_MSK) {
phy_trigger_machine(phydev);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
}
- return IRQ_NONE;
+ if (irq_status & LAN887X_INT_MSK_P1588_MOD_INT_MSK)
+ rc = mchp_rds_ptp_handle_interrupt(priv->clock);
+
+ return rc;
}
static int lan887x_cd_reset(struct phy_device *phydev,
diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c
index 75d291154b4c..e50a0c102a86 100644
--- a/drivers/net/phy/microchip_t1s.c
+++ b/drivers/net/phy/microchip_t1s.c
@@ -497,7 +497,7 @@ static struct phy_driver microchip_t1s_driver[] = {
module_phy_driver(microchip_t1s_driver);
-static struct mdio_device_id __maybe_unused tbl[] = {
+static const struct mdio_device_id __maybe_unused tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC2) },
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index bee381200ab8..19cf12ee8990 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2699,7 +2699,7 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
-static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_MATCH_VENDOR(PHY_VENDOR_MSCC) },
{ }
};
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index a8ccf257c109..94d9cb727121 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -1274,7 +1274,7 @@ static struct phy_driver gpy_drivers[] = {
};
module_phy_driver(gpy_drivers);
-static struct mdio_device_id __maybe_unused gpy_tbl[] = {
+static const struct mdio_device_id __maybe_unused gpy_tbl[] = {
{PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx)},
{PHY_ID_GPY115B, PHY_ID_GPYx15B_MASK},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY115C)},
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 9ae9cc6b23c2..7f3ff322892e 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -173,7 +173,7 @@ MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused ns_tbl[] = {
+static const struct mdio_device_id __maybe_unused ns_tbl[] = {
{ DP83865_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/ncn26000.c b/drivers/net/phy/ncn26000.c
index 5680584f659e..cabdd83c614f 100644
--- a/drivers/net/phy/ncn26000.c
+++ b/drivers/net/phy/ncn26000.c
@@ -159,7 +159,7 @@ static struct phy_driver ncn26000_driver[] = {
module_phy_driver(ncn26000_driver);
-static struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
+static const struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_NCN26000) },
{ }
};
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index ade544bc007d..323717a4821f 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -2008,7 +2008,7 @@ static struct phy_driver nxp_c45_driver[] = {
module_phy_driver(nxp_c45_driver);
-static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
+static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
{ /*sentinel*/ },
diff --git a/drivers/net/phy/nxp-cbtx.c b/drivers/net/phy/nxp-cbtx.c
index 3d25491043a3..3286fcb4f47e 100644
--- a/drivers/net/phy/nxp-cbtx.c
+++ b/drivers/net/phy/nxp-cbtx.c
@@ -215,7 +215,7 @@ static struct phy_driver cbtx_driver[] = {
module_phy_driver(cbtx_driver);
-static struct mdio_device_id __maybe_unused cbtx_tbl[] = {
+static const struct mdio_device_id __maybe_unused cbtx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_CBTX_SJA1110) },
{ },
};
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 2c263ae44b4f..ed7fa26bac8e 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -888,7 +888,7 @@ static struct phy_driver tja11xx_driver[] = {
module_phy_driver(tja11xx_driver);
-static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 944ae98ad110..0dac08e85304 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -1469,18 +1469,17 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
* @phydev: target phy_device struct
* @adv: variable to store advertised linkmodes
* @lp: variable to store LP advertised linkmodes
- * @is_enabled: variable to store EEE enabled/disabled configuration value
*
* Description: this function reads the local and link partner PHY
* advertisements, compares them, and returns the current EEE state.
*/
int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp, bool *is_enabled)
+ unsigned long *lp)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- bool eee_enabled, eee_active;
+ bool eee_active;
int ret;
ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
@@ -1491,9 +1490,8 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
if (ret)
return ret;
- eee_enabled = !linkmode_empty(tmp_adv);
linkmode_and(common, tmp_adv, tmp_lp);
- if (eee_enabled && !linkmode_empty(common))
+ if (!linkmode_empty(tmp_adv) && !linkmode_empty(common))
eee_active = phy_check_valid(phydev->speed, phydev->duplex,
common);
else
@@ -1503,8 +1501,6 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
linkmode_copy(adv, tmp_adv);
if (lp)
linkmode_copy(lp, tmp_lp);
- if (is_enabled)
- *is_enabled = eee_enabled;
return eee_active;
}
@@ -1521,15 +1517,13 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
struct ethtool_keee *data)
{
- bool is_enabled;
int ret;
ret = genphy_c45_eee_is_active(phydev, data->advertised,
- data->lp_advertised, &is_enabled);
+ data->lp_advertised);
if (ret < 0)
return ret;
- data->eee_enabled = is_enabled;
data->eee_active = phydev->eee_active;
linkmode_copy(data->supported, phydev->supported_eee);
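With the is_enabled output dropped, callers that only need the active state pass NULL for both linkmode masks. A minimal sketch of the updated calling convention, mirroring what phy_check_link_status() does in phy.c below:

static int my_update_eee_active(struct phy_device *phydev)
{
	int ret;

	/* NULL @adv and @lp: only the active/inactive result is wanted. */
	ret = genphy_c45_eee_is_active(phydev, NULL, NULL);
	if (ret < 0)
		return ret;

	phydev->eee_active = ret > 0;
	return 0;
}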
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 0d20b534122b..d0c1718e2b16 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -616,6 +616,49 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
EXPORT_SYMBOL(phy_ethtool_get_stats);
/**
+ * __phy_ethtool_get_phy_stats - Retrieve standardized PHY statistics
+ * @phydev: Pointer to the PHY device
+ * @phy_stats: Pointer to ethtool_eth_phy_stats structure
+ * @phydev_stats: Pointer to ethtool_phy_stats structure
+ *
+ * Fetches PHY statistics using a kernel-defined interface for consistent
+ * diagnostics. Unlike phy_ethtool_get_stats(), which allows custom stats,
+ * this function enforces a standardized format for better interoperability.
+ */
+void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+ if (!phydev->drv || !phydev->drv->get_phy_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_phy_stats(phydev, phy_stats, phydev_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
+ * __phy_ethtool_get_link_ext_stats - Retrieve extended link statistics for a PHY
+ * @phydev: Pointer to the PHY device
+ * @link_stats: Pointer to the structure to store extended link statistics
+ *
+ * Populates the ethtool_link_ext_stats structure with link down event counts
+ * and additional driver-specific link statistics, if available.
+ */
+void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ link_stats->link_down_events = READ_ONCE(phydev->link_down_events);
+
+ if (!phydev->drv || !phydev->drv->get_link_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_link_stats(phydev, link_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
* phy_ethtool_get_plca_cfg - Get PLCA RS configuration
* @phydev: the phy_device struct
* @plca_cfg: where to store the retrieved configuration
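A PHY driver opts into this standardized statistics path by implementing the new get_phy_stats method. A hedged sketch, where my_read_counter() and the MY_*_CNT registers are hypothetical device accessors:

static void my_get_phy_stats(struct phy_device *phydev,
			     struct ethtool_eth_phy_stats *eth_stats,
			     struct ethtool_phy_stats *stats)
{
	/* The core calls this with phydev->lock already held. */
	stats->rx_errors = my_read_counter(phydev, MY_RX_ERR_CNT);
	stats->tx_errors = my_read_counter(phydev, MY_TX_ERR_CNT);
}

static struct phy_driver my_phy_driver = {
	/* ... */
	.get_phy_stats	= my_get_phy_stats,
};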
@@ -988,8 +1031,7 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
- err = genphy_c45_eee_is_active(phydev,
- NULL, NULL, NULL);
+ err = genphy_c45_eee_is_active(phydev, NULL, NULL);
phydev->eee_active = err > 0;
phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled &&
phydev->eee_active;
@@ -1006,6 +1048,59 @@ static int phy_check_link_status(struct phy_device *phydev)
}
/**
+ * phy_inband_caps - query which in-band signalling modes are supported
+ * @phydev: a pointer to a &struct phy_device
+ * @interface: the interface mode for the PHY
+ *
+ * Returns zero if it is unknown what in-band signalling is supported by the
+ * PHY (e.g. because the PHY driver doesn't implement the method). Otherwise,
+ * returns a bit mask of the LINK_INBAND_* values from
+ * &enum link_inband_signalling to describe which inband modes are supported
+ * by the PHY for this interface mode.
+ */
+unsigned int phy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (phydev->drv && phydev->drv->inband_caps)
+ return phydev->drv->inband_caps(phydev, interface);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_inband_caps);
+
+/**
+ * phy_config_inband - configure the desired PHY in-band mode
+ * @phydev: the phy_device struct
+ * @modes: in-band modes to configure
+ *
+ * Description: disables, enables or enables-with-bypass in-band signalling
+ * between the PHY and host system.
+ *
+ * Returns: zero on success, or negative errno value.
+ */
+int phy_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int err;
+
+ if (!!(modes & LINK_INBAND_DISABLE) +
+ !!(modes & LINK_INBAND_ENABLE) +
+ !!(modes & LINK_INBAND_BYPASS) != 1)
+ return -EINVAL;
+
+ mutex_lock(&phydev->lock);
+ if (!phydev->drv)
+ err = -EIO;
+ else if (!phydev->drv->config_inband)
+ err = -EOPNOTSUPP;
+ else
+ err = phydev->drv->config_inband(phydev, modes);
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_config_inband);
+
+/**
* _phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
@@ -1347,6 +1442,23 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
+ * phy_update_stats - Update PHY device statistics if supported.
+ * @phydev: Pointer to the PHY device structure.
+ *
+ * If the PHY driver provides an update_stats callback, this function
+ * invokes it to update the PHY statistics. If not, it returns 0.
+ *
+ * Return: 0 on success, or a negative error code if the callback fails.
+ */
+static int phy_update_stats(struct phy_device *phydev)
+{
+ if (!phydev->drv->update_stats)
+ return 0;
+
+ return phydev->drv->update_stats(phydev);
+}
+
+/**
* phy_request_interrupt - request and enable interrupt for a PHY device
* @phydev: target phy_device struct
*
@@ -1415,6 +1527,9 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
func = &phy_check_link_status;
+
+ if (!err)
+ err = phy_update_stats(phydev);
break;
case PHY_CABLETEST:
err = phydev->drv->cable_test_get_status(phydev, &finished);
@@ -1589,6 +1704,47 @@ void phy_mac_interrupt(struct phy_device *phydev)
EXPORT_SYMBOL(phy_mac_interrupt);
/**
+ * phy_eee_tx_clock_stop_capable() - indicate whether the MAC can stop tx clock
+ * @phydev: target phy_device struct
+ *
+ * Indicate whether the MAC can disable the transmit xMII clock while in LPI
+ * state. Returns 1 if the MAC may stop the transmit clock, 0 if the MAC must
+ * not stop the transmit clock, or negative error.
+ */
+int phy_eee_tx_clock_stop_capable(struct phy_device *phydev)
+{
+ int stat1;
+
+ stat1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (stat1 < 0)
+ return stat1;
+
+ return !!(stat1 & MDIO_PCS_STAT1_CLKSTOP_CAP);
+}
+EXPORT_SYMBOL_GPL(phy_eee_tx_clock_stop_capable);
+
+/**
+ * phy_eee_rx_clock_stop() - configure PHY receive clock in LPI
+ * @phydev: target phy_device struct
+ * @clk_stop_enable: flag to indicate whether the clock can be stopped
+ *
+ * Configure whether the PHY can disable its receive clock during LPI mode.
+ * See IEEE 802.3 sections 22.2.2.2, 35.2.2.10, and 45.2.3.1.4.
+ *
+ * Returns: 0 or negative error.
+ */
+int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable)
+{
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN,
+ clk_stop_enable ? MDIO_PCS_CTRL1_CLKSTOP_EN : 0);
+}
+EXPORT_SYMBOL_GPL(phy_eee_rx_clock_stop);
+
+/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
* @clk_stop_enable: PHY may stop the clock during LPI
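The two helpers split EEE clock handling by direction: the MAC asks whether it may gate its transmit clock, and tells the PHY whether the receive clock may stop. A sketch of the MAC-side usage, where my_mac_set_tx_clk_stop() is a hypothetical hook; the -EOPNOTSUPP tolerance mirrors phylink_bringup_phy() later in this series:

static int my_mac_setup_eee_clocks(struct phy_device *phy, bool rx_clk_stop)
{
	int ret;

	/* May the MAC gate its transmit xMII clock while in LPI? */
	ret = phy_eee_tx_clock_stop_capable(phy);
	if (ret < 0)
		return ret;
	my_mac_set_tx_clk_stop(ret > 0);

	/* Tell the PHY whether it may stop its receive clock in LPI. */
	ret = phy_eee_rx_clock_stop(phy, rx_clk_stop);
	return ret == -EOPNOTSUPP ? 0 : ret;
}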
@@ -1605,18 +1761,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phydev->drv)
return -EIO;
- ret = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL);
+ ret = genphy_c45_eee_is_active(phydev, NULL, NULL);
if (ret < 0)
return ret;
if (!ret)
return -EPROTONOSUPPORT;
if (clk_stop_enable)
- /* Configure the PHY to stop receiving xMII
- * clock while it is signaling LPI.
- */
- ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
- MDIO_PCS_CTRL1_CLKSTOP_EN);
+ ret = phy_eee_rx_clock_stop(phydev, true);
return ret < 0 ? ret : 0;
}
@@ -1649,8 +1801,8 @@ EXPORT_SYMBOL(phy_get_eee_err);
* @phydev: target phy_device struct
* @data: ethtool_keee data
*
- * Description: reports the Supported/Advertisement/LP Advertisement
- * capabilities, etc.
+ * Description: get the current EEE settings, filling in all members of
+ * @data.
*/
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b26bb33cd1d4..46713d27412b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -32,6 +32,7 @@
#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
@@ -59,15 +60,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_features);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_gbit_fibre_features);
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
-
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_features);
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
-
const int phy_basic_ports_array[3] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
@@ -75,12 +70,7 @@ const int phy_basic_ports_array[3] = {
};
EXPORT_SYMBOL_GPL(phy_basic_ports_array);
-const int phy_fibre_port_array[1] = {
- ETHTOOL_LINK_MODE_FIBRE_BIT,
-};
-EXPORT_SYMBOL_GPL(phy_fibre_port_array);
-
-const int phy_all_ports_features_array[7] = {
+static const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
@@ -89,7 +79,6 @@ const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_BNC_BIT,
ETHTOOL_LINK_MODE_Backplane_BIT,
};
-EXPORT_SYMBOL_GPL(phy_all_ports_features_array);
const int phy_10_100_features_array[4] = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
@@ -123,20 +112,6 @@ const int phy_10gbit_features_array[1] = {
};
EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
-static const int phy_10gbit_fec_features_array[1] = {
- ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
-};
-
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
-
-static const int phy_10gbit_full_features_array[] = {
- ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-};
-
static const int phy_eee_cap1_features_array[] = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -198,20 +173,7 @@ static void features_init(void)
linkmode_set_bit_array(phy_gbit_features_array,
ARRAY_SIZE(phy_gbit_features_array),
phy_gbit_fibre_features);
- linkmode_set_bit_array(phy_fibre_port_array,
- ARRAY_SIZE(phy_fibre_port_array),
- phy_gbit_fibre_features);
-
- /* 10/100 half/full + 1000 half/full + TP/MII/FIBRE/AUI/BNC/Backplane*/
- linkmode_set_bit_array(phy_all_ports_features_array,
- ARRAY_SIZE(phy_all_ports_features_array),
- phy_gbit_all_ports_features);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- phy_gbit_all_ports_features);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- phy_gbit_all_ports_features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phy_gbit_fibre_features);
/* 10/100 half/full + 1000 half/full + 10G full*/
linkmode_set_bit_array(phy_all_ports_features_array,
@@ -227,17 +189,6 @@ static void features_init(void)
ARRAY_SIZE(phy_10gbit_features_array),
phy_10gbit_features);
- /* 10/100/1000/10G full */
- linkmode_set_bit_array(phy_all_ports_features_array,
- ARRAY_SIZE(phy_all_ports_features_array),
- phy_10gbit_full_features);
- linkmode_set_bit_array(phy_10gbit_full_features_array,
- ARRAY_SIZE(phy_10gbit_full_features_array),
- phy_10gbit_full_features);
- /* 10G FEC only */
- linkmode_set_bit_array(phy_10gbit_fec_features_array,
- ARRAY_SIZE(phy_10gbit_fec_features_array),
- phy_10gbit_fec_features);
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
phy_eee_cap1_features);
@@ -1998,6 +1949,15 @@ void phy_detach(struct phy_device *phydev)
phy_suspend(phydev);
if (dev) {
+ struct hwtstamp_provider *hwprov;
+
+ hwprov = rtnl_dereference(dev->hwprov);
+ /* Disable timestamp if it is the one selected */
+ if (hwprov && hwprov->phydev == phydev) {
+ rcu_assign_pointer(dev->hwprov, NULL);
+ kfree_rcu(hwprov, rcu_head);
+ }
+
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_link_topo_del_phy(dev, phydev);
@@ -2994,6 +2954,23 @@ void phy_support_eee(struct phy_device *phydev)
EXPORT_SYMBOL(phy_support_eee);
/**
+ * phy_disable_eee - Disable EEE for the PHY
+ * @phydev: Target phy_device struct
+ *
+ * This function is used by MAC drivers for MACs which don't support EEE.
+ * It disables EEE on the PHY layer.
+ */
+void phy_disable_eee(struct phy_device *phydev)
+{
+ linkmode_zero(phydev->advertising_eee);
+ phydev->eee_cfg.tx_lpi_enabled = false;
+ phydev->eee_cfg.eee_enabled = false;
+ /* don't let userspace re-enable EEE advertisement */
+ linkmode_fill(phydev->eee_broken_modes);
+}
+EXPORT_SYMBOL_GPL(phy_disable_eee);
+
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
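A MAC driver whose hardware has no LPI support calls phy_disable_eee() once, right after connecting the PHY. A hedged sketch; my_mac_adjust_link() and the surrounding connect path are assumptions, not a real driver:

#include <linux/of_mdio.h>

static int my_mac_connect_phy(struct net_device *ndev, struct device_node *np)
{
	struct phy_device *phydev;

	phydev = of_phy_get_and_connect(ndev, np, my_mac_adjust_link);
	if (!phydev)
		return -ENODEV;

	/* No MAC LPI support: force EEE off and keep userspace from
	 * re-enabling the advertisement.
	 */
	phy_disable_eee(phydev);

	return 0;
}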
@@ -3773,6 +3750,8 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
static const struct phylib_stubs __phylib_stubs = {
.hwtstamp_get = __phy_hwtstamp_get,
.hwtstamp_set = __phy_hwtstamp_set,
+ .get_phy_stats = __phy_ethtool_get_phy_stats,
+ .get_link_ext_stats = __phy_ethtool_get_link_ext_stats,
};
static void phylib_register_stubs(void)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 30a654e98352..214b62fba991 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -56,9 +56,11 @@ struct phylink {
struct phy_device *phydev;
phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
u8 cfg_link_an_mode; /* MLO_AN_xxx */
- u8 cur_link_an_mode;
+ u8 req_link_an_mode; /* Requested MLO_AN_xxx mode */
+ u8 act_link_an_mode; /* Active MLO_AN_xxx mode */
u8 link_port; /* The current non-phy ethtool port */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_lpi);
/* The link configuration settings */
struct phylink_link_state link_config;
@@ -74,17 +76,26 @@ struct phylink {
struct mutex state_mutex;
struct phylink_link_state phy_state;
+ unsigned int phy_ib_mode;
struct work_struct resolve;
unsigned int pcs_neg_mode;
unsigned int pcs_state;
bool link_failed;
+ bool mac_supports_eee_ops;
+ bool mac_supports_eee;
+ bool phy_enable_tx_lpi;
+ bool mac_enable_tx_lpi;
+ bool mac_tx_clk_stop;
+ u32 mac_tx_lpi_timer;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
u8 sfp_port;
+
+ struct eee_config eee_cfg;
};
#define phylink_printk(level, pl, fmt, ...) \
@@ -174,6 +185,24 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static const char *phylink_pcs_mode_str(unsigned int mode)
+{
+ if (!mode)
+ return "none";
+
+ if (mode & PHYLINK_PCS_NEG_OUTBAND)
+ return "outband";
+
+ if (mode & PHYLINK_PCS_NEG_INBAND) {
+ if (mode & PHYLINK_PCS_NEG_ENABLED)
+ return "inband,an-enabled";
+ else
+ return "inband,an-disabled";
+ }
+
+ return "unknown";
+}
+
static unsigned int phylink_interface_signal_rate(phy_interface_t interface)
{
switch (interface) {
@@ -671,6 +700,17 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return -EINVAL;
}
+ /* Ensure that this PCS supports the interface which the MAC
+ * returned it for. It is an error for the MAC to return a PCS
+ * that does not support the interface mode.
+ */
+ if (!phy_interface_empty(pcs->supported_interfaces) &&
+ !test_bit(state->interface, pcs->supported_interfaces)) {
+ phylink_err(pl, "MAC returned PCS which does not support %s\n",
+ phy_modes(state->interface));
+ return -EINVAL;
+ }
+
/* Validate the link parameters with the PCS */
if (pcs->ops->pcs_validate) {
ret = pcs->ops->pcs_validate(pcs, supported, state);
@@ -971,6 +1011,15 @@ static void phylink_resolve_an_pause(struct phylink_link_state *state)
}
}
+static unsigned int phylink_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ if (pcs && pcs->ops->pcs_inband_caps)
+ return pcs->ops->pcs_inband_caps(pcs, interface);
+
+ return 0;
+}
+
static void phylink_pcs_pre_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
@@ -1024,6 +1073,24 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex);
}
+/* Query inband for a specific interface mode, asking the MAC for the
+ * PCS which will be used to handle the interface mode.
+ */
+static unsigned int phylink_inband_caps(struct phylink *pl,
+ phy_interface_t interface)
+{
+ struct phylink_pcs *pcs;
+
+ if (!pl->mac_ops->mac_select_pcs)
+ return 0;
+
+ pcs = pl->mac_ops->mac_select_pcs(pl->config, interface);
+ if (!pcs)
+ return 0;
+
+ return phylink_pcs_inband_caps(pcs, interface);
+}
+
static void phylink_pcs_poll_stop(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_INBAND)
@@ -1065,13 +1132,13 @@ static void phylink_mac_config(struct phylink *pl,
phylink_dbg(pl,
"%s: mode=%s/%s/%s adv=%*pb pause=%02x\n",
- __func__, phylink_an_mode_str(pl->cur_link_an_mode),
+ __func__, phylink_an_mode_str(pl->act_link_an_mode),
phy_modes(st.interface),
phy_rate_matching_to_str(st.rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising,
st.pause);
- pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st);
+ pl->mac_ops->mac_config(pl->config, pl->act_link_an_mode, &st);
}
static void phylink_pcs_an_restart(struct phylink *pl)
@@ -1079,13 +1146,14 @@ static void phylink_pcs_an_restart(struct phylink *pl)
if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
pl->link_config.advertising) &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
- phylink_autoneg_inband(pl->cur_link_an_mode))
+ phylink_autoneg_inband(pl->act_link_an_mode))
pl->pcs->ops->pcs_an_restart(pl->pcs);
}
/**
* phylink_pcs_neg_mode() - helper to determine PCS inband mode
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pcs: a pointer to &struct phylink_pcs
* @interface: interface mode to be used
* @advertising: advertisement ethtool link mode mask
*
@@ -1102,11 +1170,21 @@ static void phylink_pcs_an_restart(struct phylink *pl)
* Note: this is for cases where the PCS itself is involved in negotiation
* (e.g. Clause 37, SGMII and similar) not Clause 73.
*/
-static unsigned int phylink_pcs_neg_mode(unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertising)
+static void phylink_pcs_neg_mode(struct phylink *pl, struct phylink_pcs *pcs,
+ phy_interface_t interface,
+ const unsigned long *advertising)
{
- unsigned int neg_mode;
+ unsigned int pcs_ib_caps = 0;
+ unsigned int phy_ib_caps = 0;
+ unsigned int neg_mode, mode;
+ enum {
+ INBAND_CISCO_SGMII,
+ INBAND_BASEX,
+ } type;
+
+ mode = pl->req_link_an_mode;
+
+ pl->phy_ib_mode = 0;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -1119,10 +1197,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
* inband communication. Note: there exist PHYs that run
* with SGMII but do not send the inband data.
*/
- if (!phylink_autoneg_inband(mode))
- neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- else
- neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ type = INBAND_CISCO_SGMII;
break;
case PHY_INTERFACE_MODE_1000BASEX:
@@ -1133,21 +1208,143 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
* as well, but drivers may not support this, so may
* need to override this.
*/
- if (!phylink_autoneg_inband(mode))
+ type = INBAND_BASEX;
+ break;
+
+ default:
+ pl->pcs_neg_mode = PHYLINK_PCS_NEG_NONE;
+ pl->act_link_an_mode = mode;
+ return;
+ }
+
+ if (pcs)
+ pcs_ib_caps = phylink_pcs_inband_caps(pcs, interface);
+
+ if (pl->phydev)
+ phy_ib_caps = phy_inband_caps(pl->phydev, interface);
+
+ phylink_dbg(pl, "interface %s inband modes: pcs=%02x phy=%02x\n",
+ phy_modes(interface), pcs_ib_caps, phy_ib_caps);
+
+ if (!phylink_autoneg_inband(mode)) {
+ bool pcs_ib_only = false;
+ bool phy_ib_only = false;
+
+ if (pcs_ib_caps && pcs_ib_caps != LINK_INBAND_DISABLE) {
+ /* PCS supports reporting in-band capabilities, and
+ * supports more than disable mode.
+ */
+ if (pcs_ib_caps & LINK_INBAND_DISABLE)
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ else if (pcs_ib_caps & LINK_INBAND_ENABLE)
+ pcs_ib_only = true;
+ }
+
+ if (phy_ib_caps && phy_ib_caps != LINK_INBAND_DISABLE) {
+ /* PHY supports in-band capabilities, and supports
+ * more than disable mode.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+ else if (phy_ib_caps & LINK_INBAND_ENABLE)
+ phy_ib_only = true;
+ }
+
+ /* If either the PCS or PHY requires inband to be enabled,
+ * this is an invalid configuration. Provide a diagnostic
+ * message for this case, but don't try to force the issue.
+ */
+ if (pcs_ib_only || phy_ib_only)
+ phylink_warn(pl,
+ "firmware wants %s mode, but %s%s%s requires inband\n",
+ phylink_an_mode_str(mode),
+ pcs_ib_only ? "PCS" : "",
+ pcs_ib_only && phy_ib_only ? " and " : "",
+ phy_ib_only ? "PHY" : "");
+
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ } else if (type == INBAND_CISCO_SGMII || pl->phydev) {
+ /* For SGMII modes which are designed to be used with PHYs, or
+ * Base-X with a PHY, we try to use in-band mode wherever
+ * possible. However, some PHYs, e.g. the BCM84881, do not
+ * support in-band.
+ */
+ const unsigned int inband_ok = LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+ const unsigned int outband_ok = LINK_INBAND_DISABLE |
+ LINK_INBAND_BYPASS;
+ /* PCS PHY
+ * D E D E
+ * 0 0 0 0 no information inband enabled
+ * 1 0 0 0 pcs doesn't support outband
+ * 0 1 0 0 pcs required inband enabled
+ * 1 1 0 0 pcs optional inband enabled
+ * 0 0 1 0 phy doesn't support outband
+ * 1 0 1 0 pcs+phy doesn't support outband
+ * 0 1 1 0 pcs required, phy doesn't support, invalid
+ * 1 1 1 0 pcs optional, phy doesn't support, outband
+ * 0 0 0 1 phy required inband enabled
+ * 1 0 0 1 pcs doesn't support, phy required, invalid
+ * 0 1 0 1 pcs+phy required inband enabled
+ * 1 1 0 1 pcs optional, phy required inband enabled
+ * 0 0 1 1 phy optional inband enabled
+ * 1 0 1 1 pcs doesn't support, phy optional, outband
+ * 0 1 1 1 pcs required, phy optional inband enabled
+ * 1 1 1 1 pcs+phy optional inband enabled
+ */
+ if ((!pcs_ib_caps || pcs_ib_caps & inband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & inband_ok)) {
+ /* In-band supported or unknown at both ends. Enable
+ * in-band mode with or without bypass at the PHY.
+ */
+ if (phy_ib_caps & LINK_INBAND_ENABLE)
+ pl->phy_ib_mode = LINK_INBAND_ENABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ } else if ((!pcs_ib_caps || pcs_ib_caps & outband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & outband_ok)) {
+ /* In-band is not supported at at least one end.
+ * In-band bypass at the other end is possible.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+
neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ if (pl->phydev)
+ mode = MLO_AN_PHY;
+ } else {
+ /* invalid */
+ phylink_warn(pl, "%s: incompatible in-band capabilities, trying in-band",
+ phy_modes(interface));
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ }
+ } else {
+ /* For Base-X without a PHY */
+ if (pcs_ib_caps == LINK_INBAND_DISABLE)
+ /* If the PCS doesn't support inband, then inband must
+ * be disabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
+ else if (pcs_ib_caps == LINK_INBAND_ENABLE)
+ /* If the PCS requires inband, then inband must always
+ * be enabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
advertising))
neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else
neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
- break;
-
- default:
- neg_mode = PHYLINK_PCS_NEG_NONE;
- break;
}
- return neg_mode;
+ pl->pcs_neg_mode = neg_mode;
+ pl->act_link_an_mode = mode;
}
static void phylink_major_config(struct phylink *pl, bool restart,
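The sixteen-row table above reduces to one mask test per end: an end with no information (caps == 0) is permissive, otherwise it must allow enable or bypass for in-band to be usable. A stand-alone sketch of that reduction, convenient for checking the table by hand:

/* Returns true when in-band negotiation should be attempted. */
static bool want_inband(unsigned int pcs_ib_caps, unsigned int phy_ib_caps)
{
	const unsigned int inband_ok = LINK_INBAND_ENABLE |
				       LINK_INBAND_BYPASS;

	return (!pcs_ib_caps || pcs_ib_caps & inband_ok) &&
	       (!phy_ib_caps || phy_ib_caps & inband_ok);
}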
@@ -1159,11 +1356,9 @@ static void phylink_major_config(struct phylink *pl, bool restart,
unsigned int neg_mode;
int err;
- phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
-
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- state->interface,
- state->advertising);
+ phylink_dbg(pl, "major config, requested %s/%s\n",
+ phylink_an_mode_str(pl->req_link_an_mode),
+ phy_modes(state->interface));
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
@@ -1177,10 +1372,17 @@ static void phylink_major_config(struct phylink *pl, bool restart,
pcs_changed = pl->pcs != pcs;
}
+ phylink_pcs_neg_mode(pl, pcs, state->interface, state->advertising);
+
+ phylink_dbg(pl, "major config, active %s/%s/%s\n",
+ phylink_an_mode_str(pl->act_link_an_mode),
+ phylink_pcs_mode_str(pl->pcs_neg_mode),
+ phy_modes(state->interface));
+
phylink_pcs_poll_stop(pl);
if (pl->mac_ops->mac_prepare) {
- err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_prepare(pl->config, pl->act_link_an_mode,
state->interface);
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
@@ -1214,7 +1416,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs && pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
@@ -1230,13 +1432,20 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_pcs_an_restart(pl);
if (pl->mac_ops->mac_finish) {
- err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
if (err < 0)
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
}
+ if (pl->phydev && pl->phy_ib_mode) {
+ err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
+ if (err < 0)
+ phylink_err(pl, "phy_config_inband: %pe\n",
+ ERR_PTR(err));
+ }
+
if (pl->sfp_bus) {
rate_kbd = phylink_interface_signal_rate(state->interface);
if (rate_kbd)
@@ -1261,17 +1470,16 @@ static int phylink_change_inband_advert(struct phylink *pl)
return 0;
phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__,
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising,
pl->link_config.pause);
/* Recompute the PCS neg mode */
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- pl->link_config.interface,
- pl->link_config.advertising);
+ phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
+ pl->link_config.advertising);
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
@@ -1293,12 +1501,24 @@ static int phylink_change_inband_advert(struct phylink *pl)
static void phylink_mac_pcs_get_state(struct phylink *pl,
struct phylink_link_state *state)
{
+ struct phylink_pcs *pcs;
+ bool autoneg;
+
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->rate_matching = pl->link_config.rate_matching;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising)) {
+ state->an_complete = 0;
+ state->link = 1;
+
+ pcs = pl->pcs;
+ if (!pcs || pcs->neg_mode)
+ autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ else
+ autoneg = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ state->advertising);
+
+ if (autoneg) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
@@ -1307,11 +1527,9 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->duplex = pl->link_config.duplex;
state->pause = pl->link_config.pause;
}
- state->an_complete = 0;
- state->link = 1;
- if (pl->pcs)
- pl->pcs->ops->pcs_get_state(pl->pcs, state);
+ if (pcs)
+ pcs->ops->pcs_get_state(pcs, pl->pcs_neg_mode, state);
else
state->link = 0;
}
@@ -1336,7 +1554,7 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
struct phylink_link_state link_state;
- switch (pl->cur_link_an_mode) {
+ switch (pl->req_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
break;
@@ -1375,6 +1593,39 @@ static const char *phylink_pause_to_str(int pause)
}
}
+static void phylink_deactivate_lpi(struct phylink *pl)
+{
+ if (pl->mac_enable_tx_lpi) {
+ pl->mac_enable_tx_lpi = false;
+
+ phylink_dbg(pl, "disabling LPI\n");
+
+ pl->mac_ops->mac_disable_tx_lpi(pl->config);
+ }
+}
+
+static void phylink_activate_lpi(struct phylink *pl)
+{
+ int err;
+
+ if (!test_bit(pl->cur_interface, pl->config->lpi_interfaces)) {
+ phylink_dbg(pl, "MAC does not support LPI with %s\n",
+ phy_modes(pl->cur_interface));
+ return;
+ }
+
+ phylink_dbg(pl, "LPI timer %uus, tx clock stop %u\n",
+ pl->mac_tx_lpi_timer, pl->mac_tx_clk_stop);
+
+ err = pl->mac_ops->mac_enable_tx_lpi(pl->config, pl->mac_tx_lpi_timer,
+ pl->mac_tx_clk_stop);
+ if (!err)
+ pl->mac_enable_tx_lpi = true;
+ else
+ phylink_err(pl, "%ps() failed: %pe\n",
+ pl->mac_ops->mac_enable_tx_lpi, ERR_PTR(err));
+}
+
static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
@@ -1410,17 +1661,20 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs && pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed,
duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode,
pl->cur_interface, speed, duplex,
!!(link_state.pause & MLO_PAUSE_TX), rx_pause);
+ if (pl->mac_supports_eee && pl->phy_enable_tx_lpi)
+ phylink_activate_lpi(pl);
+
if (ndev)
netif_carrier_on(ndev);
@@ -1437,25 +1691,29 @@ static void phylink_link_down(struct phylink *pl)
if (ndev)
netif_carrier_off(ndev);
- pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode,
+
+ phylink_deactivate_lpi(pl);
+
+ pl->mac_ops->mac_link_down(pl->config, pl->act_link_an_mode,
pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
+static bool phylink_link_is_up(struct phylink *pl)
+{
+ return pl->netdev ? netif_carrier_ok(pl->netdev) : pl->old_link_state;
+}
+
static void phylink_resolve(struct work_struct *w)
{
struct phylink *pl = container_of(w, struct phylink, resolve);
struct phylink_link_state link_state;
- struct net_device *ndev = pl->netdev;
bool mac_config = false;
bool retrigger = false;
bool cur_link_state;
mutex_lock(&pl->state_mutex);
- if (pl->netdev)
- cur_link_state = netif_carrier_ok(ndev);
- else
- cur_link_state = pl->old_link_state;
+ cur_link_state = phylink_link_is_up(pl);
if (pl->phylink_disable_state) {
pl->link_failed = false;
@@ -1463,10 +1721,10 @@ static void phylink_resolve(struct work_struct *w)
} else if (pl->link_failed) {
link_state.link = false;
retrigger = true;
- } else if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ } else if (pl->act_link_an_mode == MLO_AN_FIXED) {
phylink_get_fixed_state(pl, &link_state);
mac_config = link_state.link;
- } else if (pl->cur_link_an_mode == MLO_AN_PHY) {
+ } else if (pl->act_link_an_mode == MLO_AN_PHY) {
link_state = pl->phy_state;
mac_config = link_state.link;
} else {
@@ -1520,7 +1778,7 @@ static void phylink_resolve(struct work_struct *w)
}
}
- if (pl->cur_link_an_mode != MLO_AN_FIXED)
+ if (pl->act_link_an_mode != MLO_AN_FIXED)
phylink_apply_manual_flow(pl, &link_state);
if (mac_config) {
@@ -1644,7 +1902,7 @@ int phylink_set_fixed_link(struct phylink *pl,
pl->link_config.an_complete = 1;
pl->cfg_link_an_mode = MLO_AN_FIXED;
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
return 0;
}
@@ -1699,6 +1957,17 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
+ pl->mac_supports_eee_ops = mac_ops->mac_disable_tx_lpi &&
+ mac_ops->mac_enable_tx_lpi;
+ pl->mac_supports_eee = pl->mac_supports_eee_ops &&
+ pl->config->lpi_capabilities &&
+ !phy_interface_empty(pl->config->lpi_interfaces);
+
+ /* Set the default EEE configuration */
+ pl->eee_cfg.eee_enabled = pl->config->eee_enabled_default;
+ pl->eee_cfg.tx_lpi_enabled = pl->eee_cfg.eee_enabled;
+ pl->eee_cfg.tx_lpi_timer = pl->config->lpi_timer_default;
+
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -1732,7 +2001,7 @@ struct phylink *phylink_create(struct phylink_config *config,
}
}
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
@@ -1803,16 +2072,22 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
pl->phy_state.link = up;
if (!up)
pl->link_failed = true;
+
+ /* Get the LPI state from phylib */
+ pl->phy_enable_tx_lpi = phydev->enable_tx_lpi;
+ pl->mac_tx_lpi_timer = phydev->eee_cfg.tx_lpi_timer;
mutex_unlock(&pl->state_mutex);
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s/%slpi\n",
+ up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
phy_rate_matching_to_str(phydev->rate_matching),
- phylink_pause_to_str(pl->phy_state.pause));
+ phylink_pause_to_str(pl->phy_state.pause),
+ phydev->enable_tx_lpi ? "" : "no");
}
static int phylink_validate_phy(struct phylink *pl, struct phy_device *phy,
@@ -1942,6 +2217,36 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
/* Restrict the phy advertisement according to the MAC support. */
linkmode_copy(phy->advertising, config.advertising);
+
+ /* If the MAC supports phylink managed EEE, restrict the EEE
+ * advertisement according to the MAC's LPI capabilities.
+ */
+ if (pl->mac_supports_eee) {
+ /* If EEE is enabled, then we need to call phy_support_eee()
+ * to ensure that the advertising mask is appropriately set.
+ * This also enables EEE at the PHY.
+ */
+ if (pl->eee_cfg.eee_enabled)
+ phy_support_eee(phy);
+
+ phy->eee_cfg.tx_lpi_enabled = pl->eee_cfg.tx_lpi_enabled;
+ phy->eee_cfg.tx_lpi_timer = pl->eee_cfg.tx_lpi_timer;
+
+ /* Convert the MAC's LPI capabilities to linkmodes */
+ linkmode_zero(pl->supported_lpi);
+ phylink_caps_to_linkmodes(pl->supported_lpi,
+ pl->config->lpi_capabilities);
+
+ /* Restrict the PHY's EEE support/advertisement to the modes
+ * that the MAC supports.
+ */
+ linkmode_and(phy->advertising_eee, phy->advertising_eee,
+ pl->supported_lpi);
+ } else if (pl->mac_supports_eee_ops) {
+ /* MAC supports phylink EEE, but wants EEE always disabled. */
+ phy_disable_eee(phy);
+ }
+
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
@@ -1957,7 +2262,17 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
if (pl->config->mac_managed_pm)
phy->mac_managed_pm = true;
- return 0;
+ /* Allow the MAC to stop its clock if the PHY has the capability */
+ pl->mac_tx_clk_stop = phy_eee_tx_clock_stop_capable(phy) > 0;
+
+ /* Explicitly configure whether the PHY is allowed to stop its
+ * receive clock.
+ */
+ ret = phy_eee_rx_clock_stop(phy, pl->config->eee_rx_clk_stop_enable);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
+ return ret;
}
static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
@@ -2114,6 +2429,8 @@ void phylink_disconnect_phy(struct phylink *pl)
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = NULL;
+ pl->phy_enable_tx_lpi = false;
+ pl->mac_tx_clk_stop = false;
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
flush_work(&pl->resolve);
@@ -2189,7 +2506,7 @@ void phylink_start(struct phylink *pl)
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface));
/* Always set the carrier off */
@@ -2474,7 +2791,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
linkmode_copy(kset->link_modes.supported, pl->supported);
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
/* We are using fixed settings. Report these as the
* current link settings - and note that these also
@@ -2505,6 +2822,26 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
+static bool phylink_validate_pcs_inband_autoneg(struct phylink *pl,
+ phy_interface_t interface,
+ unsigned long *adv)
+{
+ unsigned int inband = phylink_inband_caps(pl, interface);
+ unsigned int mask;
+
+ /* If the PCS doesn't implement inband support, be permissive. */
+ if (!inband)
+ return true;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv))
+ mask = LINK_INBAND_ENABLE;
+ else
+ mask = LINK_INBAND_DISABLE;
+
+ /* Check whether the PCS implements the required mode */
+ return !!(inband & mask);
+}
+
/**
* phylink_ethtool_ksettings_set() - set the link settings
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -2566,7 +2903,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* If we have a fixed link, refuse to change link parameters.
* If the link parameters match, accept them but do nothing.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
if (s->speed != pl->link_config.speed ||
s->duplex != pl->link_config.duplex)
return -EINVAL;
@@ -2582,7 +2919,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* is our default case) but do not allow the advertisement to
* be changed. If the advertisement matches, simply return.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
if (!linkmode_equal(config.advertising,
pl->link_config.advertising))
return -EINVAL;
@@ -2617,7 +2954,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
linkmode_copy(support, pl->supported);
if (phylink_validate(pl, support, &config)) {
phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
@@ -2635,6 +2972,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
+ /* Validate the autonegotiation state. We don't have a PHY in this
+ * situation, so the PCS is the media-facing entity.
+ */
+ if (!phylink_validate_pcs_inband_autoneg(pl, config.interface,
+ config.advertising))
+ return -EINVAL;
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2717,7 +3061,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
ASSERT_RTNL();
- if (pl->cur_link_an_mode == MLO_AN_FIXED)
+ if (pl->req_link_an_mode == MLO_AN_FIXED)
return -EOPNOTSUPP;
if (!phylink_test(pl->supported, Pause) &&
@@ -2841,8 +3185,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_keee *eee)
ASSERT_RTNL();
- if (pl->phydev)
+ if (pl->mac_supports_eee_ops && !pl->mac_supports_eee)
+ return ret;
+
+ if (pl->phydev) {
ret = phy_ethtool_get_eee(pl->phydev, eee);
+ /* Restrict supported linkmode mask */
+ if (ret == 0 && pl->mac_supports_eee_ops)
+ linkmode_and(eee->supported, eee->supported,
+ pl->supported_lpi);
+ }
return ret;
}
@@ -2855,12 +3207,29 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
*/
int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_keee *eee)
{
+ bool mac_eee = pl->mac_supports_eee;
int ret = -EOPNOTSUPP;
ASSERT_RTNL();
- if (pl->phydev)
+ phylink_dbg(pl, "mac %s phylink EEE%s, adv %*pbl, LPI%s timer %uus\n",
+ mac_eee ? "supports" : "does not support",
+ eee->eee_enabled ? ", enabled" : "",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, eee->advertised,
+ eee->tx_lpi_enabled ? " enabled" : "", eee->tx_lpi_timer);
+
+ if (pl->mac_supports_eee_ops && !mac_eee)
+ return ret;
+
+ if (pl->phydev) {
+ /* Restrict advertisement mask */
+ if (pl->mac_supports_eee_ops)
+ linkmode_and(eee->advertised, eee->advertised,
+ pl->supported_lpi);
ret = phy_ethtool_set_eee(pl->phydev, eee);
+ if (ret == 0)
+ eee_to_eeecfg(&pl->eee_cfg, eee);
+ }
return ret;
}
@@ -2981,7 +3350,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
struct phylink_link_state state;
int val = 0xffff;
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
@@ -3006,7 +3375,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
unsigned int reg, unsigned int val)
{
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
break;
@@ -3176,11 +3545,11 @@ static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
return interface;
}
-static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void phylink_sfp_set_config(struct phylink *pl, unsigned long *supported,
+ struct phylink_link_state *state,
+ bool changed)
{
- bool changed = false;
+ u8 mode = MLO_AN_INBAND;
phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
phylink_an_mode_str(mode), phy_modes(state->interface),
@@ -3196,9 +3565,9 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
changed = true;
}
- if (pl->cur_link_an_mode != mode ||
+ if (pl->req_link_an_mode != mode ||
pl->link_config.interface != state->interface) {
- pl->cur_link_an_mode = mode;
+ pl->req_link_an_mode = mode;
pl->link_config.interface = state->interface;
changed = true;
@@ -3213,8 +3582,7 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
phylink_mac_initial_config(pl, false);
}
-static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
- struct phy_device *phy)
+static int phylink_sfp_config_phy(struct phylink *pl, struct phy_device *phy)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
@@ -3258,7 +3626,7 @@ static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, mode, support, &config);
+ phylink_sfp_set_config(pl, support, &config, true);
return 0;
}
@@ -3314,6 +3682,12 @@ static int phylink_sfp_config_optical(struct phylink *pl)
phylink_dbg(pl, "optical SFP: chosen %s interface\n",
phy_modes(interface));
+ if (!phylink_validate_pcs_inband_autoneg(pl, interface,
+ config.advertising)) {
+ phylink_err(pl, "autoneg setting not compatible with PCS");
+ return -EINVAL;
+ }
+
config.interface = interface;
/* Ignore errors if we're expecting a PHY to attach later */
@@ -3327,7 +3701,7 @@ static int phylink_sfp_config_optical(struct phylink *pl)
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
+ phylink_sfp_set_config(pl, pl->sfp_support, &config, false);
return 0;
}
@@ -3398,19 +3772,16 @@ static void phylink_sfp_link_up(void *upstream)
phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK);
}
-/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
- * or 802.3z control word, so inband will not work.
- */
-static bool phylink_phy_no_inband(struct phy_device *phy)
-{
- return phy->is_c45 && phy_id_compare(phy->c45_ids.device_ids[1],
- 0xae025150, 0xfffffff0);
-}
-
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phylink *pl = upstream;
- u8 mode;
+
+ if (!phy->drv) {
+ phylink_err(pl, "PHY %s (id 0x%.8lx) has no driver loaded\n",
+ phydev_name(phy), (unsigned long)phy->phy_id);
+ phylink_err(pl, "Drivers which handle known common cases: CONFIG_BCM84881_PHY, CONFIG_MARVELL_PHY\n");
+ return -EINVAL;
+ }
/*
* This is the new way of dealing with flow control for PHYs,
@@ -3421,17 +3792,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
*/
phy_support_asym_pause(phy);
- if (phylink_phy_no_inband(phy))
- mode = MLO_AN_PHY;
- else
- mode = MLO_AN_INBAND;
-
/* Set the PHY's host supported interfaces */
phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
pl->config->supported_interfaces);
/* Do the initial configuration */
- return phylink_sfp_config_phy(pl, mode, phy);
+ return phylink_sfp_config_phy(pl, phy);
}
static void phylink_sfp_disconnect_phy(void *upstream,
@@ -3626,6 +3992,7 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
/**
* phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
* @state: a pointer to a &struct phylink_link_state.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @bmsr: The value of the %MII_BMSR register
* @lpa: The value of the %MII_LPA register
*
@@ -3638,32 +4005,45 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
* accessing @bmsr and @lpa cannot be done with MDIO directly.
*/
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
- u16 bmsr, u16 lpa)
+ unsigned int neg_mode, u16 bmsr, u16 lpa)
{
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- /* If there is no link or autonegotiation is disabled, the LP advertisement
- * data is not meaningful, so don't go any further.
- */
- if (!state->link || !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising))
+
+ /* If the link is down, the advertisement data is undefined. */
+ if (!state->link)
return;
switch (state->interface) {
case PHY_INTERFACE_MODE_1000BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_1000);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_1000);
+ } else {
+ state->speed = SPEED_1000;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_2500);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_2500);
+ } else {
+ state->speed = SPEED_2500;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_decode_sgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_sgmii_word(state, lpa);
break;
+
case PHY_INTERFACE_MODE_QUSGMII:
- phylink_decode_usgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_usgmii_word(state, lpa);
break;
default:
@@ -3676,6 +4056,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
/**
* phylink_mii_c22_pcs_get_state() - read the MAC PCS state
* @pcs: a pointer to a &struct mdio_device.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @state: a pointer to a &struct phylink_link_state.
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
@@ -3688,6 +4069,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
* structure.
*/
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int bmsr, lpa;
@@ -3699,7 +4081,7 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
return;
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
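A clause-22 PCS driver now forwards the neg_mode argument it receives in its pcs_get_state() method straight to the helper. A minimal sketch, where struct my_pcs and its mdio member are illustrative:

static void my_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
			     struct phylink_link_state *state)
{
	struct my_pcs *mp = container_of(pcs, struct my_pcs, pcs);

	/* neg_mode lets the helper tell a negotiated link word from a
	 * forced-speed configuration when decoding BMSR/LPA.
	 */
	phylink_mii_c22_pcs_get_state(mp->mdio, neg_mode, state);
}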
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 105602581a03..26350b962890 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -1098,7 +1098,7 @@ static struct phy_driver at803x_driver[] = {
module_phy_driver(at803x_driver);
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+static const struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
{ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index bd8a51ec0ecd..3279de857b47 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -828,7 +828,7 @@ static struct phy_driver qca807x_drivers[] = {
};
module_phy_driver(qca807x_drivers);
-static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca807x_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
{ }
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
index 5048304ccc9e..71498c518f0f 100644
--- a/drivers/net/phy/qcom/qca808x.c
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -655,7 +655,7 @@ static struct phy_driver qca808x_driver[] = {
module_phy_driver(qca808x_driver);
-static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca808x_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
index 7a5039920b9f..bc70ed8efd86 100644
--- a/drivers/net/phy/qcom/qca83xx.c
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -259,7 +259,7 @@ static struct phy_driver qca83xx_driver[] = {
module_phy_driver(qca83xx_driver);
-static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 30d15f7c9b03..7b70ba6cab66 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -155,7 +155,7 @@ static struct phy_driver qs6612_driver[] = { {
module_phy_driver(qs6612_driver);
-static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
+static const struct mdio_device_id __maybe_unused qs6612_tbl[] = {
{ 0x00181440, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/realtek/Kconfig b/drivers/net/phy/realtek/Kconfig
new file mode 100644
index 000000000000..31935f147d87
--- /dev/null
+++ b/drivers/net/phy/realtek/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config REALTEK_PHY
+ tristate "Realtek PHYs"
+ help
+	  Currently supports RTL821x/RTL822x and Fast Ethernet PHYs
+
+config REALTEK_PHY_HWMON
+ def_bool REALTEK_PHY && HWMON
+ depends on !(REALTEK_PHY=y && HWMON=m)
+ help
+ Optional hwmon support for the temperature sensor
diff --git a/drivers/net/phy/realtek/Makefile b/drivers/net/phy/realtek/Makefile
new file mode 100644
index 000000000000..dd21cf87f2f1
--- /dev/null
+++ b/drivers/net/phy/realtek/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+realtek-y += realtek_main.o
+realtek-$(CONFIG_REALTEK_PHY_HWMON) += realtek_hwmon.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/realtek/realtek.h b/drivers/net/phy/realtek/realtek.h
new file mode 100644
index 000000000000..a39b44fa18a0
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef REALTEK_H
+#define REALTEK_H
+
+#include <linux/phy.h>
+
+int rtl822x_hwmon_init(struct phy_device *phydev);
+
+#endif /* REALTEK_H */
diff --git a/drivers/net/phy/realtek/realtek_hwmon.c b/drivers/net/phy/realtek/realtek_hwmon.c
new file mode 100644
index 000000000000..1ecb410bb941
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek_hwmon.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HWMON support for Realtek PHYs
+ *
+ * Author: Heiner Kallweit <hkallweit1@gmail.com>
+ */
+
+#include <linux/hwmon.h>
+#include <linux/phy.h>
+
+#include "realtek.h"
+
+#define RTL822X_VND2_TSALRM 0xa662
+#define RTL822X_VND2_TSRR 0xbd84
+#define RTL822X_VND2_TSSR 0xb54c
+
+static int rtl822x_hwmon_get_temp(int raw)
+{
+ if (raw >= 512)
+ raw -= 1024;
+
+ return 1000 * raw / 2;
+}
+
+static int rtl822x_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int raw;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSRR) & 0x3ff;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ case hwmon_temp_max:
+ /* Chip reduces speed to 1G if threshold is exceeded */
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSSR) >> 6;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops rtl822x_hwmon_ops = {
+ .visible = 0444,
+ .read = rtl822x_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const rtl822x_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
+ NULL
+};
+
+static const struct hwmon_chip_info rtl822x_hwmon_chip_info = {
+ .ops = &rtl822x_hwmon_ops,
+ .info = rtl822x_hwmon_info,
+};
+
+int rtl822x_hwmon_init(struct phy_device *phydev)
+{
+ struct device *hwdev, *dev = &phydev->mdio.dev;
+ const char *name;
+
+ /* Ensure over-temp alarm is reset. */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSALRM, 3);
+
+ name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, name, phydev,
+ &rtl822x_hwmon_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwdev);
+}
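The sensor readout above is a 10-bit two's-complement value in 0.5 degC steps;
rtl822x_hwmon_get_temp() sign-extends it and scales to the millidegree unit
hwmon expects. A standalone check of that arithmetic (the sample raw values
are made up, not taken from hardware):

#include <stdio.h>

/* Mirrors rtl822x_hwmon_get_temp(): sign-extend a 10-bit raw reading,
 * then scale 0.5 degC per LSB to millidegrees.
 */
static int raw_to_mdegc(int raw)
{
	if (raw >= 512)		/* 512..1023 encode negative values */
		raw -= 1024;
	return 1000 * raw / 2;
}

int main(void)
{
	int samples[] = { 0, 70, 511, 904, 1023 };	/* made-up readings */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("raw %4d -> %6d mdegC\n", samples[i],
		       raw_to_mdegc(samples[i]));
	return 0;
}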
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek/realtek_main.c
index f65d7f1f348e..572a933636b0 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -14,6 +14,8 @@
#include <linux/delay.h>
#include <linux/clk.h>
+#include "realtek.h"
+
#define RTL821x_PHYSR 0x11
#define RTL821x_PHYSR_DUPLEX BIT(13)
#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
@@ -736,7 +738,11 @@ static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
- if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
+ if (devnum == MDIO_MMD_VEND2) {
+ rtl821x_write_page(phydev, regnum >> 4);
+ ret = __phy_read(phydev, 0x10 + ((regnum & 0xf) >> 1));
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
rtl821x_write_page(phydev, 0xa5c);
ret = __phy_read(phydev, 0x12);
rtl821x_write_page(phydev, 0);
@@ -760,7 +766,11 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
{
int ret;
- if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
+ if (devnum == MDIO_MMD_VEND2) {
+ rtl821x_write_page(phydev, regnum >> 4);
+ ret = __phy_write(phydev, 0x10 + ((regnum & 0xf) >> 1), val);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
rtl821x_write_page(phydev, 0xa5d);
ret = __phy_write(phydev, 0x10, val);
rtl821x_write_page(phydev, 0);
@@ -812,6 +822,15 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
+static int rtl822x_probe(struct phy_device *phydev)
+{
+ if (IS_ENABLED(CONFIG_REALTEK_PHY_HWMON) &&
+ phydev->phy_id != RTL_GENERIC_PHYID)
+ return rtl822x_hwmon_init(phydev);
+
+ return 0;
+}
+
static int rtl822xb_config_init(struct phy_device *phydev)
{
bool has_2500, has_sgmii;
@@ -952,15 +971,15 @@ static int rtl822x_read_status(struct phy_device *phydev)
{
int lpadv, ret;
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+
ret = rtlgen_read_status(phydev);
if (ret < 0)
return ret;
if (phydev->autoneg == AUTONEG_DISABLE ||
- !phydev->autoneg_complete) {
- mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ !phydev->autoneg_complete)
return 0;
- }
lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
if (lpadv < 0)
@@ -1023,26 +1042,25 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
{
int ret, val;
- ret = genphy_c45_read_status(phydev);
- if (ret < 0)
- return ret;
-
- if (phydev->autoneg == AUTONEG_DISABLE ||
- !genphy_c45_aneg_done(phydev))
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
-
/* Vendor register as C45 has no standardized support for 1000BaseT */
- if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (phydev->autoneg == AUTONEG_ENABLE && genphy_c45_aneg_done(phydev)) {
val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
RTL822X_VND2_GANLPAR);
if (val < 0)
return val;
-
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ } else {
+ val = 0;
}
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
- if (!phydev->link)
+ ret = genphy_c45_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (!phydev->link) {
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
return 0;
+ }
/* Read actual speed from vendor register. */
val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_VND2_PHYSR);
@@ -1456,6 +1474,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vb_cg_c22_match_phy_device,
.name = "RTL8221B-VB-CG 2.5Gbps PHY (C22)",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.config_init = rtl822xb_config_init,
@@ -1468,6 +1487,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vb_cg_c45_match_phy_device,
.name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)",
+ .probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
@@ -1478,6 +1498,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
.name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.config_init = rtl822xb_config_init,
@@ -1490,6 +1511,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
.name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
@@ -1500,6 +1522,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8251b_c45_match_phy_device,
.name = "RTL8251B 5Gbps PHY",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.read_status = rtl822x_read_status,
@@ -1511,6 +1534,7 @@ static struct phy_driver realtek_drvs[] = {
.match_phy_device = rtl_internal_nbaset_match_phy_device,
.name = "Realtek Internal NBASE-T PHY",
.flags = PHY_IS_INTERNAL,
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.read_status = rtl822x_read_status,
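The new MDIO_MMD_VEND2 branches in rtlgen_read_mmd()/rtlgen_write_mmd() fold a
vendor MMD register number onto Realtek's paged clause-22 space: the upper bits
select the page, and the low nibble, halved because the paged registers are
16 bits wide, picks one of the eight registers starting at 0x10. A standalone
check of that mapping, fed the hwmon addresses from realtek_hwmon.c:

#include <stdio.h>

/* Mirrors the VEND2 translation in rtlgen_read_mmd()/rtlgen_write_mmd(). */
static void vend2_to_paged(unsigned int regnum,
			   unsigned int *page, unsigned int *reg)
{
	*page = regnum >> 4;
	*reg = 0x10 + ((regnum & 0xf) >> 1);
}

int main(void)
{
	/* Register addresses used by realtek_hwmon.c */
	unsigned int regs[] = { 0xa662, 0xbd84, 0xb54c };
	unsigned int page, reg;

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		vend2_to_paged(regs[i], &page, &reg);
		printf("VEND2 0x%04x -> page 0x%03x, reg 0x%02x\n",
		       regs[i], page, reg);
	}
	return 0;
}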
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index bb13e75183ee..b338f385e15a 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -188,7 +188,7 @@ static struct phy_driver rockchip_phy_driver[] = {
module_phy_driver(rockchip_phy_driver);
-static struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
{ INTERNAL_EPHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index e1853599d9ba..31463b9e5697 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -838,7 +838,7 @@ MODULE_DESCRIPTION("SMSC PHY driver");
MODULE_AUTHOR("Herbert Valerio Riedel");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused smsc_tbl[] = {
+static const struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0a0, 0xfffffff0 },
{ 0x0007c0b0, 0xfffffff0 },
{ 0x0007c0c0, 0xfffffff0 },
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 7196e927c2cd..076a370be849 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -289,7 +289,7 @@ static int ks8995_reset(struct ks8995_switch *ks)
}
static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
struct device *dev;
struct ks8995_switch *ks8995;
@@ -301,7 +301,7 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
}
static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
struct device *dev;
struct ks8995_switch *ks8995;
@@ -401,8 +401,8 @@ static const struct bin_attribute ks8995_registers_attr = {
.mode = 0600,
},
.size = KS8995_REGS_SIZE,
- .read = ks8995_registers_read,
- .write = ks8995_registers_write,
+ .read_new = ks8995_registers_read,
+ .write_new = ks8995_registers_write,
};
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 309e4c3496c4..d4835d4c50e0 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -124,7 +124,7 @@ static struct phy_driver ste10xp_pdriver[] = {
module_phy_driver(ste10xp_pdriver);
-static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
+static const struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
{ STE101P_PHY_ID, 0xfffffff0 },
{ STE100P_PHY_ID, 0xffffffff },
{ }
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 8057ea8dbc21..752d4bf7bb99 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -87,7 +87,7 @@ static struct phy_driver teranetics_driver[] = {
module_phy_driver(teranetics_driver);
-static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+static const struct mdio_device_id __maybe_unused teranetics_tbl[] = {
{ PHY_ID_TN2020, 0xffffffff },
{ }
};
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 38834347a427..900cb756c366 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -90,7 +90,7 @@ static struct phy_driver upd60620_driver[1] = { {
module_phy_driver(upd60620_driver);
-static struct mdio_device_id __maybe_unused upd60620_tbl[] = {
+static const struct mdio_device_id __maybe_unused upd60620_tbl[] = {
{ UPD60620_PHY_ID, 0xfffffffe },
{ }
};
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2377179de017..b1b7bbba284e 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -674,7 +674,7 @@ static struct phy_driver vsc82xx_driver[] = {
module_phy_driver(vsc82xx_driver);
-static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
+static const struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 0af7db80b2f8..fc9e23927b3b 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -431,31 +431,6 @@ static int pd692x0_pi_disable(struct pse_controller_dev *pcdev, int id)
return 0;
}
-static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
-{
- struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
- struct pd692x0_msg msg, buf = {0};
- int ret;
-
- ret = pd692x0_fw_unavailable(priv);
- if (ret)
- return ret;
-
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
- msg.sub[2] = id;
- ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf.sub[1]) {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- return 1;
- } else {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
- return 0;
- }
-}
-
struct pd692x0_pse_ext_state_mapping {
u32 status_code;
enum ethtool_c33_pse_ext_state pse_ext_state;
@@ -517,21 +492,38 @@ pd692x0_pse_ext_state_map[] = {
{ /* sentinel */ }
};
-static void
-pd692x0_get_ext_state(struct ethtool_c33_pse_ext_state_info *c33_ext_state_info,
- u32 status_code)
+static int
+pd692x0_pi_get_ext_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_ext_state_info *ext_state_info)
{
+ struct ethtool_c33_pse_ext_state_info *c33_ext_state_info;
const struct pd692x0_pse_ext_state_mapping *ext_state_map;
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ c33_ext_state_info = &ext_state_info->c33_ext_state_info;
ext_state_map = pd692x0_pse_ext_state_map;
while (ext_state_map->status_code) {
- if (ext_state_map->status_code == status_code) {
+ if (ext_state_map->status_code == buf.sub[0]) {
c33_ext_state_info->c33_pse_ext_state = ext_state_map->pse_ext_state;
c33_ext_state_info->__c33_pse_ext_substate = ext_state_map->pse_ext_substate;
- return;
+ return 0;
}
ext_state_map++;
}
+
+ return 0;
}
struct pd692x0_class_pw {
@@ -613,35 +605,36 @@ static int pd692x0_pi_set_pw_from_table(struct device *dev,
}
static int
-pd692x0_pi_get_pw_ranges(struct pse_control_status *st)
+pd692x0_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
const struct pd692x0_class_pw *pw_table;
int i;
pw_table = pd692x0_class_pw_table;
- st->c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
- sizeof(struct ethtool_c33_pse_pw_limit_range),
- GFP_KERNEL);
- if (!st->c33_pw_limit_ranges)
+ c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
+ sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
return -ENOMEM;
for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
- st->c33_pw_limit_ranges[i].min = pw_table->class_pw;
- st->c33_pw_limit_ranges[i].max = pw_table->class_pw + pw_table->max_added_class_pw;
+ c33_pw_limit_ranges[i].min = pw_table->class_pw;
+ c33_pw_limit_ranges[i].max = pw_table->class_pw +
+ pw_table->max_added_class_pw;
}
- st->c33_pw_limit_nb_ranges = i;
- return 0;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+ return i;
}
-static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+pd692x0_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- u32 class;
int ret;
ret = pd692x0_fw_unavailable(priv);
@@ -654,39 +647,65 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- /* Compare Port Status (Communication Protocol Document par. 7.1) */
- if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
- else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
- else if (buf.sub[0] == 0x12)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
- else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
-
if (buf.sub[1])
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
- priv->admin_state[id] = status->c33_admin_state;
+ priv->admin_state[id] = admin_state->c33_admin_state;
- pd692x0_get_ext_state(&status->c33_ext_state_info, buf.sub[0]);
- status->c33_actual_pw = (buf.data[0] << 4 | buf.data[1]) * 100;
+ return 0;
+}
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+static int
+pd692x0_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
msg.sub[2] = id;
- memset(&buf, 0, sizeof(buf));
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
- if (ret < 0)
+ /* Compare Port Status (Communication Protocol Document par. 7.1) */
+ if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
+ else if (buf.sub[0] == 0x12)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
+ else
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ return 0;
+}
+
+static int
+pd692x0_pi_get_pw_class(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ u32 class;
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
return ret;
- status->c33_avail_pw_limit = ret;
- memset(&buf, 0, sizeof(buf));
msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS];
msg.sub[2] = id;
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
@@ -695,13 +714,29 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
class = buf.data[3] >> 4;
if (class <= 8)
- status->c33_pw_class = class;
+ return class;
- ret = pd692x0_pi_get_pw_ranges(status);
+ return 0;
+}
+
+static int
+pd692x0_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- return 0;
+ return (buf.data[0] << 4 | buf.data[1]) * 100;
}
static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
@@ -999,13 +1034,12 @@ static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
}
-static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
- int id)
+static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
+ int id)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- int mW, uV, uA, ret;
- s64 tmp_64;
+ int ret;
msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
msg.sub[2] = id;
@@ -1013,48 +1047,24 @@ static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
- if (ret < 0)
- return ret;
- mW = ret;
-
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
- return ret;
- uV = ret;
-
- tmp_64 = mW;
- tmp_64 *= 1000000000ull;
- /* uA = mW * 1000000000 / uV */
- uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
- return uA;
+ return pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
}
-static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
- int id, int max_uA)
+static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct device *dev = &priv->client->dev;
struct pd692x0_msg msg, buf = {0};
- int uV, ret, mW;
- s64 tmp_64;
+ int ret;
ret = pd692x0_fw_unavailable(priv);
if (ret)
return ret;
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
- return ret;
- uV = ret;
-
msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
msg.sub[2] = id;
- tmp_64 = uV;
- tmp_64 *= max_uA;
- /* mW = uV * uA / 1000000000 */
- mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
- ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
+ ret = pd692x0_pi_set_pw_from_table(dev, &msg, max_mW);
if (ret)
return ret;
@@ -1063,13 +1073,17 @@ static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
static const struct pse_controller_ops pd692x0_ops = {
.setup_pi_matrix = pd692x0_setup_pi_matrix,
- .ethtool_get_status = pd692x0_ethtool_get_status,
+ .pi_get_admin_state = pd692x0_pi_get_admin_state,
+ .pi_get_pw_status = pd692x0_pi_get_pw_status,
+ .pi_get_ext_state = pd692x0_pi_get_ext_state,
+ .pi_get_pw_class = pd692x0_pi_get_pw_class,
+ .pi_get_actual_pw = pd692x0_pi_get_actual_pw,
.pi_enable = pd692x0_pi_enable,
.pi_disable = pd692x0_pi_disable,
- .pi_is_enabled = pd692x0_pi_is_enabled,
.pi_get_voltage = pd692x0_pi_get_voltage,
- .pi_get_current_limit = pd692x0_pi_get_current_limit,
- .pi_set_current_limit = pd692x0_pi_set_current_limit,
+ .pi_get_pw_limit = pd692x0_pi_get_pw_limit,
+ .pi_set_pw_limit = pd692x0_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = pd692x0_pi_get_pw_limit_ranges,
};
#define PD692X0_FW_LINE_MAX_SZ 0xff
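The refactor above replaces the monolithic ethtool_get_status callback with
small pi_get_* operations, several of which return their result directly as a
positive value (power class, milliwatts, number of limit ranges) instead of
filling one big status struct. A userspace mock of the pi_get_pw_limit_ranges()
contract; the range values and the caller-frees ownership shown here are
assumptions drawn from the kcalloc() in the driver:

#include <stdio.h>
#include <stdlib.h>

/* Mock of the new callback shape: allocate the range array, hand it to
 * the caller, and return the number of entries (negative on error).
 * Class boundaries below are made up for illustration.
 */
struct pw_limit_range { int min_mW; int max_mW; };

static int get_pw_limit_ranges(struct pw_limit_range **ranges)
{
	struct pw_limit_range *r = calloc(2, sizeof(*r));

	if (!r)
		return -1;		/* -ENOMEM in the kernel version */

	r[0].min_mW = 4000;  r[0].max_mW = 15400;	/* made-up range */
	r[1].min_mW = 15400; r[1].max_mW = 30000;	/* made-up range */
	*ranges = r;
	return 2;			/* count, like the driver returns i */
}

int main(void)
{
	struct pw_limit_range *ranges = NULL;
	int n = get_pw_limit_ranges(&ranges);

	for (int i = 0; i < n; i++)
		printf("range %d: %d..%d mW\n", i,
		       ranges[i].min_mW, ranges[i].max_mW);
	free(ranges);	/* ownership passes to the caller */
	return 0;
}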
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 2906ce173f66..4f2a54afc4d0 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -6,6 +6,7 @@
//
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/of.h>
#include <linux/pse-pd/pse.h>
#include <linux/regulator/driver.h>
@@ -210,16 +211,25 @@ out:
static int pse_pi_is_enabled(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ struct pse_admin_state admin_state = {0};
const struct pse_controller_ops *ops;
int id, ret;
ops = pcdev->ops;
- if (!ops->pi_is_enabled)
+ if (!ops->pi_get_admin_state)
return -EOPNOTSUPP;
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_is_enabled(pcdev, id);
+ ret = ops->pi_get_admin_state(pcdev, id, &admin_state);
+ if (ret)
+ goto out;
+
+ if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED ||
+ admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
+ ret = 1;
+
+out:
mutex_unlock(&pcdev->lock);
return ret;
@@ -291,33 +301,25 @@ static int pse_pi_get_voltage(struct regulator_dev *rdev)
return ret;
}
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status);
-
static int pse_pi_get_current_limit(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- struct netlink_ext_ack extack = {};
- struct pse_control_status st = {};
- int id, uV, ret;
+ int id, uV, mW, ret;
s64 tmp_64;
ops = pcdev->ops;
id = rdev_get_id(rdev);
+ if (!ops->pi_get_pw_limit || !ops->pi_get_voltage)
+ return -EOPNOTSUPP;
+
mutex_lock(&pcdev->lock);
- if (ops->pi_get_current_limit) {
- ret = ops->pi_get_current_limit(pcdev, id);
+ ret = ops->pi_get_pw_limit(pcdev, id);
+ if (ret < 0)
goto out;
- }
+ mW = ret;
- /* If pi_get_current_limit() callback not populated get voltage
- * from pi_get_voltage() and power limit from ethtool_get_status()
- * to calculate current limit.
- */
- ret = _pse_pi_get_voltage(rdev);
+ ret = pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
ret = -ERANGE;
@@ -327,16 +329,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
goto out;
uV = ret;
- ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
- if (ret)
- goto out;
-
- if (!st.c33_avail_pw_limit) {
- ret = -ENODATA;
- goto out;
- }
-
- tmp_64 = st.c33_avail_pw_limit;
+ tmp_64 = mW;
tmp_64 *= 1000000000ull;
/* uA = mW * 1000000000 / uV */
ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
@@ -351,15 +344,33 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- int id, ret;
+ int id, mW, ret;
+ s64 tmp_64;
ops = pcdev->ops;
- if (!ops->pi_set_current_limit)
+ if (!ops->pi_set_pw_limit || !ops->pi_get_voltage)
return -EOPNOTSUPP;
+ if (max_uA > MAX_PI_CURRENT)
+ return -ERANGE;
+
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_set_current_limit(pcdev, id, max_uA);
+ ret = pse_pi_get_voltage(rdev);
+ if (!ret) {
+ dev_err(pcdev->dev, "Voltage null\n");
+ ret = -ERANGE;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ tmp_64 = ret;
+ tmp_64 *= max_uA;
+ /* mW = uA * uV / 1000000000 */
+ mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+ ret = ops->pi_set_pw_limit(pcdev, id, mW);
+out:
mutex_unlock(&pcdev->lock);
return ret;
@@ -403,17 +414,16 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
- if (pcdev->ops->pi_set_current_limit) {
+ if (pcdev->ops->pi_set_pw_limit)
rinit_data->constraints.valid_ops_mask |=
REGULATOR_CHANGE_CURRENT;
- rinit_data->constraints.max_uA = MAX_PI_CURRENT;
- }
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;
rconfig.driver_data = pcdev;
rconfig.init_data = rinit_data;
+ rconfig.of_node = pcdev->pi[id].np;
rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig);
if (IS_ERR(rdev)) {
@@ -444,6 +454,13 @@ int pse_controller_register(struct pse_controller_dev *pcdev)
if (!pcdev->nr_lines)
pcdev->nr_lines = 1;
+ if (!pcdev->ops->pi_get_admin_state ||
+ !pcdev->ops->pi_get_pw_status) {
+ dev_err(pcdev->dev,
+ "Mandatory status report callbacks are missing");
+ return -EINVAL;
+ }
+
ret = of_load_pse_pis(pcdev);
if (ret)
return ret;
@@ -736,23 +753,6 @@ out:
}
EXPORT_SYMBOL_GPL(of_pse_control_get);
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
-{
- const struct pse_controller_ops *ops;
-
- ops = pcdev->ops;
- if (!ops->ethtool_get_status) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not support status report");
- return -EOPNOTSUPP;
- }
-
- return ops->ethtool_get_status(pcdev, id, extack, status);
-}
-
/**
* pse_ethtool_get_status - get status of PSE control
* @psec: PSE control pointer
@@ -763,15 +763,81 @@ static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
*/
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+ struct ethtool_pse_control_status *status)
{
- int err;
+ struct pse_admin_state admin_state = {0};
+ struct pse_pw_status pw_status = {0};
+ const struct pse_controller_ops *ops;
+ struct pse_controller_dev *pcdev;
+ int ret;
- mutex_lock(&psec->pcdev->lock);
- err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
- mutex_unlock(&psec->pcdev->lock);
+ pcdev = psec->pcdev;
+ ops = pcdev->ops;
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_get_admin_state(pcdev, psec->id, &admin_state);
+ if (ret)
+ goto out;
+ status->podl_admin_state = admin_state.podl_admin_state;
+ status->c33_admin_state = admin_state.c33_admin_state;
- return err;
+ ret = ops->pi_get_pw_status(pcdev, psec->id, &pw_status);
+ if (ret)
+ goto out;
+ status->podl_pw_status = pw_status.podl_pw_status;
+ status->c33_pw_status = pw_status.c33_pw_status;
+
+ if (ops->pi_get_ext_state) {
+ struct pse_ext_state_info ext_state_info = {0};
+
+ ret = ops->pi_get_ext_state(pcdev, psec->id,
+ &ext_state_info);
+ if (ret)
+ goto out;
+
+ memcpy(&status->c33_ext_state_info,
+ &ext_state_info.c33_ext_state_info,
+ sizeof(status->c33_ext_state_info));
+ }
+
+ if (ops->pi_get_pw_class) {
+ ret = ops->pi_get_pw_class(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_class = ret;
+ }
+
+ if (ops->pi_get_actual_pw) {
+ ret = ops->pi_get_actual_pw(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_actual_pw = ret;
+ }
+
+ if (ops->pi_get_pw_limit) {
+ ret = ops->pi_get_pw_limit(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_avail_pw_limit = ret;
+ }
+
+ if (ops->pi_get_pw_limit_ranges) {
+ struct pse_pw_limit_ranges pw_limit_ranges = {0};
+
+ ret = ops->pi_get_pw_limit_ranges(pcdev, psec->id,
+ &pw_limit_ranges);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_limit_ranges =
+ pw_limit_ranges.c33_pw_limit_ranges;
+ status->c33_pw_limit_nb_ranges = ret;
+ }
+out:
+ mutex_unlock(&psec->pcdev->lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
@@ -876,6 +942,9 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec,
int uV, uA, ret;
s64 tmp_64;
+ if (pw_limit > MAX_PI_PW)
+ return -ERANGE;
+
ret = regulator_get_voltage(psec->ps);
if (!ret) {
NL_SET_ERR_MSG(extack,
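With the drivers now speaking milliwatts, pse_core converts to and from the
regulator framework's microamp interface, widening to 64 bits before
multiplying by 10^9 so the intermediate cannot overflow (the kernel code uses
DIV_ROUND_CLOSEST_ULL for the division). A standalone sketch of both
directions, with an illustrative 54 V supply:

#include <stdio.h>
#include <stdint.h>

/* uA = mW * 1e9 / uV, rounded to nearest, as in pse_pi_get_current_limit() */
static int64_t mw_to_ua(int64_t mW, int64_t uV)
{
	return (mW * 1000000000LL + uV / 2) / uV;
}

/* mW = uV * uA / 1e9, rounded to nearest, as in pse_pi_set_current_limit() */
static int64_t ua_to_mw(int64_t uA, int64_t uV)
{
	return (uV * uA + 500000000LL) / 1000000000LL;
}

int main(void)
{
	int64_t uV = 54000000;	/* 54 V, a typical PoE rail */

	printf("30000 mW at 54 V -> %lld uA\n",
	       (long long)mw_to_ua(30000, uV));
	printf("600000 uA at 54 V -> %lld mW\n",
	       (long long)ua_to_mw(600000, uV));
	return 0;
}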
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
index 64ab36974fe0..6ce6773fff31 100644
--- a/drivers/net/pse-pd/pse_regulator.c
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -52,17 +52,19 @@ pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id)
}
static int
-pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+pse_reg_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
- return regulator_is_enabled(priv->ps);
+ admin_state->podl_admin_state = priv->admin_state;
+
+ return 0;
}
static int
-pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+pse_reg_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
@@ -72,20 +74,19 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
return ret;
if (!ret)
- status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
+ pw_status->podl_pw_status =
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
else
- status->podl_pw_status =
+ pw_status->podl_pw_status =
ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
- status->podl_admin_state = priv->admin_state;
-
return 0;
}
static const struct pse_controller_ops pse_reg_ops = {
- .ethtool_get_status = pse_reg_ethtool_get_status,
+ .pi_get_admin_state = pse_reg_pi_get_admin_state,
+ .pi_get_pw_status = pse_reg_pi_get_pw_status,
.pi_enable = pse_reg_pi_enable,
- .pi_is_enabled = pse_reg_pi_is_enabled,
.pi_disable = pse_reg_pi_disable,
};
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 8797ca1a8a21..5e9dda2c0eac 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -25,20 +25,32 @@
#define TPS23881_REG_GEN_MASK 0x17
#define TPS23881_REG_NBITACC BIT(5)
#define TPS23881_REG_PW_EN 0x19
+#define TPS23881_REG_2PAIR_POL1 0x1e
#define TPS23881_REG_PORT_MAP 0x26
#define TPS23881_REG_PORT_POWER 0x29
-#define TPS23881_REG_POEPLUS 0x40
+#define TPS23881_REG_4PAIR_POL1 0x2a
+#define TPS23881_REG_INPUT_V 0x2e
+#define TPS23881_REG_CHAN1_A 0x30
+#define TPS23881_REG_CHAN1_V 0x32
+#define TPS23881_REG_FOLDBACK 0x40
#define TPS23881_REG_TPON BIT(0)
#define TPS23881_REG_FWREV 0x41
#define TPS23881_REG_DEVID 0x43
#define TPS23881_REG_DEVID_MASK 0xF0
#define TPS23881_DEVICE_ID 0x02
+#define TPS23881_REG_CHAN1_CLASS 0x4c
#define TPS23881_REG_SRAM_CTRL 0x60
#define TPS23881_REG_SRAM_DATA 0x61
+#define TPS23881_UV_STEP 3662
+#define TPS23881_NA_STEP 70190
+#define TPS23881_MW_STEP 500
+#define TPS23881_MIN_PI_PW_LIMIT_MW 2000
+
struct tps23881_port_desc {
u8 chan[2];
bool is_4p;
+ int pw_pol;
};
struct tps23881_priv {
@@ -53,36 +65,123 @@ static struct tps23881_priv *to_tps23881_priv(struct pse_controller_dev *pcdev)
return container_of(pcdev, struct tps23881_priv, pcdev);
}
+/*
+ * Helper to extract a value from a u16 register value, which is made of two
+ * u8 registers. The function calculates the bit offset based on the channel
+ * and extracts the relevant bits using a provided field mask.
+ *
+ * @param reg_val: The u16 register value (composed of two u8 registers).
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply to extract the required bits.
+ * @return: The extracted value for the specific channel.
+ */
+static u16 tps23881_calc_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask)
+{
+ if (chan >= 4)
+ reg_val >>= 8;
+
+ return (reg_val >> field_offset) & field_mask;
+}
+
+/*
+ * Helper to combine individual channel values into a u16 register value.
+ * The function sets the value for a specific channel in the appropriate
+ * position.
+ *
+ * @param reg_val: The current u16 register value.
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply for the field (e.g., 0x0F).
+ * @param field_val: The value to set for the specific channel (masked by
+ * field_mask).
+ * @return: The updated u16 register value with the channel value set.
+ */
+static u16 tps23881_set_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask, u16 field_val)
+{
+ field_val &= field_mask;
+
+ if (chan < 4) {
+ reg_val &= ~(field_mask << field_offset);
+ reg_val |= (field_val << field_offset);
+ } else {
+ reg_val &= ~(field_mask << (field_offset + 8));
+ reg_val |= (field_val << (field_offset + 8));
+ }
+
+ return reg_val;
+}
+
+static int
+tps23881_pi_set_pw_pol_limit(struct tps23881_priv *priv, int id, u8 pw_pol,
+ bool is_4p)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ if (!is_4p) {
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ } else {
+ /* One chan is enough to configure the 4p PI power limit */
+ if ((chan % 4) < 2)
+ reg = TPS23881_REG_4PAIR_POL1;
+ else
+ reg = TPS23881_REG_4PAIR_POL1 + 1;
+ }
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, 0xff, pw_pol);
+ return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int tps23881_pi_enable_manual_pol(struct tps23881_priv *priv, int id)
+{
+ struct i2c_client *client = priv->client;
+ int ret;
+ u8 chan;
+ u16 val;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FOLDBACK);
+ if (ret < 0)
+ return ret;
+
+ /* No need to test if the chan is PoE4 as setting either bit for a
+ * 4P configured port disables the automatic configuration on both
+ * channels.
+ */
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ return i2c_smbus_write_byte_data(client, TPS23881_REG_FOLDBACK, val);
+}
+
static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
u8 chan;
u16 val;
- int ret;
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = BIT(chan);
- else
- val = BIT(chan + 4);
+ val = tps23881_set_val(0, chan, 0, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan);
- else
- val |= BIT(chan + 4);
+ val = tps23881_set_val(val, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
}
static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
@@ -97,32 +196,67 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
return -ERANGE;
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = BIT(chan + 4);
- else
- val = BIT(chan + 8);
+ val = tps23881_set_val(0, chan, 4, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan + 4);
- else
- val |= BIT(chan + 8);
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
}
ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
if (ret)
return ret;
- return 0;
+ /* The PWOFF command resets many registers, which need to be
+ * configured again. According to the datasheet "It may take upwards
+ * of 5ms after PWOFFn command for all register values to be updated"
+ */
+ mdelay(5);
+
+ /* Enable detection and classification */
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_CLA_EN);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4), BIT(chan % 4));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
+ if (ret)
+ return ret;
+
+ /* No power policy */
+ if (priv->port[id].pw_pol < 0)
+ return 0;
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ /* Set power policy */
+ return tps23881_pi_set_pw_pol_limit(priv, id, priv->port[id].pw_pol,
+ priv->port[id].is_4p);
}
-static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+static int
+tps23881_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
bool enabled;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -130,32 +264,35 @@ static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4)
- enabled = ret & BIT(chan);
- else
- enabled = ret & BIT(chan + 4);
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- enabled &= !!(ret & BIT(chan));
- else
- enabled &= !!(ret & BIT(chan + 4));
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled &= !!(val);
}
/* Return enabled status only if both channels are in this state */
- return enabled;
+ if (enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
}
-static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+tps23881_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
- bool enabled, delivering;
+ bool delivering;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -163,40 +300,197 @@ static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4) {
- enabled = ret & BIT(chan);
- delivering = ret & BIT(chan + 4);
- } else {
- enabled = ret & BIT(chan + 4);
- delivering = ret & BIT(chan + 8);
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4) {
- enabled &= !!(ret & BIT(chan));
- delivering &= !!(ret & BIT(chan + 4));
- } else {
- enabled &= !!(ret & BIT(chan + 4));
- delivering &= !!(ret & BIT(chan + 8));
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering &= !!(val);
}
/* Return delivering status only if both channels are in this state */
if (delivering)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
-
- /* Return enabled status only if both channel are on this state */
- if (enabled)
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
return 0;
}
+static int tps23881_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret;
+ u64 uV;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_INPUT_V);
+ if (ret < 0)
+ return ret;
+
+ uV = ret & 0x3fff;
+ uV *= TPS23881_UV_STEP;
+
+ return (int)uV;
+}
+
+static int
+tps23881_pi_get_chan_current(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int reg, ret;
+ u64 tmp_64;
+
+ /* Registers 0x30 to 0x3d */
+ reg = TPS23881_REG_CHAN1_A + (chan % 4) * 4 + (chan >= 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp_64 = ret & 0x3fff;
+ tmp_64 *= TPS23881_NA_STEP;
+ /* uA = nA / 1000 */
+ tmp_64 = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000);
+ return (int)tmp_64;
+}
+
+static int tps23881_pi_get_pw_class(struct pse_controller_dev *pcdev,
+ int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ reg = TPS23881_REG_CHAN1_CLASS + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ return tps23881_calc_val(ret, chan, 4, 0x0f);
+}
+
+static int
+tps23881_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, uV, uA;
+ u64 tmp_64;
+ u8 chan;
+
+ ret = tps23881_pi_get_voltage(&priv->pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA = ret;
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA += ret;
+ }
+
+ tmp_64 = uV;
+ tmp_64 *= uA;
+ /* mW = uV * uA / 1000000000 */
+ return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+}
+
+static int
+tps23881_pi_get_pw_limit_chan(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, chan, 0, 0xff);
+ return val * TPS23881_MW_STEP;
+}
+
+static int tps23881_pi_get_pw_limit(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, mW;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+
+ mW = ret;
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+ mW += ret;
+ }
+
+ return mW;
+}
+
+static int tps23881_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ u8 pw_pol;
+ int ret;
+
+ if (max_mW < TPS23881_MIN_PI_PW_LIMIT_MW || MAX_PI_PW < max_mW) {
+ dev_err(&priv->client->dev,
+ "power limit %d out of ranges [%d,%d]",
+ max_mW, TPS23881_MIN_PI_PW_LIMIT_MW, MAX_PI_PW);
+ return -ERANGE;
+ }
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ pw_pol = DIV_ROUND_CLOSEST_ULL(max_mW, TPS23881_MW_STEP);
+
+ /* Save power policy to reconfigure it after a disable call */
+ priv->port[id].pw_pol = pw_pol;
+ return tps23881_pi_set_pw_pol_limit(priv, id, pw_pol,
+ priv->port[id].is_4p);
+}
+
+static int
+tps23881_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
+{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+
+ c33_pw_limit_ranges = kzalloc(sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
+ return -ENOMEM;
+
+ c33_pw_limit_ranges->min = TPS23881_MIN_PI_PW_LIMIT_MW;
+ c33_pw_limit_ranges->max = MAX_PI_PW;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+
+ /* Return the number of ranges */
+ return 1;
+}
+
/* Parse managers subnode into an array of device nodes */
static int
tps23881_get_of_channels(struct tps23881_priv *priv,
@@ -480,7 +774,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
struct i2c_client *client = priv->client;
u8 pi_id, lgcl_chan, hw_chan;
u16 val = 0;
- int i, ret;
+ int i;
for (i = 0; i < port_cnt; i++) {
pi_id = port_matrix[i].pi_id;
@@ -491,6 +785,9 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
if (port_matrix[i].exist)
priv->port[pi_id].chan[0] = lgcl_chan;
+ /* Initialize power policy internal value */
+ priv->port[pi_id].pw_pol = -1;
+
/* Set hardware port matrix for all ports */
val |= hw_chan << (lgcl_chan * 2);
@@ -511,11 +808,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
}
/* Write hardware ports matrix */
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
}
static int
@@ -564,11 +857,7 @@ tps23881_set_ports_conf(struct tps23881_priv *priv,
val |= BIT(port_matrix[i].lgcl_chan[1]) |
BIT(port_matrix[i].lgcl_chan[1] + 4);
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
}
static int
@@ -594,11 +883,7 @@ tps23881_set_ports_matrix(struct tps23881_priv *priv,
if (ret)
return ret;
- ret = tps23881_set_ports_conf(priv, port_matrix);
- if (ret)
- return ret;
-
- return 0;
+ return tps23881_set_ports_conf(priv, port_matrix);
}
static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
@@ -626,8 +911,14 @@ static const struct pse_controller_ops tps23881_ops = {
.setup_pi_matrix = tps23881_setup_pi_matrix,
.pi_enable = tps23881_pi_enable,
.pi_disable = tps23881_pi_disable,
- .pi_is_enabled = tps23881_pi_is_enabled,
- .ethtool_get_status = tps23881_ethtool_get_status,
+ .pi_get_admin_state = tps23881_pi_get_admin_state,
+ .pi_get_pw_status = tps23881_pi_get_pw_status,
+ .pi_get_pw_class = tps23881_pi_get_pw_class,
+ .pi_get_actual_pw = tps23881_pi_get_actual_pw,
+ .pi_get_voltage = tps23881_pi_get_voltage,
+ .pi_get_pw_limit = tps23881_pi_get_pw_limit,
+ .pi_set_pw_limit = tps23881_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = tps23881_pi_get_pw_limit_ranges,
};
static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin";
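tps23881_calc_val() and tps23881_set_val() above encode the chip's register
layout: a 16-bit SMBus word read covers two adjacent 8-bit registers, the low
byte serving channels 0-3 and the high byte channels 4-7, with
field_offset/field_mask selecting bits inside the byte. A standalone demo of
the round trip (the channel numbers are illustrative):

#include <stdio.h>
#include <stdint.h>

static uint16_t calc_val(uint16_t reg_val, uint8_t chan,
			 uint8_t field_offset, uint16_t field_mask)
{
	if (chan >= 4)		/* channels 4-7 live in the high byte */
		reg_val >>= 8;
	return (reg_val >> field_offset) & field_mask;
}

static uint16_t set_val(uint16_t reg_val, uint8_t chan, uint8_t field_offset,
			uint16_t field_mask, uint16_t field_val)
{
	uint8_t shift = field_offset + (chan < 4 ? 0 : 8);

	field_val &= field_mask;
	reg_val &= ~(field_mask << shift);
	return reg_val | (field_val << shift);
}

int main(void)
{
	uint16_t v = 0;

	v = set_val(v, 1, 0, 1 << (1 % 4), 1 << (1 % 4));	/* enable chan 1 */
	v = set_val(v, 6, 0, 1 << (6 % 4), 1 << (6 % 4));	/* enable chan 6 */
	printf("PW_EN word: 0x%04x\n", v);	/* 0x0402 */
	printf("chan 6 enabled: %u\n", !!calc_val(v, 6, 0, 1 << (6 % 4)));
	return 0;
}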
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 5aa41d5f7765..5ca6ecf0ce5f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1329,9 +1329,9 @@ int tap_queue_resize(struct tap_dev *tap)
list_for_each_entry(q, &tap->queue_list, next)
rings[i++] = &q->ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- __skb_array_destroy_skb);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
kfree(rings);
return ret;
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index c7690adec8db..dc7cbd6a9798 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -1175,6 +1175,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EBUSY;
}
+ if (netdev_has_upper_dev(port_dev, dev)) {
+ NL_SET_ERR_MSG(extack, "Device is already a lower device of the team interface");
+ netdev_err(dev, "Device %s is already a lower device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e816aaba8e5f..28624cca91f8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -574,14 +574,18 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
return ret;
}
-static inline bool tun_not_capable(struct tun_struct *tun)
+static inline bool tun_capable(struct tun_struct *tun)
{
const struct cred *cred = current_cred();
struct net *net = dev_net(tun->dev);
- return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
- (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
- !ns_capable(net->user_ns, CAP_NET_ADMIN);
+ if (ns_capable(net->user_ns, CAP_NET_ADMIN))
+ return 1;
+ if (uid_valid(tun->owner) && uid_eq(cred->euid, tun->owner))
+ return 1;
+ if (gid_valid(tun->group) && in_egroup_p(tun->group))
+ return 1;
+ return 0;
}
static void tun_set_real_num_queues(struct tun_struct *tun)
@@ -2778,7 +2782,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
!!(tun->flags & IFF_MULTI_QUEUE))
return -EINVAL;
- if (tun_not_capable(tun))
+ if (!tun_capable(tun))
return -EPERM;
err = security_tun_dev_open(tun->security);
if (err < 0)
@@ -3697,9 +3701,9 @@ static int tun_queue_resize(struct tun_struct *tun)
list_for_each_entry(tfile, &tun->disabled, next)
rings[i++] = &tfile->tx_ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- tun_ptr_free);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
kfree(rings);
return ret;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 531b1b6a37d1..a91bf9c7e31d 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -439,7 +439,7 @@ struct lan78xx_net {
struct usb_anchor deferred;
struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
- struct mutex phy_mutex; /* for phy access */
+ struct mutex mdiobus_mutex; /* for MDIO bus access */
unsigned int pipe_in, pipe_out, pipe_intr;
unsigned int bulk_in_delay;
@@ -472,10 +472,6 @@ struct lan78xx_net {
struct irq_domain_data domain_data;
};
-/* define external phy id */
-#define PHY_LAN8835 (0x0007C130)
-#define PHY_KSZ9031RNX (0x00221620)
-
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
@@ -625,8 +621,8 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
*data = *buf;
} else if (net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to read register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to read register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
@@ -656,8 +652,8 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
if (unlikely(ret < 0) &&
net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to write register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to write register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
@@ -678,11 +674,7 @@ static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
buf &= ~mask;
buf |= (mask & data);
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan78xx_write_reg(dev, reg, buf);
}
static int lan78xx_read_stats(struct lan78xx_net *dev,
@@ -812,8 +804,156 @@ static void lan78xx_update_stats(struct lan78xx_net *dev)
usb_autopm_put_interface(dev->intf);
}
-/* Loop until the read is completed with timeout called with phy_mutex held */
-static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
+{
+ return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
+}
+
+static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
+ u32 hw_disabled)
+{
+ unsigned long timeout;
+ bool stopped = true;
+ int ret;
+ u32 buf;
+
+ /* Stop the h/w block (if not already stopped) */
+
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_enabled) {
+ buf &= ~hw_enabled;
+
+ ret = lan78xx_write_reg(dev, reg, buf);
+ if (ret < 0)
+ return ret;
+
+ stopped = false;
+ timeout = jiffies + HW_DISABLE_TIMEOUT;
+ do {
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_disabled)
+ stopped = true;
+ else
+ msleep(HW_DISABLE_DELAY_MS);
+ } while (!stopped && !time_after(jiffies, timeout));
+ }
+
+ return stopped ? 0 : -ETIMEDOUT;
+}
+
+static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
+{
+ return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
+}
+
+static int lan78xx_start_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start tx path");
+
+ /* Start the MAC transmitter */
+
+ ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the Tx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop tx path");
+
+ /* Stop the Tx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the MAC transmitter */
+
+ ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Tx path is stopped before calling
+ * lan78xx_flush_tx_fifo().
+ */
+static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
+}
+
+static int lan78xx_start_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start rx path");
+
+ /* Start the Rx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the MAC receiver */
+
+ ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop rx path");
+
+ /* Stop the MAC receiver */
+
+ ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the Rx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Rx path is stopped before calling
+ * lan78xx_flush_rx_fifo().
+ */
+static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
+}
+
+/* Loop until the read completes or times out; called with mdiobus_mutex held */
+static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
unsigned long start_time = jiffies;
u32 val;
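The start/stop/flush helpers added above all follow one shape:
lan78xx_stop_hw() clears the enable bit, then polls for the matching disabled
status bit under a bounded wait, and only a stopped path may have its FIFO
flushed. A standalone sketch of that acknowledge-or-timeout loop against a
faked register (bit names and poll counts are illustrative):

#include <stdio.h>

#define EN_BIT  (1u << 0)
#define DIS_BIT (1u << 1)

/* Faked hardware: after the enable bit is cleared, the disabled bit
 * appears a few polls later. Purely illustrative.
 */
static unsigned int fake_reg = EN_BIT;
static int polls_until_ack = 3;

static unsigned int read_reg(void)
{
	if (!(fake_reg & EN_BIT) && polls_until_ack-- <= 0)
		fake_reg |= DIS_BIT;
	return fake_reg;
}

/* Same shape as lan78xx_stop_hw(): clear enable, poll for disabled */
static int stop_hw(int max_polls)
{
	fake_reg &= ~EN_BIT;

	for (int i = 0; i < max_polls; i++)
		if (read_reg() & DIS_BIT)
			return 0;
	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	/* The Tx/Rx FIFO may only be flushed once this has returned 0 */
	printf("stop_hw: %s\n", stop_hw(10) ? "timed out" : "stopped");
	return 0;
}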
@@ -821,14 +961,14 @@ static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MII_ACC, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & MII_ACC_MII_BUSY_))
return 0;
} while (!time_after(jiffies, start_time + HZ));
- return -EIO;
+ return -ETIMEDOUT;
}
static inline u32 mii_access(int id, int index, int read)
@@ -854,8 +994,8 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_) ||
(val & E2P_CMD_EPC_TIMEOUT_))
@@ -865,7 +1005,7 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
netdev_warn(dev->net, "EEPROM read operation timeout");
- return -EIO;
+ return -ETIMEDOUT;
}
return 0;
@@ -879,8 +1019,8 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_))
return 0;
@@ -889,75 +1029,81 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
} while (!time_after(jiffies, start_time + HZ));
netdev_warn(dev->net, "EEPROM is busy");
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u32 val;
- u32 saved;
+ u32 val, saved;
int i, ret;
- int retval;
/* depends on chip, some EEPROM pins are muxed with LED function.
* disable & restore LED function to access EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- return retval;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
ret = lan78xx_read_reg(dev, E2P_DATA, &val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
data[i] = val & 0xFF;
offset++;
}
- retval = 0;
-exit:
+read_raw_eeprom_done:
if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
+ return lan78xx_write_reg(dev, HW_CFG, saved);
- return retval;
+ return 0;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u8 sig;
int ret;
+ u8 sig;
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
- if ((ret == 0) && (sig == EEPROM_INDICATOR))
- ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
- else
- ret = -EINVAL;
+ if (ret < 0)
+ return ret;
- return ret;
+ if (sig != EEPROM_INDICATOR)
+ return -ENODATA;
+
+ return lan78xx_read_raw_eeprom(dev, offset, length, data);
}
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
@@ -966,113 +1112,144 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 val;
u32 saved;
int i, ret;
- int retval;
/* depends on chip, some EEPROM pins are muxed with LED function.
* disable & restore LED function to access EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- goto exit;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ /* Timeout is not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
/* Issue write/erase enable command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Timeout is not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
/* Fill data register */
val = data[i];
ret = lan78xx_write_reg(dev, E2P_DATA, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
/* Send "write" command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Timeout is not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
offset++;
}
- retval = 0;
-exit:
+write_raw_eeprom_done:
if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
+ return lan78xx_write_reg(dev, HW_CFG, saved);
- return retval;
+ return 0;
}
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- int i;
- u32 buf;
unsigned long timeout;
+ int ret, i;
+ u32 buf;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
- lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
- lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ if (ret < 0)
+ return ret;
data[i] = (u8)(buf & 0xFF);
}
@@ -1086,45 +1263,72 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
int i;
u32 buf;
unsigned long timeout;
+ int ret;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
/* set to BYTE program mode */
- lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
- lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
}
@@ -1161,7 +1365,7 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
if (unlikely(ret < 0))
- return -EIO;
+ return ret;
if (dp_sel & DP_SEL_DPRDY_)
return 0;
@@ -1171,44 +1375,51 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
netdev_warn(dev->net, "%s timed out", __func__);
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
u32 addr, u32 length, u32 *buf)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
- u32 dp_sel;
int i, ret;
- if (usb_autopm_get_interface(dev->intf) < 0)
- return 0;
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
mutex_lock(&pdata->dataport_mutex);
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto dataport_write;
- ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
-
- dp_sel &= ~DP_SEL_RSEL_MASK_;
- dp_sel |= ram_select;
- ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+ ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
+ if (ret < 0)
+ goto dataport_write;
for (i = 0; i < length; i++) {
ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto dataport_write;
}
-done:
+dataport_write:
+ if (ret < 0)
+ netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));
+
mutex_unlock(&pdata->dataport_mutex);
usb_autopm_put_interface(dev->intf);
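
lan78xx_dataport_write() keeps a single unwind path: resources are taken in order (autopm reference, then mutex), every register step bails to one label on the first error, the failure is logged once, and the resources are released in reverse order. The shape, reduced to stubs:

    /* Hedged sketch of the single-exit pattern above. get_bus(),
     * put_bus() and write_step() are illustrative stand-ins for
     * autopm + mutex + register I/O.
     */
    #include <errno.h>
    #include <stdio.h>

    static int get_bus(void)  { return 0; }
    static void put_bus(void) { }
    static int write_step(int i) { return (i == 2) ? -EIO : 0; }

    static int dataport_write(int nsteps)
    {
    	int i, ret;

    	ret = get_bus();
    	if (ret < 0)
    		return ret;	/* nothing acquired yet: plain return */

    	for (i = 0; i < nsteps; i++) {
    		ret = write_step(i);
    		if (ret < 0)
    			goto unwind;
    	}
    unwind:
    	if (ret < 0)
    		fprintf(stderr, "dataport write failed: %d\n", ret);
    	put_bus();		/* release in reverse order */
    	return ret;
    }

    int main(void)
    {
    	return dataport_write(4) ? 1 : 0;
    }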
@@ -1244,23 +1455,39 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
struct lan78xx_priv *pdata =
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
- int i;
+ int i, ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
- lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
- DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
+ DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ if (ret < 0)
+ goto multicast_write_done;
for (i = 1; i < NUM_OF_MAF; i++) {
- lan78xx_write_reg(dev, MAF_HI(i), 0);
- lan78xx_write_reg(dev, MAF_LO(i),
- pdata->pfilter_table[i][1]);
- lan78xx_write_reg(dev, MAF_HI(i),
- pdata->pfilter_table[i][0]);
+ ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_LO(i),
+ pdata->pfilter_table[i][1]);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(i),
+ pdata->pfilter_table[i][0]);
+ if (ret < 0)
+ goto multicast_write_done;
}
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+multicast_write_done:
+ if (ret < 0)
+ netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1369,24 +1596,24 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
u32 val;
int ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* Resetting the device while there is activity on the MDIO
* bus can result in the MAC interface locking up and not
* completing register access transactions.
*/
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto exit_unlock;
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
val |= MAC_CR_RST_;
ret = lan78xx_write_reg(dev, MAC_CR, val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
/* Wait for the reset to complete before allowing any further
* MAC register accesses otherwise the MAC may lock up.
@@ -1394,17 +1621,17 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
if (!(val & MAC_CR_RST_)) {
ret = 0;
- goto done;
+ goto exit_unlock;
}
} while (!time_after(jiffies, start_time + HZ));
ret = -ETIMEDOUT;
-done:
- mutex_unlock(&dev->phy_mutex);
+exit_unlock:
+ mutex_unlock(&dev->mdiobus_mutex);
return ret;
}
@@ -1630,6 +1857,7 @@ static void lan78xx_get_wol(struct net_device *netdev,
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (unlikely(ret < 0)) {
+ netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1661,10 +1889,13 @@ static int lan78xx_set_wol(struct net_device *netdev,
pdata->wol = wol->wolopts;
- device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ if (ret < 0)
+ goto exit_pm_put;
- phy_ethtool_set_wol(netdev->phydev, wol);
+ ret = phy_ethtool_set_wol(netdev->phydev, wol);
+exit_pm_put:
usb_autopm_put_interface(dev->intf);
return ret;
@@ -1869,30 +2100,35 @@ exit:
static int lan78xx_get_regs_len(struct net_device *netdev)
{
- if (!netdev->phydev)
- return (sizeof(lan78xx_regs));
- else
- return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
+ return sizeof(lan78xx_regs);
}
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *buf)
{
- u32 *data = buf;
- int i, j;
struct lan78xx_net *dev = netdev_priv(netdev);
+ unsigned int data_count = 0;
+ u32 *data = buf;
+ int i, ret;
/* Read Device/MAC registers */
- for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
- lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
+ ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net,
+ "failed to read register 0x%08x\n",
+ lan78xx_regs[i]);
+ goto clean_data;
+ }
- if (!netdev->phydev)
- return;
+ data_count++;
+ }
- /* Read PHY registers */
- for (j = 0; j < 32; i++, j++)
- data[i] = phy_read(netdev->phydev, j);
+ return;
+
+clean_data:
+ memset(data, 0, data_count * sizeof(u32));
}
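
The reworked lan78xx_get_regs() tracks how many words were filled and wipes them all if any read fails, so userspace never receives a partially valid register snapshot. The same idea in isolation:

    /* Hedged sketch of the cleanup above: if a mid-loop read fails,
     * wipe everything already copied. read_one() is an illustrative
     * stub that fails partway through.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int read_one(unsigned int i, uint32_t *val)
    {
    	if (i == 3)
    		return -1;	/* simulate a failure on the fourth read */
    	*val = 0x1000 + i;
    	return 0;
    }

    static void get_regs(uint32_t *data, unsigned int n)
    {
    	unsigned int filled = 0;
    	unsigned int i;

    	for (i = 0; i < n; i++) {
    		if (read_one(i, &data[i]) < 0) {
    			memset(data, 0, filled * sizeof(*data));
    			return;	/* caller sees all-zero, never half-valid */
    		}
    		filled++;
    	}
    }

    int main(void)
    {
    	uint32_t regs[8] = { 0 };

    	get_regs(regs, 8);
    	printf("regs[0] = 0x%x\n", regs[0]);	/* 0: wiped after failure */
    	return 0;
    }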
static const struct ethtool_ops lan78xx_ethtool_ops = {
@@ -1920,13 +2156,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
u8 addr[6];
+ int ret;
- lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ if (ret < 0)
+ return ret;
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1959,14 +2201,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
}
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ if (ret < 0)
+ return ret;
eth_hw_addr_set(dev->net, addr);
+
+ return 0;
}
/* MDIO read and write wrappers for phylib */
@@ -1980,27 +2234,31 @@ static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
/* set the address, index & direction (read from PHY) */
addr = mii_access(phy_id, idx, MII_READ);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
ret = lan78xx_read_reg(dev, MII_DATA, &val);
+ if (ret < 0)
+ goto done;
ret = (int)(val & 0xFFFF);
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
return ret;
@@ -2017,28 +2275,32 @@ static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
val = (u32)regval;
ret = lan78xx_write_reg(dev, MII_DATA, val);
+ if (ret < 0)
+ goto done;
/* set the address, index & direction (write to PHY) */
addr = mii_access(phy_id, idx, MII_WRITE);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static int lan78xx_mdio_init(struct lan78xx_net *dev)
@@ -2164,13 +2426,22 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
+ int ret;
/* Do the register access here because irq_bus_lock & irq_bus_sync_unlock
* are the only two callbacks executed in non-atomic context.
*/
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ goto irq_bus_sync_unlock;
+
if (buf != data->irqenable)
- lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+
+irq_bus_sync_unlock:
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
+ ERR_PTR(ret));
mutex_unlock(&data->irq_lock);
}
@@ -2195,7 +2466,10 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
mutex_init(&dev->domain_data.irq_lock);
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
dev->domain_data.irqenable = buf;
dev->domain_data.irqchip = &lan78xx_irqchip;
@@ -2234,46 +2508,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqdomain = NULL;
}
-static int lan8835_fixup(struct phy_device *phydev)
-{
- int buf;
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
-
- /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
- buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
- buf &= ~0x1800;
- buf |= 0x0800;
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
-
- /* RGMII MAC TXC Delay Enable */
- lan78xx_write_reg(dev, MAC_RGMII_ID,
- MAC_RGMII_ID_TXC_DELAY_EN_);
-
- /* RGMII TX DLL Tune Adjust */
- lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
-
- dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
-
- return 1;
-}
-
-static int ksz9031rnx_fixup(struct phy_device *phydev)
-{
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
-
- /* Micrel9301RNX PHY configuration */
- /* RGMII Control Signal Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
- /* RGMII RX Data Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
- /* RGMII RX Clock Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
-
- dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
-
- return 1;
-}
-
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
u32 buf;
@@ -2307,22 +2541,11 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
netdev_err(dev->net, "no PHY driver found\n");
return NULL;
}
- dev->interface = PHY_INTERFACE_MODE_RGMII;
- /* external PHY fixup for KSZ9031RNX */
- ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
- ksz9031rnx_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
- return NULL;
- }
- /* external PHY fixup for LAN8835 */
- ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
- lan8835_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
- return NULL;
- }
- /* add more external PHY fixup here if needed */
+ dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ /* The PHY driver is responsible for configuring proper RGMII
+ * interface delays. Disable RGMII delays on the MAC side.
+ */
+ lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
phydev->is_internal = false;
}
@@ -2381,11 +2604,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (phy_is_pseudo_fixed_link(phydev)) {
fixed_phy_unregister(phydev);
phy_device_free(phydev);
- } else {
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
- 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835,
- 0xfffffff0);
}
}
return -EIO;
@@ -2437,27 +2655,36 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- u32 buf;
bool rxenabled;
+ u32 buf;
+ int ret;
- lan78xx_read_reg(dev, MAC_RX, &buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ if (ret < 0)
+ return ret;
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
return 0;
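
lan78xx_set_rx_max_frame_length() follows the usual quiesce/modify/restore sequence: clear the RX enable bit if it was set, rewrite the size field while the block is stopped, then re-enable only if it was running before. A self-contained sketch against a fake in-memory register:

    /* Hedged sketch of the quiesce/modify/restore sequence above.
     * reg is a fake register; the bit layout is illustrative.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define RXEN       (1u << 0)
    #define SIZE_MASK  (0x3FFFu << 16)
    #define SIZE_SHIFT 16

    static uint32_t reg = RXEN | (1500u << SIZE_SHIFT);

    static int set_max_frame(uint32_t size)
    {
    	uint32_t val = reg;
    	int was_enabled = !!(val & RXEN);

    	if (was_enabled) {
    		val &= ~RXEN;
    		reg = val;	/* stop RX before resizing */
    	}

    	val &= ~SIZE_MASK;
    	val |= ((size + 4) << SIZE_SHIFT) & SIZE_MASK;	/* + 4 for FCS */
    	reg = val;

    	if (was_enabled)
    		reg = val | RXEN;	/* restart RX only if it was running */
    	return 0;
    }

    int main(void)
    {
    	set_max_frame(9000);
    	printf("reg = 0x%08x\n", reg);
    	return 0;
    }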
@@ -2523,7 +2750,10 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
- if (!ret)
+ if (ret < 0)
+ netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
+ new_mtu, netdev->mtu, ERR_PTR(ret));
+ else
WRITE_ONCE(netdev->mtu, new_mtu);
usb_autopm_put_interface(dev->intf);
@@ -2536,6 +2766,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
+ int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2552,14 +2783,20 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
/* Added to support MAC address changes */
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
- return 0;
+ return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
}
/* Enable or disable Rx checksum offload engine */
@@ -2592,9 +2829,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-
- return 0;
+ return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_deferred_vlan_write(struct work_struct *param)
@@ -2645,13 +2880,16 @@ static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
}
-static void lan78xx_init_ltm(struct lan78xx_net *dev)
+static int lan78xx_init_ltm(struct lan78xx_net *dev)
{
+ u32 regs[6] = { 0 };
int ret;
u32 buf;
- u32 regs[6] = { 0 };
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ goto init_ltm_failed;
+
if (buf & USB_CFG1_LTM_ENABLE_) {
u8 temp[2];
/* Get values from EEPROM first */
@@ -2662,7 +2900,7 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
if (temp[0] == 24) {
@@ -2671,17 +2909,40 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
}
}
- lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
- lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
- lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
- lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
- lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
- lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ return 0;
+
+init_ltm_failed:
+ netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
+ return ret;
}
static int lan78xx_urb_config_init(struct lan78xx_net *dev)
@@ -2722,156 +2983,6 @@ static int lan78xx_urb_config_init(struct lan78xx_net *dev)
return result;
}
-static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
-{
- return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
-}
-
-static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
- u32 hw_disabled)
-{
- unsigned long timeout;
- bool stopped = true;
- int ret;
- u32 buf;
-
- /* Stop the h/w block (if not already stopped) */
-
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_enabled) {
- buf &= ~hw_enabled;
-
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- stopped = false;
- timeout = jiffies + HW_DISABLE_TIMEOUT;
- do {
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_disabled)
- stopped = true;
- else
- msleep(HW_DISABLE_DELAY_MS);
- } while (!stopped && !time_after(jiffies, timeout));
- }
-
- ret = stopped ? 0 : -ETIME;
-
- return ret;
-}
-
-static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
-{
- return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
-}
-
-static int lan78xx_start_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start tx path");
-
- /* Start the MAC transmitter */
-
- ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
- if (ret < 0)
- return ret;
-
- /* Start the Tx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop tx path");
-
- /* Stop the Tx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- /* Stop the MAC transmitter */
-
- ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Tx path is stopped before calling
- * lan78xx_flush_tx_fifo().
- */
-static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
-}
-
-static int lan78xx_start_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start rx path");
-
- /* Start the Rx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- /* Start the MAC receiver*/
-
- ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop rx path");
-
- /* Stop the MAC receiver */
-
- ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
- if (ret < 0)
- return ret;
-
- /* Stop the Rx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Rx path is stopped before calling
- * lan78xx_flush_rx_fifo().
- */
-static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
-}
-
static int lan78xx_reset(struct lan78xx_net *dev)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -2905,7 +3016,9 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
} while (buf & HW_CFG_LRST_);
- lan78xx_init_mac_address(dev);
+ ret = lan78xx_init_mac_address(dev);
+ if (ret < 0)
+ return ret;
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
@@ -2927,7 +3040,9 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
/* Init LTM */
- lan78xx_init_ltm(dev);
+ ret = lan78xx_init_ltm(dev);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
if (ret < 0)
@@ -4242,9 +4357,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
phydev = net->phydev;
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
-
phy_disconnect(net->phydev);
if (phy_is_pseudo_fixed_link(phydev)) {
@@ -4349,7 +4461,7 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->rxq_done);
skb_queue_head_init(&dev->txq_pend);
skb_queue_head_init(&dev->rxq_overflow);
- mutex_init(&dev->phy_mutex);
+ mutex_init(&dev->mdiobus_mutex);
mutex_init(&dev->dev_mutex);
ret = lan78xx_urb_config_init(dev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 07ebb800edf1..01251868a9c2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -634,7 +634,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
break;
case XDP_TX:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
@@ -646,7 +646,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
frame = &orig_frame;
stats->rx_drops++;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 67d25f4f94ef..ca81b212a246 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -122,16 +122,6 @@ struct net_vrf {
int ifindex;
};
-static void vrf_rx_stats(struct net_device *dev, int len)
-{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_packets);
- u64_stats_add(&dstats->rx_bytes, len);
- u64_stats_update_end(&dstats->syncp);
-}
-
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
@@ -369,7 +359,7 @@ static bool qdisc_tx_is_default(const struct net_device *dev)
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
struct dst_entry *dst)
{
- int len = skb->len;
+ unsigned int len = skb->len;
skb_orphan(skb);
@@ -382,15 +372,10 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
- vrf_rx_stats(dev, len);
- } else {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_drops);
- u64_stats_update_end(&dstats->syncp);
- }
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ dev_dstats_rx_add(dev, len);
+ else
+ dev_dstats_rx_dropped(dev);
return NETDEV_TX_OK;
}
@@ -578,20 +563,14 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- int len = skb->len;
- netdev_tx_t ret = is_ip_tx_frame(skb, dev);
-
- u64_stats_update_begin(&dstats->syncp);
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ unsigned int len = skb->len;
+ netdev_tx_t ret;
- u64_stats_inc(&dstats->tx_packets);
- u64_stats_add(&dstats->tx_bytes, len);
- } else {
- u64_stats_inc(&dstats->tx_drops);
- }
- u64_stats_update_end(&dstats->syncp);
+ ret = is_ip_tx_frame(skb, dev);
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_dstats_tx_add(dev, len);
+ else
+ dev_dstats_tx_dropped(dev);
return ret;
}
@@ -1364,7 +1343,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (!is_ndisc) {
struct net_device *orig_dev = skb->dev;
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1420,7 +1399,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
goto out;
}
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
if (!list_empty(&vrf_dev->ptype_all)) {
int err;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 9ea63059d52d..05c10acb2a57 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -622,9 +622,9 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
-static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
+static bool vxlan_parse_gpe_proto(const struct vxlanhdr *hdr, __be16 *protocol)
{
- struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
+ const struct vxlanhdr_gpe *gpe = (const struct vxlanhdr_gpe *)hdr;
/* Need to have Next Protocol set for interfaces in GPE mode. */
if (!gpe->np_applied)
@@ -1352,6 +1352,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int *idx)
{
+ struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct vxlan_dev *vxlan = netdev_priv(dev);
unsigned int h;
int err = 0;
@@ -1364,7 +1365,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct vxlan_rdst *rd;
if (rcu_access_pointer(f->nh)) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip_nh;
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
@@ -1381,7 +1382,7 @@ skip_nh:
}
list_for_each_entry_rcu(rd, &f->remotes, list) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip;
err = vxlan_fdb_info(skb, vxlan, f,
@@ -1554,18 +1555,17 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
#endif
}
-static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
- struct sk_buff *skb,
- u32 vxflags)
+static enum skb_drop_reason vxlan_remcsum(struct sk_buff *skb, u32 vxflags)
{
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
enum skb_drop_reason reason;
size_t start, offset;
- if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
- goto out;
+ if (!(vh->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
+ return SKB_NOT_DROPPED_YET;
- start = vxlan_rco_start(unparsed->vx_vni);
- offset = start + vxlan_rco_offset(unparsed->vx_vni);
+ start = vxlan_rco_start(vh->vx_vni);
+ offset = start + vxlan_rco_offset(vh->vx_vni);
reason = pskb_may_pull_reason(skb, offset + sizeof(u16));
if (reason)
@@ -1573,22 +1573,20 @@ static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
!!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
-out:
- unparsed->vx_flags &= ~VXLAN_HF_RCO;
- unparsed->vx_vni &= VXLAN_VNI_MASK;
-
return SKB_NOT_DROPPED_YET;
}
-static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
- struct sk_buff *skb, u32 vxflags,
+static void vxlan_parse_gbp_hdr(struct sk_buff *skb, u32 vxflags,
struct vxlan_metadata *md)
{
- struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
+ const struct vxlanhdr_gbp *gbp;
struct metadata_dst *tun_dst;
- if (!(unparsed->vx_flags & VXLAN_HF_GBP))
- goto out;
+ gbp = (const struct vxlanhdr_gbp *)vh;
+
+ if (!(vh->vx_flags & VXLAN_HF_GBP))
+ return;
md->gbp = ntohs(gbp->policy_id);
@@ -1607,8 +1605,6 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
/* In flow-based mode, GBP is carried in dst_metadata */
if (!(vxflags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
-out:
- unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan,
@@ -1672,9 +1668,9 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
+ const struct vxlanhdr *vh;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
- struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 protocol = htons(ETH_P_TEB);
@@ -1689,24 +1685,21 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (reason)
goto drop;
- unparsed = *vxlan_hdr(skb);
+ vh = vxlan_hdr(skb);
/* VNI flag always required to be set */
- if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
+ if (!(vh->vx_flags & VXLAN_HF_VNI)) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxlan_hdr(skb)->vx_flags),
- ntohl(vxlan_hdr(skb)->vx_vni));
+ ntohl(vh->vx_flags), ntohl(vh->vx_vni));
reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
/* Return non vxlan pkt */
goto drop;
}
- unparsed.vx_flags &= ~VXLAN_HF_VNI;
- unparsed.vx_vni &= ~VXLAN_VNI_MASK;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
- vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+ vni = vxlan_vni(vh->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan) {
@@ -1714,13 +1707,27 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- /* For backwards compatibility, only allow reserved fields to be
- * used by VXLAN extensions if explicitly requested.
- */
- if (vs->flags & VXLAN_F_GPE) {
- if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
+ if (vh->vx_flags & vxlan->cfg.reserved_bits.vx_flags ||
+ vh->vx_vni & vxlan->cfg.reserved_bits.vx_vni) {
+ /* If the header uses bits besides those enabled by the
+ * netdevice configuration, treat this as a malformed packet.
+ * This behavior diverges from VXLAN RFC (RFC7348) which
+ * stipulates that bits in reserved fields are to be
+ * ignored. The approach here maintains compatibility with
+ * previous stack code, is more robust, and provides a
+ * little more security when adding extensions to VXLAN.
+ */
+ reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
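
The receive-side check above reduces to a single mask test: any header bit that falls inside the configured reserved set makes the packet malformed, and by default the reserved set is the complement of the bits enabled extensions may use. Flattened to one 64-bit word (bit positions made up for illustration; only the set logic mirrors the driver):

    /* Hedged sketch of the reserved-bits check. HF_VNI and VNI_MASK
     * are illustrative constants, not the real vxlanhdr layout.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define HF_VNI   (1ull << 59)
    #define VNI_MASK (0xFFFFFFull << 8)

    static int hdr_ok(uint64_t hdr, uint64_t reserved)
    {
    	/* malformed if any reserved bit is set in the header */
    	return !(hdr & reserved);
    }

    int main(void)
    {
    	uint64_t used = HF_VNI | VNI_MASK;	/* bits extensions may use */
    	uint64_t reserved = ~used;		/* default: everything else */
    	uint64_t hdr = HF_VNI | (42ull << 8);

    	printf("clean hdr ok: %d\n", hdr_ok(hdr, reserved));
    	printf("dirty hdr ok: %d\n", hdr_ok(hdr | 1ull << 2, reserved));
    	return 0;
    }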
+
+ if (vxlan->cfg.flags & VXLAN_F_GPE) {
+ if (!vxlan_parse_gpe_proto(vh, &protocol))
goto drop;
- unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
raw_proto = true;
}
@@ -1730,8 +1737,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (vs->flags & VXLAN_F_REMCSUM_RX) {
- reason = vxlan_remcsum(&unparsed, skb, vs->flags);
+ if (vxlan->cfg.flags & VXLAN_F_REMCSUM_RX) {
+ reason = vxlan_remcsum(skb, vxlan->cfg.flags);
if (unlikely(reason))
goto drop;
}
@@ -1756,25 +1763,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- if (vs->flags & VXLAN_F_GBP)
- vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+ if (vxlan->cfg.flags & VXLAN_F_GBP)
+ vxlan_parse_gbp_hdr(skb, vxlan->cfg.flags, md);
/* Note that GBP and GPE can never be active together. This is
* ensured in vxlan_dev_configure.
*/
- if (unparsed.vx_flags || unparsed.vx_vni) {
- /* If there are any unprocessed flags remaining treat
- * this as a malformed packet. This behavior diverges from
- * VXLAN RFC (RFC7348) which stipulates that bits in reserved
- * in reserved fields are to be ignored. The approach here
- * maintains compatibility with previous stack code, and also
- * is more robust and provides a little more security in
- * adding extensions to VXLAN.
- */
- reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
- goto drop;
- }
-
if (!raw_proto) {
reason = vxlan_set_mac(vxlan, vs, skb, vni);
if (reason)
@@ -1818,14 +1812,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
- dev_core_stats_rx_dropped_inc(vxlan->dev);
+ dev_dstats_rx_dropped(vxlan->dev);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_DROPS, 0);
reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
- dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ dev_dstats_rx_add(vxlan->dev, skb->len);
vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
@@ -1880,7 +1874,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
@@ -1938,7 +1932,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2097,7 +2091,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2271,8 +2265,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
{
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+ unsigned int len = skb->len;
struct net_device *dev;
- int len = skb->len;
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
@@ -2299,16 +2293,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop)
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
- dev_sw_netstats_tx_add(src_vxlan->dev, 1, len);
+ dev_dstats_tx_add(src_vxlan->dev, len);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
- dev_sw_netstats_rx_add(dst_vxlan->dev, len);
+ dev_dstats_rx_add(dst_vxlan->dev, len);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
} else {
drop:
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2621,7 +2615,7 @@ out_unlock:
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb_reason(skb, reason);
return;
@@ -2666,7 +2660,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2704,7 +2698,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2801,10 +2795,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
return NETDEV_TX_OK;
}
}
@@ -2827,7 +2821,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (fdst)
vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
}
return NETDEV_TX_OK;
@@ -3371,7 +3365,7 @@ static void vxlan_setup(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
INIT_LIST_HEAD(&vxlan->next);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -3435,6 +3429,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
[IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1),
[IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX),
+ [IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)),
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -4070,6 +4065,10 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink, struct netlink_ext_ack *extack)
{
+ struct vxlanhdr used_bits = {
+ .vx_flags = VXLAN_HF_VNI,
+ .vx_vni = VXLAN_VNI_MASK,
+ };
struct vxlan_dev *vxlan = netdev_priv(dev);
int err = 0;
@@ -4296,6 +4295,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_HF_RCO;
+ used_bits.vx_vni |= ~VXLAN_VNI_MASK;
}
if (data[IFLA_VXLAN_GBP]) {
@@ -4303,6 +4304,7 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
VXLAN_F_GBP, changelink, false, extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_GBP_USED_BITS;
}
if (data[IFLA_VXLAN_GPE]) {
@@ -4311,6 +4313,46 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+
+ used_bits.vx_flags |= VXLAN_GPE_USED_BITS;
+ }
+
+ if (data[IFLA_VXLAN_RESERVED_BITS]) {
+ struct vxlanhdr reserved_bits;
+
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Cannot change reserved_bits");
+ return -EOPNOTSUPP;
+ }
+
+ nla_memcpy(&reserved_bits, data[IFLA_VXLAN_RESERVED_BITS],
+ sizeof(reserved_bits));
+ if (used_bits.vx_flags & reserved_bits.vx_flags ||
+ used_bits.vx_vni & reserved_bits.vx_vni) {
+ __be64 ub_be64, rb_be64;
+
+ memcpy(&ub_be64, &used_bits, sizeof(ub_be64));
+ memcpy(&rb_be64, &reserved_bits, sizeof(rb_be64));
+
+ NL_SET_ERR_MSG_ATTR_FMT(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Used bits %#018llx cannot overlap reserved bits %#018llx",
+ be64_to_cpu(ub_be64),
+ be64_to_cpu(rb_be64));
+ return -EINVAL;
+ }
+
+ conf->reserved_bits = reserved_bits;
+ } else {
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ conf->reserved_bits = (struct vxlanhdr) {
+ .vx_flags = ~used_bits.vx_flags,
+ .vx_vni = ~used_bits.vx_vni,
+ };
}
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
@@ -4497,6 +4539,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(0) + /* IFLA_VXLAN_GPE */
nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
+ /* IFLA_VXLAN_RESERVED_BITS */
+ nla_total_size(sizeof(struct vxlanhdr)) +
0;
}
@@ -4599,6 +4643,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
goto nla_put_failure;
+ if (nla_put(skb, IFLA_VXLAN_RESERVED_BITS,
+ sizeof(vxlan->cfg.reserved_bits),
+ &vxlan->cfg.reserved_bits))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index 8735891ee128..816ab1aa0526 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -1712,7 +1712,7 @@ netdev_tx_t vxlan_mdb_xmit(struct vxlan_dev *vxlan,
vxlan_xmit_one(skb, vxlan->dev, src_vni,
rcu_dereference(fremote->rd), false);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index f547c22e26ac..58f5143359df 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -732,8 +732,8 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
/**
* framer_provider_simple_of_xlate() - returns the framer instance from framer provider
- * @dev: the framer provider device
- * @args: of_phandle_args (not used here)
+ * @dev: the framer provider device (not used here)
+ * @args: of_phandle_args
*
* Intended to be used by framer provider for the common case where #framer-cells is
* 0. For other cases where #framer-cells is greater than '0', the framer provider
@@ -743,21 +743,14 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
struct framer *framer_provider_simple_of_xlate(struct device *dev,
const struct of_phandle_args *args)
{
- struct class_dev_iter iter;
- struct framer *framer;
-
- class_dev_iter_init(&iter, &framer_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- framer = dev_to_framer(dev);
- if (args->np != framer->dev.of_node)
- continue;
+ struct device *target_dev;
- class_dev_iter_exit(&iter);
- return framer;
- }
+ target_dev = class_find_device_by_of_node(&framer_class, args->np);
+ if (!target_dev)
+ return ERR_PTR(-ENODEV);
- class_dev_iter_exit(&iter);
- return ERR_PTR(-ENODEV);
+ put_device(target_dev);
+ return dev_to_framer(target_dev);
}
EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index be67382c00f6..c576bbba52bf 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -123,6 +123,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -207,6 +208,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca6390 hw2.0",
@@ -296,6 +298,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "qcn9074 hw1.0",
@@ -379,6 +382,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.0",
@@ -468,6 +472,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.1",
@@ -555,6 +560,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6750 hw1.0",
@@ -637,6 +643,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
.support_dual_stations = false,
+ .pdev_suspend = true,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -719,6 +726,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca2066 hw2.1",
@@ -809,6 +817,94 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
+ {
+ .name = "qca6698aq hw2.1",
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ .fw = {
+ .dir = "QCA6698AQ/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
};
static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
@@ -1669,11 +1765,47 @@ err_pdev_debug:
return ret;
}
+static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ unsigned long time_left;
+ int ret;
+ int i;
+
+ if (!ab->hw_params.pdev_suspend)
+ return;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ab, "could not suspend target: %d\n", ret);
+ /* pointless to try other pdevs */
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+
+ if (!time_left) {
+ ath11k_warn(ab, "suspend timed out - target pause event never came\n");
+ /* pointless to try other pdevs */
+ return;
+ }
+ }
+}
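+
ath11k_core_pdev_suspend_target() issues one WMI suspend per pdev and waits up to three seconds for the firmware's pause event, giving up on the remaining pdevs after the first failure since they share the same stuck target. A userspace analog of that loop (stubs stand in for WMI and the completion):

    /* Hedged sketch of the per-pdev suspend loop above.
     * send_suspend() and pause_seen() are illustrative stubs.
     */
    #include <stdio.h>
    #include <time.h>

    static int pause_flag;

    static int send_suspend(int pdev_id)
    {
    	(void)pdev_id;
    	pause_flag = 1;	/* pretend firmware acks instantly */
    	return 0;
    }

    static int pause_seen(void) { return pause_flag; }

    static void suspend_all(int num_pdevs)
    {
    	int i;

    	for (i = 0; i < num_pdevs; i++) {
    		time_t deadline = time(NULL) + 3;

    		pause_flag = 0;	/* mirrors reinit_completion() */
    		if (send_suspend(i)) {
    			fprintf(stderr, "could not suspend pdev %d\n", i);
    			return;	/* pointless to try other pdevs */
    		}
    		while (!pause_seen()) {
    			if (time(NULL) > deadline) {
    				fprintf(stderr, "pdev %d suspend timed out\n", i);
    				return;
    			}
    		}
    	}
    	printf("all %d pdevs suspended\n", num_pdevs);
    }

    int main(void)
    {
    	suspend_all(2);
    	return 0;
    }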
+
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
+ ath11k_core_pdev_suspend_target(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debugfs_pdev_destroy(ab);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 09c37e19a168..a9dc7fe7765a 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -148,6 +148,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
ATH11K_HW_QCA2066_HW21,
+ ATH11K_HW_QCA6698AQ_HW21,
};
enum ath11k_firmware_mode {
@@ -340,7 +341,6 @@ struct ath11k_chan_power_info {
* @ap_power_type: type of power (SP/LPI/VLP)
* @num_pwr_levels: number of power levels
* @reg_max: Array of maximum TX power (dBm) per PSD value
- * @ap_constraint_power: AP constraint power (dBm)
* @tpe: TPE values processed from TPE IE
* @chan_power_info: power info to send to firmware
*/
@@ -350,7 +350,6 @@ struct ath11k_reg_tpc_power_info {
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
u8 reg_max[ATH11K_NUM_PWR_LEVELS];
- u8 ap_constraint_power;
s8 tpe[ATH11K_NUM_PWR_LEVELS];
struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
@@ -370,7 +369,6 @@ struct ath11k_vif {
struct ath11k *ar;
struct ieee80211_vif *vif;
- u16 tx_seq_no;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
union {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 65d2bc0687c8..f777314db8b3 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -165,7 +165,6 @@ struct ath11k_mon_data {
struct ath11k_pdev_mon_stats rx_mon_stats;
/* lock for monitor data */
spinlock_t mon_lock;
- struct sk_buff_head rx_status_q;
};
struct ath11k_pdev_dp {
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 40088e62572e..029ecf51c9ef 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -3872,6 +3872,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW1_BM &&
rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
@@ -4690,11 +4691,12 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
}
}
-static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
- void *ring_entry, struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu, u32 *npackets,
- u32 *ppdu_id)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
@@ -5705,8 +5707,6 @@ static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- skb_queue_head_init(&pmon->rx_status_q);
-
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
memset(&pmon->rx_mon_stats, 0,
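Several large-frame helpers in this series (the monitor MPDU pop above, the QMI host-cap and M3-info senders, and the WoW pattern converter further down) are marked noinline_for_stack because clang otherwise inlines them into a single caller and the merged frame trips -Wframe-larger-than. noinline_for_stack is simply the noinline attribute under a self-documenting name; below is a small compilable sketch of the idea, with made-up buffer sizes.

/* Userspace sketch: why noinline helps stack usage. Illustrative only.
 * If both helpers were inlined into one caller, their large local
 * buffers could end up coexisting in a single merged frame; forcing
 * out-of-line calls bounds the frame to one buffer at a time.
 */
#include <stdio.h>
#include <string.h>

#define noinline_for_stack __attribute__((noinline))

static noinline_for_stack int send_host_cap(void)
{
	char req[2048];	/* stand-in for a big on-stack request struct */

	memset(req, 0, sizeof(req));
	return req[0];
}

static noinline_for_stack int send_m3_info(void)
{
	char req[2048];	/* another big on-stack message */

	memset(req, 0, sizeof(req));
	return req[0];
}

int main(void)
{
	/* With inlining, a compiler may merge both 2 KiB buffers into
	 * main()'s frame; noinline keeps each in its own short-lived frame.
	 */
	printf("%d\n", send_host_cap() + send_m3_info());
	return 0;
}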
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index dc8bbe073017..601542410c75 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -700,7 +700,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
#define HAL_REO_CMD_UPD0_VLD BIT(9)
#define HAL_REO_CMD_UPD0_ALDC BIT(10)
@@ -725,7 +725,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
#define HAL_REO_CMD_UPD0_PN BIT(30)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
#define HAL_REO_CMD_UPD1_VLD BIT(16)
#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
@@ -741,7 +741,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
#define HAL_REO_CMD_UPD2_SVLD BIT(10)
#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 8f7dd43dc1bd..753bd93f0212 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -372,7 +372,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
- if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
+ ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 300322535766..52d9f4c13b13 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -227,6 +227,7 @@ struct ath11k_hw_params {
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
bool support_dual_stations;
+ bool pdev_suspend;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e6acbff06749..1556392f7ad4 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1697,8 +1697,6 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
return;
}
- arvif->tx_seq_no = 0x1000;
-
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
@@ -2230,7 +2228,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
/* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default.
- * VHT mcs rate 10 and 11 is not suppoerted in 11ac standard.
+ * VHT mcs rate 10 and 11 is not supported in 11ac standard.
* so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
*/
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
@@ -6952,7 +6950,7 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- /* TODO: recal traffic pause state based on the available vdevs */
+ /* TODO: recalc traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
}
@@ -9356,6 +9354,7 @@ static int ath11k_fw_stats_request(struct ath11k *ar,
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ unsigned int link_id,
int *dbm)
{
struct ath11k *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6974a551883f..6e45f464a429 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -398,6 +398,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
case ATH11K_HW_QCA2066_HW21:
+ case ATH11K_HW_QCA6698AQ_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index be9d2c69cc41..b93f04973ad7 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -846,6 +846,9 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
case 0x1019D0E1:
ab->hw_rev = ATH11K_HW_QCA2066_HW21;
break;
+ case 0x001e60e1:
+ ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
+ break;
default:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
}
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index debe7c5919ef..3fe77310c71f 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -126,6 +126,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_QCA2066_HW21,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 7a22483b35cd..5759fc521316 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1704,7 +1704,9 @@ static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
},
};
-static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
@@ -2570,7 +2572,9 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
m3_mem->size = 0;
}
-static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index 827085a926b2..b6f08755129f 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -148,8 +148,10 @@ static int ath11k_wow_cleanup(struct ath11k *ar)
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
-static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
- const struct cfg80211_pkt_pattern *old)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index c57322221e1d..0606116d6b9c 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -9,6 +9,7 @@
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
@@ -22,6 +23,11 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+/* protected with ath12k_hw_group_mutex */
+static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
+
+static DEFINE_MUTEX(ath12k_hw_group_mutex);
+
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -79,11 +85,17 @@ int ath12k_core_suspend(struct ath12k_base *ab)
ar = ab->pdevs[i].ar;
if (!ar)
continue;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
ret = ath12k_mac_wait_tx_complete(ar);
if (ret) {
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
+
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
/* PM framework skips suspend_late/resume_early callbacks
@@ -593,14 +605,17 @@ u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
static void ath12k_core_stop(struct ath12k_base *ab)
{
+ ath12k_core_stopped(ab);
+
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
ath12k_acpi_stop(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
- ath12k_dp_rx_pdev_reo_cleanup(ab);
+ ath12k_dp_free(ab);
/* De-Init of components as needed */
}
@@ -702,7 +717,7 @@ err_qmi_deinit:
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
- ath12k_dp_free(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_reg_free(ab);
ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
@@ -712,30 +727,17 @@ static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
int ret;
- ret = ath12k_mac_register(ab);
- if (ret) {
- ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
- return ret;
- }
-
ret = ath12k_dp_pdev_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ return ret;
}
return 0;
-
-err_mac_unregister:
- ath12k_mac_unregister(ab);
-
- return ret;
}
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
- ath12k_mac_unregister(ab);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
}
@@ -744,6 +746,8 @@ static int ath12k_core_start(struct ath12k_base *ab,
{
int ret;
+ lockdep_assert_held(&ab->core_lock);
+
ret = ath12k_wmi_attach(ab);
if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -793,19 +797,12 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_hif_stop;
}
- ret = ath12k_mac_allocate(ab);
- if (ret) {
- ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
- ret);
- goto err_hif_stop;
- }
-
ath12k_dp_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
- goto err_mac_destroy;
+ goto err_hif_stop;
}
ath12k_dp_hal_rx_desc_init(ab);
@@ -844,12 +841,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
/* ACPI is optional so continue in case of an error */
ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+ if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
+ /* Indicate the core start in the appropriate group */
+ ath12k_core_started(ab);
+
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
- ath12k_mac_destroy(ab);
err_hif_stop:
ath12k_hif_stop(ab);
err_wmi_detach:
@@ -857,6 +856,169 @@ err_wmi_detach:
return ret;
}
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath12k_hif_irq_disable(ab);
+ ath12k_core_pdev_destroy(ab);
+
+ mutex_unlock(&ab->core_lock);
+}
+
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+ ath12k_mac_unregister(ag);
+
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+ ath12k_core_device_cleanup(ab);
+ }
+
+ ath12k_mac_destroy(ag);
+}
+
+static int __ath12k_mac_mlo_ready(struct ath12k *ar)
+{
+ int ret;
+
+ ret = ath12k_wmi_mlo_ready(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
+ ar->pdev_idx);
+
+ return 0;
+}
+
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ret = __ath12k_mac_mlo_ready(ar);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
+{
+ int ret, i;
+
+ if (!ag->mlo_capable || ag->num_devices == 1)
+ return 0;
+
+ ret = ath12k_mac_mlo_setup(ag);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ag->num_devices; i++)
+ ath12k_dp_partner_cc_init(ag->ab[i]);
+
+ ret = ath12k_mac_mlo_ready(ag);
+ if (ret)
+ goto err_mlo_teardown;
+
+ return 0;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+ return ret;
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int ret, i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
+ goto core_pdev_create;
+
+ ret = ath12k_mac_allocate(ag);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = ath12k_core_mlo_setup(ag);
+ if (WARN_ON(ret))
+ goto err_mac_destroy;
+
+ ret = ath12k_mac_register(ag);
+ if (WARN_ON(ret))
+ goto err_mlo_teardown;
+
+ set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+core_pdev_create:
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_pdev_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create pdev core %d\n", ret);
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ ath12k_hif_irq_enable(ab);
+
+ ret = ath12k_core_rfkill_config(ab);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+
+err:
+ ath12k_core_hw_group_stop(ag);
+ return ret;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+err_mac_destroy:
+ ath12k_mac_destroy(ag);
+
+ return ret;
+}
+
static int ath12k_core_start_firmware(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
@@ -874,9 +1036,37 @@ static int ath12k_core_start_firmware(struct ath12k_base *ab,
return ret;
}
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+ lockdep_assert_held(&ag->mutex);
+
+ return (ag->num_started == ag->num_devices);
+}
+
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
+ bool found = false;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+ if (!partner_ab)
+ continue;
+
+ if (found)
+ ath12k_qmi_trigger_host_cap(partner_ab);
+
+ found = (partner_ab == ab);
+ }
+}
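Note that ath12k_core_trigger_partner() arms only the first populated device after the caller in ag->ab[]: found is re-evaluated on every iteration, so each device that reaches firmware-ready triggers just its successor, forming a daisy chain across the group. A standalone sketch of that walk, with a hypothetical notify() standing in for ath12k_qmi_trigger_host_cap():

/* Userspace sketch of the "trigger my successor" walk.
 * notify() is a stand-in for ath12k_qmi_trigger_host_cap().
 */
#include <stdio.h>

#define NUM_DEVICES 4

static void notify(int id)
{
	printf("trigger host cap on device %d\n", id);
}

static void trigger_partner(const int *devs, int self)
{
	int found = 0;
	int i;

	for (i = 0; i < NUM_DEVICES; i++) {
		if (devs[i] < 0)	/* model a NULL ag->ab[i] slot */
			continue;

		if (found)
			notify(devs[i]);

		/* true only on the caller's own slot, so exactly the
		 * next populated slot gets notified
		 */
		found = (devs[i] == self);
	}
}

int main(void)
{
	int devs[NUM_DEVICES] = { 0, -1, 2, 3 };

	trigger_partner(devs, 0);	/* triggers device 2 only */
	return 0;
}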
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ int ret, i;
ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
@@ -896,41 +1086,52 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
goto err_firmware_stop;
}
+ mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
+
ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
- ret = ath12k_core_pdev_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create pdev core: %d\n", ret);
- goto err_core_stop;
- }
- ath12k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
- ret = ath12k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath12k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_pdev_destroy;
+ if (ath12k_core_hw_group_start_ready(ag)) {
+ ret = ath12k_core_hw_group_start(ag);
+ if (ret) {
+ ath12k_warn(ab, "unable to start hw group\n");
+ goto err_core_stop;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+ } else {
+ ath12k_core_trigger_partner(ab);
}
- mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
return 0;
-err_core_pdev_destroy:
- ath12k_core_pdev_destroy(ab);
err_core_stop:
- ath12k_core_stop(ab);
- ath12k_mac_destroy(ab);
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+ goto exit;
+
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
+exit:
+ mutex_unlock(&ag->mutex);
return ret;
}
@@ -972,6 +1173,7 @@ err_hal_srng_deinit:
static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_hw *ah;
struct ieee80211_hw *hw;
@@ -982,8 +1184,8 @@ static void ath12k_rfkill_work(struct work_struct *work)
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -1023,6 +1225,7 @@ void ath12k_core_halt(struct ath12k *ar)
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_hw *ah;
int i, j;
@@ -1034,8 +1237,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@@ -1069,12 +1272,13 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k_hw *ah;
struct ath12k *ar;
int i, j;
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@@ -1117,6 +1321,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k_hw *ah;
int ret, i;
@@ -1127,8 +1332,16 @@ static void ath12k_core_restart(struct work_struct *work)
}
if (ab->is_reset) {
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+ }
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ab->ag, i);
ieee80211_restart_hw(ah->hw);
}
}
@@ -1142,7 +1355,7 @@ static void ath12k_core_reset(struct work_struct *work)
int reset_count, fail_cont_count;
long time_left;
- if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+ if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
@@ -1241,38 +1454,430 @@ static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
&ab->panic_nb);
}
-int ath12k_core_init(struct ath12k_base *ab)
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
{
- int ret;
+ lockdep_assert_held(&ag->mutex);
- ret = ath12k_core_soc_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create soc core: %d\n", ret);
- return ret;
+ return (ag->num_probed == ag->num_devices);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int count = 0;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ count++;
+
+ ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+ if (!ag)
+ return NULL;
+
+ ag->id = count;
+ list_add(&ag->list, &ath12k_hw_group_list);
+ mutex_init(&ag->mutex);
+ ag->mlo_capable = false;
+
+ return ag;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ list_del(&ag->list);
+ kfree(ag);
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int i;
+
+ if (!ab->dev->of_node)
+ return NULL;
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ for (i = 0; i < ag->num_devices; i++)
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ return ag;
+
+ return NULL;
+}
+
+static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
+ struct device_node *tx_endpoint, *next_rx_endpoint;
+ int device_count = 0;
+
+ next_wsi_dev = wsi_dev;
+
+ if (!next_wsi_dev)
+ return -ENODEV;
+
+ do {
+ ag->wsi_node[device_count] = next_wsi_dev;
+
+ tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
+ if (!tx_endpoint) {
+ of_node_put(next_wsi_dev);
+ return -ENODEV;
+ }
+
+ next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
+ if (!next_rx_endpoint) {
+ of_node_put(next_wsi_dev);
+ of_node_put(tx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(tx_endpoint);
+ of_node_put(next_wsi_dev);
+
+ next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
+ if (!next_wsi_dev) {
+ of_node_put(next_rx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(next_rx_endpoint);
+
+ device_count++;
+ if (device_count > ATH12K_MAX_SOCS) {
+ ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
+ device_count, ATH12K_MAX_SOCS);
+ of_node_put(next_wsi_dev);
+ return -EINVAL;
+ }
+ } while (wsi_dev != next_wsi_dev);
+
+ of_node_put(next_wsi_dev);
+ ag->num_devices = device_count;
+
+ return 0;
+}
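The loop above follows the WSI links described in the device tree: from each node it takes tx endpoint 0, resolves the remote rx endpoint, and hops to that endpoint's port parent, stopping when the walk returns to the starting node and rejecting rings larger than ATH12K_MAX_SOCS. A userspace sketch of the same ring walk over a toy structure, with a single next pointer standing in for the of_graph hops:

/* Userspace sketch of the WSI ring walk in ath12k_core_get_wsi_info().
 * Each node's "next" models tx endpoint -> remote rx -> port parent.
 */
#include <stdio.h>

#define MAX_SOCS 3

struct wsi_node {
	const char *name;
	struct wsi_node *next;
};

static int count_ring(struct wsi_node *start)
{
	struct wsi_node *cur = start;
	int count = 0;

	do {
		cur = cur->next;
		if (!cur)
			return -1;	/* broken link: -ENODEV in the driver */

		count++;
		if (count > MAX_SOCS)
			return -1;	/* oversized ring: -EINVAL */
	} while (cur != start);

	return count;
}

int main(void)
{
	struct wsi_node a = { "soc0" }, b = { "soc1" }, c = { "soc2" };

	a.next = &b;
	b.next = &c;
	c.next = &a;	/* close the ring back to the starting node */

	printf("devices in group: %d\n", count_ring(&a));	/* 3 */
	return 0;
}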
+
+static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ int i, wsi_controller_index = -1, node_index = -1;
+ bool control;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
+ if (control)
+ wsi_controller_index = i;
+
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ node_index = i;
+ }
+
+ if (wsi_controller_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
+ return -EINVAL;
+ }
+
+ if (node_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
+ return -EINVAL;
+ }
+
+ ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
+ ag->num_devices;
+
+ return 0;
+}
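The resulting WSI index is simply the device's distance from the controller around the ring, with num_devices added before the modulo so the subtraction never goes negative. A minimal check of the arithmetic:

/* Sketch of the WSI index computation: distance from the controller
 * node around the ring, normalized into [0, num_devices).
 */
#include <stdio.h>

static unsigned int wsi_index(int num_devices, int node_index,
			      int controller_index)
{
	return (num_devices + node_index - controller_index) % num_devices;
}

int main(void)
{
	/* 3-device ring with the controller in slot 2 */
	for (int node = 0; node < 3; node++)
		printf("node %d -> wsi index %u\n",
		       node, wsi_index(3, node, 2));
	/* prints 0->1, 1->2, 2->0: the controller always gets index 0 */
	return 0;
}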
+
+static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
+{
+ struct ath12k_wsi_info *wsi = &ab->wsi_info;
+ struct ath12k_hw_group *ag;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ /* The grouping of multiple devices is based on the device tree file.
+ * Platforms without any valid group information end up with each
+ * device placed in its own invalid group.
+ *
+ * Group id ATH12K_INVALID_GROUP_ID is used for a single device group
+ * that has no DT entry or a wrong one; there can be many groups with
+ * the same id, i.e. ATH12K_INVALID_GROUP_ID. The default group id of
+ * ATH12K_INVALID_GROUP_ID combined with the number of devices in
+ * ath12k_hw_group therefore determines whether the group is a multi
+ * device or a single device group.
+ */
+
+ ag = ath12k_core_hw_group_find_by_dt(ab);
+ if (!ag) {
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ if (ath12k_core_get_wsi_info(ag, ab) ||
+ ath12k_core_get_wsi_index(ag, ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "unable to get wsi info from dt, grouping single device");
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
+ wsi->index = 0;
+ }
+
+ goto exit;
+ } else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
+ ag->id);
+ goto invalid_group;
+ } else {
+ if (ath12k_core_get_wsi_index(ag, ab))
+ goto invalid_group;
+ goto exit;
+ }
+
+invalid_group:
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ wsi->index = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
+
+exit:
+ if (ag->num_probed >= ag->num_devices) {
+ ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
+ goto invalid_group;
+ }
+
+ ab->device_id = ag->num_probed++;
+ ag->ab[ab->device_id] = ab;
+ ab->ag = ag;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
+ ag->id, ag->num_devices, wsi->index);
+
+ return ag;
+}
+
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ u8 device_id = ab->device_id;
+ int num_probed;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (WARN_ON(device_id >= ag->num_devices)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ if (WARN_ON(ag->ab[device_id] != ab)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ ag->ab[device_id] = NULL;
+ ab->ag = NULL;
+ ab->device_id = ATH12K_INVALID_DEVICE_ID;
+
+ if (ag->num_probed)
+ ag->num_probed--;
+
+ num_probed = ag->num_probed;
+
+ mutex_unlock(&ag->mutex);
+
+ if (!num_probed)
+ ath12k_core_hw_group_free(ag);
+}
+
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (WARN_ON(!ag))
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_core_soc_destroy(ab);
+ }
+}
+
+static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);
+
+ ath12k_core_hw_group_stop(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ mutex_unlock(&ab->core_lock);
+ ath12k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+}
+
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ /* If more than one device is grouped, then inter-device MLO
+ * can work regardless of whether each device internally supports
+ * single_chip_mlo.
+ * Only when there is a single device does the capability depend on
+ * whether that device supports intra-chip MLO.
+ */
+ if (ag->num_devices > 1) {
+ ag->mlo_capable = true;
+ } else {
+ ab = ag->ab[0];
+ ag->mlo_capable = ab->single_chip_mlo_supp;
+
+ /* WCN chipsets do not advertise this in firmware features,
+ * hence skip the check
+ */
+ if (ab->hw_params->def_num_link)
+ return;
}
+ if (!ag->mlo_capable)
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ /* Even if one device's firmware features indicate that MLO is
+ * unsupported, mark MLO unsupported for the whole group
+ */
+ if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
+ ag->mlo_capable = false;
+ return;
+ }
+ }
+}
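So the capability resolves in two steps: multi-device groups start out MLO-capable while a single-device group inherits single_chip_mlo_supp, and then any device whose firmware lacks ATH12K_FW_FEATURE_MLO vetoes the whole group (WCN parts, which never advertise the flag, are exempted via def_num_link). A compact sketch of the decision with hypothetical inputs:

/* Sketch of the group MLO capability decision. Inputs are hypothetical
 * stand-ins for ag->num_devices, single_chip_mlo_supp and the per-device
 * ATH12K_FW_FEATURE_MLO firmware feature bit.
 */
#include <stdbool.h>
#include <stdio.h>

static bool group_mlo_capable(int num_devices, bool single_chip_mlo_supp,
			      bool skip_fw_check, const bool *fw_has_mlo)
{
	bool capable = (num_devices > 1) ? true : single_chip_mlo_supp;

	if (num_devices == 1 && skip_fw_check)	/* WCN: no feature flag */
		return capable;

	if (!capable)
		return false;

	for (int i = 0; i < num_devices; i++)
		if (!fw_has_mlo[i])
			return false;	/* one device vetoes the group */

	return capable;
}

int main(void)
{
	bool fw[2] = { true, false };

	printf("%d\n", group_mlo_capable(2, false, false, fw));	/* 0 */
	fw[1] = true;
	printf("%d\n", group_mlo_capable(2, false, false, fw));	/* 1 */
	return 0;
}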
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int ret;
+
ret = ath12k_core_panic_notifier_register(ab);
if (ret)
ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ ag = ath12k_core_hw_group_assign(ab);
+ if (!ag) {
+ mutex_unlock(&ath12k_hw_group_mutex);
+ ath12k_warn(ab, "unable to get hw group\n");
+ return -ENODEV;
+ }
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+
+ mutex_lock(&ag->mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
+ ag->num_devices, ag->num_probed);
+
+ if (ath12k_core_hw_group_create_ready(ag)) {
+ ret = ath12k_core_hw_group_create(ag);
+ if (ret) {
+ mutex_unlock(&ag->mutex);
+ ath12k_warn(ab, "unable to create hw group\n");
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ag->mutex);
+
return 0;
+
+err:
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
+ return ret;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
ath12k_core_panic_notifier_unregister(ab);
-
- mutex_lock(&ab->core_lock);
-
- ath12k_core_pdev_destroy(ab);
- ath12k_core_stop(ab);
-
- mutex_unlock(&ab->core_lock);
-
- ath12k_hif_power_down(ab, false);
- ath12k_mac_destroy(ab);
- ath12k_core_soc_destroy(ab);
- ath12k_fw_unmap(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1322,7 +1927,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ ab->single_chip_mlo_supp = false;
/* Device index used to identify the devices in a group.
*
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 3bf31ee5b9fa..ee595794a7ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -63,6 +63,14 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_MAX_SOCS 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_INVALID_GROUP_ID 0xFF
+#define ATH12K_INVALID_DEVICE_ID 0xFF
+
+#define ATH12K_MAX_MLO_PEERS 256
+#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -115,6 +123,7 @@ struct ath12k_skb_cb {
dma_addr_t paddr_ext_desc;
u32 cipher;
u8 flags;
+ u8 link_id;
};
struct ath12k_skb_rxcb {
@@ -127,7 +136,7 @@ struct ath12k_skb_rxcb {
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
- u8 mac_id;
+ u8 hw_link_id;
u8 unmapped;
u8 is_frag;
u8 tid;
@@ -208,8 +217,13 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
+enum ath12k_hw_group_flags {
+ ATH12K_GROUP_FLAG_REGISTERED,
+ ATH12K_GROUP_FLAG_UNREGISTER,
+};
+
enum ath12k_dev_flags {
- ATH12K_CAC_RUNNING,
+ ATH12K_FLAG_CAC_RUNNING,
ATH12K_FLAG_CRASH_FLUSH,
ATH12K_FLAG_RAW_MODE,
ATH12K_FLAG_HW_CRYPTO_DISABLED,
@@ -220,6 +234,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
+ ATH12K_FLAG_QMI_FW_READY_COMPLETE,
};
struct ath12k_tx_conf {
@@ -314,10 +329,11 @@ struct ath12k_vif {
bool ps;
struct ath12k_link_vif deflink;
- struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link vif created in FW */
u16 links_map;
+ u8 last_scan_link;
/* Must be last - ends in a flexible-array member.
*
@@ -365,10 +381,6 @@ struct ath12k_rx_peer_stats {
u64 non_ampdu_msdu_count;
u64 stbc_count;
u64 beamformed_count;
- u64 mcs_count[HAL_RX_MAX_MCS + 1];
- u64 nss_count[HAL_RX_MAX_NSS];
- u64 bw_count[HAL_RX_BW_MAX];
- u64 gi_count[HAL_RX_GI_MAX];
u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
u64 tid_count[IEEE80211_NUM_TIDS + 1];
u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
@@ -469,6 +481,9 @@ struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
+ /* link address similar to ieee80211_link_sta */
+ u8 addr[ETH_ALEN];
+
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
@@ -485,14 +500,26 @@ struct ath12k_link_sta {
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+
+ /* For now the assoc link will be considered primary */
+ bool is_assoc_link;
+
+ /* for firmware use only */
+ u8 link_idx;
};
struct ath12k_sta {
+ struct ath12k_vif *ahvif;
enum hal_pn_type pn_type;
struct ath12k_link_sta deflink;
struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link sta created in FW */
u16 links_map;
+ u8 assoc_link_id;
+ u16 ml_peer_id;
+ u8 num_peer;
+
+ enum ieee80211_sta_state state;
};
#define ATH12K_MIN_5G_FREQ 4150
@@ -572,9 +599,10 @@ struct ath12k {
struct delayed_work timeout;
enum ath12k_scan_state state;
bool is_roc;
- int vdev_id;
int roc_freq;
bool roc_notify;
+ struct wiphy_work vdev_clean_wk;
+ struct ath12k_link_vif *arvif;
} scan;
struct {
@@ -657,7 +685,7 @@ struct ath12k {
struct work_struct regd_update_work;
- struct work_struct wmi_mgmt_tx_work;
+ struct wiphy_work wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
struct ath12k_wow wow;
@@ -680,14 +708,18 @@ struct ath12k {
bool monitor_started;
int monitor_vdev_id;
- u32 freq_low;
- u32 freq_high;
+ struct wiphy_radio_freq_range freq_range;
bool nlo_enabled;
+
+ struct completion mlo_setup_done;
+ u32 mlo_setup_status;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
+ struct device *dev;
+
/* Protect the write operation of the hardware state ath12k_hw::state
* between hardware start<=>reconfigure<=>stop transitions.
*/
@@ -698,6 +730,11 @@ struct ath12k_hw {
u8 num_radio;
+ DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
+
+ /* protected by wiphy_lock() */
+ struct list_head ml_peers;
+
/* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
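struct ath12k_hw now also carries a 256-entry ML peer ID bitmap (free_ml_peer_id_map), with ATH12K_MLO_PEER_ID_INVALID marking a station that has no ID. The allocation sites are outside this hunk; below is a userspace sketch of bitmap-based ID allocation in the spirit of the kernel's find_first_zero_bit()/set_bit()/clear_bit() helpers, assuming a set bit means "in use":

/* Sketch of ML peer id allocation from a fixed-size bitmap. */
#include <stdint.h>
#include <stdio.h>

#define MAX_MLO_PEERS		256
#define ML_PEER_ID_INVALID	0xFFFF
#define BITS_PER_WORD		64
#define BITMAP_WORDS		(MAX_MLO_PEERS / BITS_PER_WORD)

static uint64_t used_ml_peer_id_map[BITMAP_WORDS];

static unsigned int ml_peer_id_alloc(void)
{
	for (unsigned int id = 0; id < MAX_MLO_PEERS; id++) {
		uint64_t mask = 1ULL << (id % BITS_PER_WORD);

		if (!(used_ml_peer_id_map[id / BITS_PER_WORD] & mask)) {
			used_ml_peer_id_map[id / BITS_PER_WORD] |= mask;
			return id;	/* lowest free id, like find_first_zero_bit() */
		}
	}
	return ML_PEER_ID_INVALID;	/* all 256 ids in use */
}

static void ml_peer_id_free(unsigned int id)
{
	used_ml_peer_id_map[id / BITS_PER_WORD] &=
		~(1ULL << (id % BITS_PER_WORD));
}

int main(void)
{
	unsigned int a = ml_peer_id_alloc();	/* 0 */
	unsigned int b = ml_peer_id_alloc();	/* 1 */

	ml_peer_id_free(a);
	printf("%u %u %u\n", a, b, ml_peer_id_alloc());	/* 0 1 0 */
	return 0;
}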
@@ -732,6 +769,8 @@ struct ath12k_pdev_cap {
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath12k_band_cap band[NUM_NL80211_BANDS];
+ u32 eml_cap;
+ u32 mld_cap;
};
struct mlo_timestamp {
@@ -784,19 +823,51 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
-/**
- * enum ath12k_link_capable_flags - link capable flags
- *
- * Single/Multi link capability information
- *
- * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present within a device.
- * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present across the devices.
+struct ath12k_mlo_memory {
+ struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ int mlo_mem_size;
+ bool init_done;
+};
+
+struct ath12k_hw_link {
+ u8 device_id;
+ u8 pdev_idx;
+};
+
+/* Holds info on the group of devices that are registered as a single
+ * wiphy, protected with struct ath12k_hw_group::mutex.
*/
-enum ath12k_link_capable_flags {
- ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0),
- ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1),
+struct ath12k_hw_group {
+ struct list_head list;
+ u8 id;
+ u8 num_devices;
+ u8 num_probed;
+ u8 num_started;
+ unsigned long flags;
+ struct ath12k_base *ab[ATH12K_MAX_SOCS];
+
+ /* protects access to this struct */
+ struct mutex mutex;
+
+ /* Holds information of wiphy (hw) registration.
+ *
+ * In Multi/Single Link Operation case, all pdevs are registered as
+ * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
+ * registered as separate wiphys.
+ */
+ struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+ u8 num_hw;
+ bool mlo_capable;
+ struct device_node *wsi_node[ATH12K_MAX_SOCS];
+ struct ath12k_mlo_memory mlo_mem;
+ struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
+ bool hw_link_id_init_done;
+};
+
+/* Holds WSI info specific to each device, excluding WSI group info */
+struct ath12k_wsi_info {
+ u32 index;
+ u32 hw_link_id_base;
};
/* Master structure to hold the hw data which may be used in core module */
@@ -862,15 +933,6 @@ struct ath12k_base {
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
- /* Holds information of wiphy (hw) registration.
- *
- * In Multi/Single Link Operation case, all pdevs are registered as
- * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
- * registered as separate wiphys.
- */
- struct ath12k_hw *ah[MAX_RADIOS];
- u8 num_hw;
-
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -964,12 +1026,8 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* mlo_capable_flags denotes the single/multi link operation
- * capabilities of the Device.
- *
- * See enum ath12k_link_capable_flags
- */
- u8 mlo_capable_flags;
+ /* Denotes whether MLO is possible within the chip */
+ bool single_chip_mlo_supp;
struct completion restart_completed;
@@ -992,6 +1050,9 @@ struct ath12k_base {
struct notifier_block panic_nb;
+ struct ath12k_hw_group *ag;
+ struct ath12k_wsi_info wsi_info;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1022,6 +1083,7 @@ int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@@ -1029,6 +1091,8 @@ u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
switch (state) {
@@ -1129,4 +1193,41 @@ static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
#define for_each_ar(ah, ar, index) \
for ((index) = 0; ((index) < (ah)->num_radio && \
((ar) = &(ah)->radio[(index)])); (index)++)
+
+static inline struct ath12k_hw *ath12k_ag_to_ah(struct ath12k_hw_group *ag, int idx)
+{
+ return ag->ah[idx];
+}
+
+static inline void ath12k_ag_set_ah(struct ath12k_hw_group *ag, int idx,
+ struct ath12k_hw *ah)
+{
+ ag->ah[idx] = ah;
+}
+
+static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
+{
+ return ab->ag;
+}
+
+static inline void ath12k_core_started(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started++;
+}
+
+static inline void ath12k_core_stopped(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started--;
+}
+
+static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
+ u8 device_id)
+{
+ return ag->ab[device_id];
+}
+
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/coredump.c b/drivers/net/wireless/ath/ath12k/coredump.c
index 72d675d15e64..ce1beeb54836 100644
--- a/drivers/net/wireless/ath/ath12k/coredump.c
+++ b/drivers/net/wireless/ath/ath12k/coredump.c
@@ -27,6 +27,9 @@ ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type(enum ath12k_qmi_target_m
case CALDB_MEM_REGION_TYPE:
dump_type = FW_CRASH_DUMP_NONE;
break;
+ case MLO_GLOBAL_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_MLO_GLOBAL_DATA;
+ break;
default:
dump_type = FW_CRASH_DUMP_TYPE_MAX;
break;
diff --git a/drivers/net/wireless/ath/ath12k/coredump.h b/drivers/net/wireless/ath/ath12k/coredump.h
index 5d6003b1c12d..13f46a605113 100644
--- a/drivers/net/wireless/ath/ath12k/coredump.h
+++ b/drivers/net/wireless/ath/ath12k/coredump.h
@@ -15,6 +15,7 @@ enum ath12k_fw_crash_dump_type {
FW_CRASH_DUMP_PAGEABLE_DATA,
FW_CRASH_DUMP_M3_DUMP,
FW_CRASH_DUMP_NONE,
+ FW_CRASH_DUMP_MLO_GLOBAL_DATA,
/* keep last */
FW_CRASH_DUMP_TYPE_MAX,
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index fe5a732ba9ec..ff6eaeafa092 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -36,7 +36,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
va_end(args);
}
-void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -45,7 +45,7 @@ void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
- dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ dev_warn_ratelimited(dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index f7005917362c..90e801136bc6 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -31,7 +31,10 @@ enum ath12k_debug_mask {
__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
-__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
+
+#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__)
+#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
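ath12k_warn() becomes a thin macro over __ath12k_warn(), which takes the struct device directly, so one rate-limited printer now serves both ath12k_base and ath12k_hw callers (the latter gained a dev pointer in this series). A standalone sketch of the dispatch pattern with a plain variadic printer:

/* Userspace sketch of dispatching one variadic printer through
 * per-object macros, as ath12k_warn()/ath12k_hw_warn() now do.
 */
#include <stdarg.h>
#include <stdio.h>

struct device { const char *name; };
struct ath12k_base { struct device *dev; };
struct ath12k_hw { struct device *dev; };

__attribute__((format(printf, 2, 3)))
static void __warn(struct device *dev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printf("%s: ", dev->name);
	vprintf(fmt, args);
	va_end(args);
}

#define base_warn(ab, fmt, ...) __warn((ab)->dev, fmt, ##__VA_ARGS__)
#define hw_warn(ah, fmt, ...)   __warn((ah)->dev, fmt, ##__VA_ARGS__)

int main(void)
{
	struct device d = { "wifi0" };
	struct ath12k_base ab = { &d };
	struct ath12k_hw ah = { &d };

	base_warn(&ab, "failed to start core: %d\n", -5);
	hw_warn(&ah, "no radios left\n");
	return 0;
}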
extern unsigned int ath12k_debug_mask;
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index c9980c0193d1..41e4ef2ef3af 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -48,6 +48,28 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
footer);
}
+static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26:
+ return "26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52:
+ return "52";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106:
+ return "106";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242:
+ return "242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484:
+ return "484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996:
+ return "996";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2:
+ return "996x2";
+ default:
+ return "unknown";
+ }
+}
+
static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
{
switch (ru_size) {
@@ -88,6 +110,17 @@ static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
}
}
+static const char *
+ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size)
+{
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size);
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size);
+ else
+ return "unknown";
+}
+
static void
htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
@@ -1562,7 +1595,8 @@ ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ac_mu_mimo_brpoll,
- ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS, "\n\n");
+ ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1,
+ "\n\n");
stats_req->buf_len = len;
}
@@ -1590,7 +1624,7 @@ ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ax_mu_mimo_brpoll,
- ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
le32_to_cpu(htt_stats_buf->ax_basic_trigger));
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n",
@@ -2276,9 +2310,9 @@ ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len,
len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size",
htt_stats_buf->ul_mumimo_grp_best_grp_size,
ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
- len += print_array_to_buf_index(buf, len, "ul_mumimo_grp_best_num_usrs = ", 1,
- htt_stats_buf->ul_mumimo_grp_best_usrs,
- ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ",
+ htt_stats_buf->ul_mumimo_grp_best_usrs,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
len += print_array_to_buf(buf, len,
"ul_mumimo_grp_tputs_observed (per bin = 300 mbps)",
htt_stats_buf->ul_mumimo_grp_tputs,
@@ -2543,6 +2577,1050 @@ ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
}
static void
+ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]));
+ len += scnprintf(buf + len, buf_len - len, "9 Mbps: %u, 12 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]));
+ len += scnprintf(buf + len, buf_len - len, "18 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]));
+ len += scnprintf(buf + len, buf_len - len, "24 Mbps: %u, 36 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]));
+ len += scnprintf(buf + len, buf_len - len, "48 Mbps: %u, 54 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+
+ len += print_array_to_buf(buf, len, "tx_ol_mcs", htt_stats_buf->tx_su_ol_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_ibf_mcs", htt_stats_buf->tx_su_ibf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_txbf_mcs", htt_stats_buf->tx_su_txbf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf_index(buf, len, "tx_ol_nss", 1,
+ htt_stats_buf->tx_su_ol_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_ibf_nss", 1,
+ htt_stats_buf->tx_su_ibf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_txbf_nss", 1,
+ htt_stats_buf->tx_su_txbf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf(buf, len, "tx_ol_bw", htt_stats_buf->tx_su_ol_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ol_bw" :
+ "half_tx_ol_bw",
+ htt_stats_buf->ol[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_ibf_bw", htt_stats_buf->tx_su_ibf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ibf_bw" :
+ "half_tx_ibf_bw",
+ htt_stats_buf->ibf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_txbf_bw", htt_stats_buf->tx_su_txbf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_txbf_bw" :
+ "half_tx_txbf_bw",
+ htt_stats_buf->txbf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TXBF_FLAG_RETURN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "TXBF_reason_code_stats: 0:%u, 1:%u,",
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_mu_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_final_status));
+ len += scnprintf(buf + len, buf_len - len, " 2:%u, 3:%u, 4:%u, 5:%u, ",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_verified_txbf_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_p2p_access),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_max_nss_in_he160),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_uldlofdma));
+ len += scnprintf(buf + len, buf_len - len, "6:%u, 7:%u\n\n",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_mcs_threshold_val),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_final_status));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndpa_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_brp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_tied =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_flushed));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err_num_cbf_rcvd =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_num_cbf_rcvd));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
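All of these TLV printers follow one pattern: append " index:value," pairs into a fixed buffer with scnprintf(), then back up one byte and re-terminate to drop the trailing comma. A userspace sketch of the pattern; since libc's snprintf() returns the would-be length rather than the bytes written, the sketch first defines a scnprintf() lookalike so the len accumulation stays bounded:

/* Sketch of the stats printer pattern: bounded accumulation with a
 * scnprintf() lookalike, then trimming the trailing comma in place.
 */
#include <stdarg.h>
#include <stdio.h>

/* Like kernel scnprintf(): returns bytes actually written, never more
 * than size - 1, so "len +=" accumulation cannot run past the buffer.
 */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (!size)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return i >= (int)size ? (int)size - 1 : i;
}

int main(void)
{
	char buf[128];
	unsigned int len = 0, vals[3] = { 10, 20, 30 };

	len += scnprintf(buf + len, sizeof(buf) - len, "ndpa_queued =");
	for (unsigned int i = 0; i < 3; i++)
		len += scnprintf(buf + len, sizeof(buf) - len, " %u:%u,",
				 i + 1, vals[i]);
	len--;			/* back up over the trailing comma... */
	buf[len] = '\0';	/* ...and re-terminate, as the TLV code does */

	puts(buf);		/* ndpa_queued = 1:10 2:20 3:30 */
	return 0;
}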
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_steer_arr);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_ppdu_steer));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_prefetch =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_prefetch));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_force_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_force_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_failed));
+
+ stats_req->buf_len = len;
+}
+
+static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info,
+ int idx, char *str_buf)
+{
+ u64 page_timestamp;
+ u16 index = 0;
+
+ page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb);
+
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Index - %u ; Page Number - %u ; ",
+ idx, le32_to_cpu(pg_info->page_num));
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Num of pages - %u ; Timestamp - %lluus\n",
+ le32_to_cpu(pg_info->num_pgs), page_timestamp);
+}
+
+static void
+ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 dword_lock, dword_unlock;
+ int i;
+ u8 *buf = stats_req->buf;
+ u8 pg_locked;
+ u8 pg_unlock;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+
+ if (tag_len < sizeof(*stat_buf))
+ return;
+
+ dword_lock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2);
+ dword_unlock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2);
+
+ pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED;
+ pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1));
+ len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));
+
+ len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
+ dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1));
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
+ dword_unlock ? dword_unlock - 1 : (ATH12K_PAGER_MAX - 1));
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf, i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i]));
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i]));
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
+ le32_to_cpu(htt_stats_buf->false_radar_cnt),
+ le32_to_cpu(htt_stats_buf->fw_run_time));
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->radar_cs_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n",
+ a_sle32_to_cpu(htt_stats_buf->ani_level));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mactx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->macrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phytx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt));
+ len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt",
+ htt_stats_buf->rx_pkt_crc_pass_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n");
+ len += print_array_to_buf(buf, len, "per_blk_err_cnt",
+ htt_stats_buf->per_blk_err_cnt,
+ ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_ota_err_cnt",
+ htt_stats_buf->rx_ota_err_cnt,
+ ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_mhz));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq1));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq2));
+ len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_phy_mode));
+ len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->chan_flags));
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_num));
+ len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->prev_reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_warm_reset_src));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode));
+ len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->xbar_val));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrf_mode));
+ len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_homechan));
+ len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_tx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_rx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phybb_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phyrf_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_dfs_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_sscan_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_synth_sel_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_adfs_freq));
+ len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cck_fir_settings));
+ len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan));
+ len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cca_thresh));
+ len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+ le32_to_cpu(htt_stats_buf->dyn_cca_status));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt));
+ len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->temperature_recal_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale_db));
+ len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n",
+ le32_to_cpu(htt_stats_buf->min_negative_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n",
+ le32_to_cpu(htt_stats_buf->reg_ctl_domain));
+ len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n",
+ le32_to_cpu(htt_stats_buf->twice_max_rd_power));
+ len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->home_max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n",
+ le32_to_cpu(htt_stats_buf->psd_power));
+ len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n",
+ le32_to_cpu(htt_stats_buf->eirp_power));
+ len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n",
+ le32_to_cpu(htt_stats_buf->power_type_6ghz));
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power",
+ htt_stats_buf->max_reg_allowed_power,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz",
+ htt_stats_buf->max_reg_allowed_power_6ghz,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_cfreq",
+ htt_stats_buf->sub_band_cfreq,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_txpower",
+ htt_stats_buf->sub_band_txpower,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf;
+ u64 drop_count;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi,
+ htt_stats_buf->inv_peers_msdu_drop_count_lo);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n",
+ drop_count);
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 ru_size_cnt = 0;
+ u32 rc_mode, ru_type;
+ u8 *buf = stats_req->buf, i;
+ const char *mode_prefix;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ rc_mode = le32_to_cpu(stats_buf->rc_mode);
+ ru_type = le32_to_cpu(stats_buf->ru_type);
+
+ switch (rc_mode) {
+ case ATH12K_HTT_STATS_RC_MODE_DLSU:
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n");
+ mode_prefix = "su";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
+ mode_prefix = "mu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n");
+ mode_prefix = "ofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n");
+ mode_prefix = "ulmu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n");
+ mode_prefix = "ulofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ default:
+ return;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_tried));
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_tried));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) &&
+ ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) {
+ len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len,
+ "non_data_ppdus_%s = ", mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len,
+ "ppdus_ack_failed_%s = ", mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+ }
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) {
+ len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n",
+ le32_to_cpu(stats_buf->last_probed_bw));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
+ le32_to_cpu(stats_buf->last_probed_nss));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
+ le32_to_cpu(stats_buf->last_probed_mcs));
+ len += print_array_to_buf(buf, len, "MU Probe count per RC MODE",
+ stats_buf->probe_cnt,
+ ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ast_entry_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_ast_entry_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+ u32 ast_info;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_addr_l32 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32);
+ mac_addr_h16 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16);
+ ast_info = le32_to_cpu(htt_stats_buf->info);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_AST_ENTRY_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ast_index = %u\n",
+ le32_to_cpu(htt_stats_buf->ast_index));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_0),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_1),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_2),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_3),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_0),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_1));
+
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ le32_to_cpu(htt_stats_buf->sw_peer_id));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_PDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_VDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "next_hop = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_NEXT_HOP_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mcast = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MCAST_INFO));
+ len += scnprintf(buf + len, buf_len - len, "monitor_direct = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MONITOR_DIRECT_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mesh_sta = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MESH_STA_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mec = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MEC_INFO));
+ len += scnprintf(buf + len, buf_len - len, "intra_bss = %u\n\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_INTRA_BSS_INFO));
+
+ stats_req->buf_len = len;
+}
+
+static const char *
+ath12k_htt_get_punct_dir_type_str(enum ath12k_htt_stats_direction direction)
+{
+ switch (direction) {
+ case ATH12K_HTT_STATS_DIRECTION_TX:
+ return "tx";
+ case ATH12K_HTT_STATS_DIRECTION_RX:
+ return "rx";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_htt_get_punct_ppdu_type_str(enum ath12k_htt_stats_ppdu_type ppdu_type)
+{
+ switch (ppdu_type) {
+ case ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU:
+ return "su";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO:
+ return "dl_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO:
+ return "ul_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA:
+ return "dl_mu_ofdma";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA:
+ return "ul_mu_ofdma";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_htt_get_punct_pream_type_str(enum ath12k_htt_stats_param_type pream_type)
+{
+ switch (pream_type) {
+ case ATH12K_HTT_STATS_PREAM_OFDM:
+ return "ofdm";
+ case ATH12K_HTT_STATS_PREAM_CCK:
+ return "cck";
+ case ATH12K_HTT_STATS_PREAM_HT:
+ return "ht";
+ case ATH12K_HTT_STATS_PREAM_VHT:
+ return "ac";
+ case ATH12K_HTT_STATS_PREAM_HE:
+ return "ax";
+ case ATH12K_HTT_STATS_PREAM_EHT:
+ return "be";
+ default:
+ return "unknown";
+ }
+}
+
+static void
+ath12k_htt_print_puncture_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_puncture_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ const char *direction;
+ const char *ppdu_type;
+ const char *preamble;
+ u32 mac_id__word;
+ u32 subband_limit;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ mac_id__word = le32_to_cpu(stats_buf->mac_id__word);
+ subband_limit = min(le32_to_cpu(stats_buf->subband_cnt),
+ ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT);
+
+ direction = ath12k_htt_get_punct_dir_type_str(le32_to_cpu(stats_buf->direction));
+ ppdu_type = ath12k_htt_get_punct_ppdu_type_str(le32_to_cpu(stats_buf->ppdu_type));
+ preamble = ath12k_htt_get_punct_pream_type_str(le32_to_cpu(stats_buf->preamble));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_PUNCTURE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id__word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_last_used_pattern_mask = 0x%08x\n",
+ direction, preamble, ppdu_type,
+ le32_to_cpu(stats_buf->last_used_pattern_mask));
+
+ for (i = 0; i < subband_limit; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_num_subbands_used_cnt_%02d = %u\n",
+ direction, preamble, ppdu_type, i + 1,
+ le32_to_cpu(stats_buf->num_subbands_used_cnt[i]));
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
@@ -2561,7 +3639,6 @@ ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms,
htt_stats_buf->reset_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time);
-
time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms,
htt_stats_buf->disengage_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time);
@@ -2680,7 +3757,7 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
len += scnprintf(buf + len, buf_len - len, "\n");
len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1,
htt_stats_buf->be_ofdma_tx_nss,
- ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS,
"\n");
len += print_array_to_buf(buf, len, "be_ofdma_tx_bw",
htt_stats_buf->be_ofdma_tx_bw,
@@ -2696,6 +3773,45 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_trigger_within_bss = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss));
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -2869,6 +3985,55 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_PDEV_OBSS_PD_TAG:
ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+ ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_DLPAGER_STATS_TAG:
+ ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_STATS_TAG:
+ ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+ ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_TPC_STATS_TAG:
+ ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
+ ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PER_RATE_STATS_TAG:
+ ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_AST_ENTRY_TAG:
+ ath12k_htt_print_ast_entry_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_PUNCTURE_STATS_TAG:
+ ath12k_htt_print_puncture_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_DMAC_RESET_STATS_TAG:
ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req);
break;
@@ -2878,6 +4043,10 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG:
ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG:
+ ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index ac86cab234ec..4b59976fbc35 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -135,9 +135,18 @@ enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
+ ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
+ ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+ ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
+ ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
+ ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
+ ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
+ ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -192,16 +201,33 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_HW_WAR_TAG = 89,
HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
+ HTT_STATS_DLPAGER_STATS_TAG = 120,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
+ HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
+ HTT_STATS_PHY_RESET_STATS_TAG = 124,
+ HTT_STATS_SOC_TXRX_STATS_COMMON_TAG = 125,
+ HTT_STATS_PER_RATE_STATS_TAG = 128,
HTT_STATS_MU_PPDU_DIST_TAG = 129,
HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130,
+ HTT_STATS_AST_ENTRY_TAG = 132,
HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135,
HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137,
HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138,
HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139,
+ HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG = 147,
+ HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG = 148,
+ HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG = 149,
+ HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG = 150,
HTT_STATS_DMAC_RESET_STATS_TAG = 155,
+ HTT_STATS_PHY_TPC_STATS_TAG = 157,
+ HTT_STATS_PDEV_PUNCTURE_STATS_TAG = 158,
HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165,
+ HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG = 172,
+ HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176,
HTT_STATS_MAX_TAG,
};
@@ -1054,6 +1080,275 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
+#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_NUM_BW_CNTRS 5
+#define ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES 2
+
+struct ath12k_htt_pdev_txrate_txbf_stats_tlv {
+ __le32 tx_su_txbf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_ibf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_ol_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_txbf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_ibf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_ol_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_txbf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_su_ibf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_su_ol_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ __le32 txbf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 ibf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 ol[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 txbf_flag_set_mu_mode;
+ __le32 txbf_flag_set_final_status;
+ __le32 txbf_flag_not_set_verified_txbf_mode;
+ __le32 txbf_flag_not_set_disable_p2p_access;
+ __le32 txbf_flag_not_set_max_nss_in_he160;
+ __le32 txbf_flag_not_set_disable_uldlofdma;
+ __le32 txbf_flag_not_set_mcs_threshold_val;
+ __le32 txbf_flag_not_set_final_status;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t {
+ __le32 ax_ofdma_ndpa_queued;
+ __le32 ax_ofdma_ndpa_tried;
+ __le32 ax_ofdma_ndpa_flush;
+ __le32 ax_ofdma_ndpa_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv {
+ __le32 num_elems_ax_ndpa_arr;
+ __le32 arr_elem_size_ax_ndpa;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t, ax_ndpa);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t {
+ __le32 ax_ofdma_ndp_queued;
+ __le32 ax_ofdma_ndp_tried;
+ __le32 ax_ofdma_ndp_flush;
+ __le32 ax_ofdma_ndp_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv {
+ __le32 num_elems_ax_ndp_arr;
+ __le32 arr_elem_size_ax_ndp;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t, ax_ndp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t {
+ __le32 ax_ofdma_brp_queued;
+ __le32 ax_ofdma_brp_tried;
+ __le32 ax_ofdma_brp_flushed;
+ __le32 ax_ofdma_brp_err;
+ __le32 ax_ofdma_num_cbf_rcvd;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv {
+ __le32 num_elems_ax_brp_arr;
+ __le32 arr_elem_size_ax_brp;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t, ax_brp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t {
+ __le32 num_ppdu_steer;
+ __le32 num_ppdu_ol;
+ __le32 num_usr_prefetch;
+ __le32 num_usr_sound;
+ __le32 num_usr_force_sound;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv {
+ __le32 num_elems_ax_steer_arr;
+ __le32 arr_elem_size_ax_steer;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t, ax_steer);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv {
+ __le32 ax_ofdma_rbo_steer_mpdus_tried;
+ __le32 ax_ofdma_rbo_steer_mpdus_failed;
+ __le32 ax_ofdma_sifs_steer_mpdus_tried;
+ __le32 ax_ofdma_sifs_steer_mpdus_failed;
+} __packed;
+
+enum ath12k_htt_stats_page_lock_state {
+ ATH12K_HTT_STATS_PAGE_LOCKED = 0,
+ ATH12K_HTT_STATS_PAGE_UNLOCKED = 1,
+ ATH12K_NUM_PG_LOCK_STATE
+};
+
+#define ATH12K_PAGER_MAX 10
+
+#define ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0 GENMASK(7, 0)
+#define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0 GENMASK(15, 8)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1 GENMASK(31, 16)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2 GENMASK(31, 16)
+
+struct ath12k_htt_pgs_info {
+ __le32 page_num;
+ __le32 num_pgs;
+ __le32 ts_lsb;
+ __le32 ts_msb;
+} __packed;
+
+struct ath12k_htt_dl_pager_stats_tlv {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX];
+} __packed;
+
+#define ATH12K_HTT_STATS_MAX_CHAINS 8
+#define ATH12K_HTT_MAX_RX_PKT_CNT 8
+#define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define ATH12K_HTT_MAX_PER_BLK_ERR_CNT 20
+#define ATH12K_HTT_MAX_RX_OTA_ERR_CNT 14
+#define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE 16
+
+struct ath12k_htt_phy_stats_tlv {
+ a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 false_radar_cnt;
+ __le32 radar_cs_cnt;
+ a_sle32 ani_level;
+ __le32 fw_run_time;
+ a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+} __packed;
+
+struct ath12k_htt_phy_counters_tlv {
+ __le32 rx_ofdma_timing_err_cnt;
+ __le32 rx_cck_fail_cnt;
+ __le32 mactx_abort_cnt;
+ __le32 macrx_abort_cnt;
+ __le32 phytx_abort_cnt;
+ __le32 phyrx_abort_cnt;
+ __le32 phyrx_defer_abort_cnt;
+ __le32 rx_gain_adj_lstf_event_cnt;
+ __le32 rx_gain_adj_non_legacy_cnt;
+ __le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT];
+ __le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ __le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT];
+ __le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT];
+} __packed;
+
+struct ath12k_htt_phy_reset_stats_tlv {
+ __le32 pdev_id;
+ __le32 chan_mhz;
+ __le32 chan_band_center_freq1;
+ __le32 chan_band_center_freq2;
+ __le32 chan_phy_mode;
+ __le32 chan_flags;
+ __le32 chan_num;
+ __le32 reset_cause;
+ __le32 prev_reset_cause;
+ __le32 phy_warm_reset_src;
+ __le32 rx_gain_tbl_mode;
+ __le32 xbar_val;
+ __le32 force_calibration;
+ __le32 phyrf_mode;
+ __le32 phy_homechan;
+ __le32 phy_tx_ch_mask;
+ __le32 phy_rx_ch_mask;
+ __le32 phybb_ini_mask;
+ __le32 phyrf_ini_mask;
+ __le32 phy_dfs_en_mask;
+ __le32 phy_sscan_en_mask;
+ __le32 phy_synth_sel_mask;
+ __le32 phy_adfs_freq;
+ __le32 cck_fir_settings;
+ __le32 phy_dyn_pri_chan;
+ __le32 cca_thresh;
+ __le32 dyn_cca_status;
+ __le32 rxdesense_thresh_hw;
+ __le32 rxdesense_thresh_sw;
+} __packed;
+
+struct ath12k_htt_phy_reset_counters_tlv {
+ __le32 pdev_id;
+ __le32 cf_active_low_fail_cnt;
+ __le32 cf_active_low_pass_cnt;
+ __le32 phy_off_through_vreg_cnt;
+ __le32 force_calibration_cnt;
+ __le32 rf_mode_switch_phy_off_cnt;
+ __le32 temperature_recal_cnt;
+} __packed;
+
+struct ath12k_htt_phy_tpc_stats_tlv {
+ __le32 pdev_id;
+ __le32 tx_power_scale;
+ __le32 tx_power_scale_db;
+ __le32 min_negative_tx_power;
+ __le32 reg_ctl_domain;
+ __le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 twice_max_rd_power;
+ __le32 max_tx_power;
+ __le32 home_max_tx_power;
+ __le32 psd_power;
+ __le32 eirp_power;
+ __le32 power_type_6ghz;
+ __le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+ __le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+} __packed;
+
+struct ath12k_htt_t2h_soc_txrx_stats_common_tlv {
+ __le32 inv_peers_msdu_drop_count_hi;
+ __le32 inv_peers_msdu_drop_count_lo;
+} __packed;
+
+#define ATH12K_HTT_AST_PDEV_ID_INFO GENMASK(1, 0)
+#define ATH12K_HTT_AST_VDEV_ID_INFO GENMASK(9, 2)
+#define ATH12K_HTT_AST_NEXT_HOP_INFO BIT(10)
+#define ATH12K_HTT_AST_MCAST_INFO BIT(11)
+#define ATH12K_HTT_AST_MONITOR_DIRECT_INFO BIT(12)
+#define ATH12K_HTT_AST_MESH_STA_INFO BIT(13)
+#define ATH12K_HTT_AST_MEC_INFO BIT(14)
+#define ATH12K_HTT_AST_INTRA_BSS_INFO BIT(15)
+
+struct ath12k_htt_ast_entry_tlv {
+ __le32 sw_peer_id;
+ __le32 ast_index;
+ struct htt_mac_addr mac_addr;
+ __le32 info;
+} __packed;
+
+enum ath12k_htt_stats_direction {
+ ATH12K_HTT_STATS_DIRECTION_TX,
+ ATH12K_HTT_STATS_DIRECTION_RX
+};
+
+enum ath12k_htt_stats_ppdu_type {
+ ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU,
+ ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO,
+ ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO,
+ ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA,
+ ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA
+};
+
+enum ath12k_htt_stats_param_type {
+ ATH12K_HTT_STATS_PREAM_OFDM,
+ ATH12K_HTT_STATS_PREAM_CCK,
+ ATH12K_HTT_STATS_PREAM_HT,
+ ATH12K_HTT_STATS_PREAM_VHT,
+ ATH12K_HTT_STATS_PREAM_HE,
+ ATH12K_HTT_STATS_PREAM_EHT,
+ ATH12K_HTT_STATS_PREAM_RSVD1,
+ ATH12K_HTT_STATS_PREAM_COUNT,
+};
+
+#define ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT 32
+
+struct ath12k_htt_pdev_puncture_stats_tlv {
+ __le32 mac_id__word;
+ __le32 direction;
+ __le32 preamble;
+ __le32 ppdu_type;
+ __le32 subband_cnt;
+ __le32 last_used_pattern_mask;
+ __le32 num_subbands_used_cnt[ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT];
+} __packed;
+
struct ath12k_htt_dmac_reset_stats_tlv {
__le32 reset_count;
__le32 reset_time_lo_ms;
@@ -1085,6 +1380,10 @@ struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv {
__le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
} __packed;
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS 4
+#define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS 14
+
enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
@@ -1105,7 +1404,65 @@ enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
};
-#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+enum ATH12K_HTT_RC_MODE {
+ ATH12K_HTT_RC_MODE_SU_OL,
+ ATH12K_HTT_RC_MODE_SU_BF,
+ ATH12K_HTT_RC_MODE_MU1_INTF,
+ ATH12K_HTT_RC_MODE_MU2_INTF,
+ ATH12K_HTT_RC_MODE_MU3_INTF,
+ ATH12K_HTT_RC_MODE_MU4_INTF,
+ ATH12K_HTT_RC_MODE_MU5_INTF,
+ ATH12K_HTT_RC_MODE_MU6_INTF,
+ ATH12K_HTT_RC_MODE_MU7_INTF,
+ ATH12K_HTT_RC_MODE_2D_COUNT
+};
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+ ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
+};
+
+enum ath12k_htt_stats_rc_mode {
+ ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
+ ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
+ ATH12K_HTT_STATS_RC_MODE_DLOFDMA = 2,
+ ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3,
+ ATH12K_HTT_STATS_RC_MODE_ULOFDMA = 4,
+};
+
+enum ath12k_htt_stats_ru_type {
+ ATH12K_HTT_STATS_RU_TYPE_INVALID,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU,
+};
+
+struct ath12k_htt_tx_rate_stats {
+ __le32 ppdus_tried;
+ __le32 ppdus_ack_failed;
+ __le32 mpdus_tried;
+ __le32 mpdus_failed;
+} __packed;
+
+struct ath12k_htt_tx_per_rate_stats_tlv {
+ __le32 rc_mode;
+ __le32 last_probed_mcs;
+ __le32 last_probed_nss;
+ __le32 last_probed_bw;
+ struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+ struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_bw320;
+ __le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT];
+ __le32 ru_type;
+ struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+} __packed;
+
#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16
#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5
#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4
@@ -1115,11 +1472,23 @@ struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv {
__le32 mac_id__word;
__le32 be_ofdma_tx_ldpc;
__le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
- __le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
__le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
__le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
__le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
__le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
} __packed;
+struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
+ __le32 mac_id__word;
+ __le32 basic_trigger_across_bss;
+ __le32 basic_trigger_within_bss;
+ __le32 bsr_trigger_across_bss;
+ __le32 bsr_trigger_within_bss;
+ __le32 mu_rts_across_bss;
+ __le32 mu_rts_within_bss;
+ __le32 ul_mumimo_trigger_across_bss;
+ __le32 ul_mumimo_trigger_within_bss;
+} __packed;
+
#endif
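The TXBF OFDMA TLVs above pair a firmware-supplied element count with a DECLARE_FLEX_ARRAY() tail, while their printers validate tag_len only against the fixed-size header. A hedged sketch of a stricter clamp a parser could apply before walking the array — the helper name is hypothetical and not part of the patch:

static u32 ax_ndpa_elem_limit(const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *tlv,
			      u16 tag_len)
{
	/* assumes tag_len >= sizeof(*tlv) was already checked; sizeof()
	 * excludes the flexible array, so the remainder is element space
	 */
	u32 num = le32_to_cpu(tlv->num_elems_ax_ndpa_arr);
	u32 avail = (tag_len - sizeof(*tlv)) / sizeof(tlv->ax_ndpa[0]);

	return min(num, avail);
}

This keeps the element walk bounded by the bytes actually received, whatever count the firmware reports.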
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index c99e9ceb1a6e..9e5a4e75f2f6 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -41,6 +41,11 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
return;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
crypto_free_shash(peer->tfm_mmic);
peer->dp_setup_done = false;
@@ -977,27 +982,23 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
+ if (!ab->mon_reap_timer.function)
+ return;
+
del_timer_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
{
- struct ath12k *ar;
- struct ath12k_pdev_dp *dp;
- int i;
+ struct ath12k_pdev_dp *dp = &ar->dp;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- dp->mac_id = i;
- atomic_set(&dp->num_tx_pending, 0);
- init_waitqueue_head(&dp->tx_empty_waitq);
-
- /* TODO: Add any RXDMA setup required per pdev */
- }
+ dp->mac_id = ar->pdev_idx;
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ /* TODO: Add any RXDMA setup required per pdev */
}
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
@@ -1260,15 +1261,23 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
- if (!dp->reoq_lut.vaddr)
- return;
-
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
+ if (dp->reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ }
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ if (dp->ml_reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+ dp->ml_reoq_lut.vaddr = NULL;
+ }
}
void ath12k_dp_free(struct ath12k_base *ab)
@@ -1276,6 +1285,9 @@ void ath12k_dp_free(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
int i;
+ if (!dp->ab)
+ return;
+
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
@@ -1293,6 +1305,7 @@ void ath12k_dp_free(struct ath12k_base *ab)
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
+ dp->ab = NULL;
}
void ath12k_dp_cc_config(struct ath12k_base *ab)
@@ -1432,6 +1445,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+ rx_descs[j].device_id = ab->device_id;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
@@ -1508,6 +1522,19 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
return 0;
}
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ if (ag->ab[i] == ab)
+ continue;
+
+ ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC);
+ }
+}
+
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1594,8 +1621,23 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+ DP_REOQ_LUT_SIZE,
+ &dp->ml_reoq_lut.paddr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dp->ml_reoq_lut.vaddr) {
+ ath12k_warn(ab, "failed to allocate memory for ML reoq table");
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ return -ENOMEM;
+ }
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
dp->reoq_lut.paddr);
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
+ dp->ml_reoq_lut.paddr >> 8);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 2e05fc19410e..7ac3143de016 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -287,7 +287,8 @@ struct ath12k_rx_desc_info {
u32 cookie;
u32 magic;
u8 in_use : 1,
- reserved : 7;
+ device_id : 3,
+ reserved : 4;
};
struct ath12k_tx_desc_info {
@@ -368,6 +369,7 @@ struct ath12k_dp {
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct ath12k_reo_q_addr_lut reoq_lut;
+ struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
/* HTT definitions */
@@ -694,9 +696,9 @@ enum htt_stats_internal_ppdu_frametype {
*
* The message would appear as follows:
*
- * |31 26|25|24|23 16|15 8|7 0|
- * |-----------------+----------------+----------------+---------------|
- * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |31 29|28|27|26|25|24|23 16|15 8|7 0|
+ * |-------+--+--+--+--+--+-----------+----------------+---------------|
+ * | rsvd1 |ED|DT|OV|PS|SS| ring_id | pdev_id | msg_type |
* |-------------------------------------------------------------------|
* | rsvd2 | ring_buffer_size |
* |-------------------------------------------------------------------|
@@ -723,7 +725,13 @@ enum htt_stats_internal_ppdu_frametype {
* More details can be got from enum htt_srng_ring_id
* b'24 - status_swap: 1 is to swap status TLV
* b'25 - pkt_swap: 1 is to swap packet TLV
- * b'26:31 - rsvd1: reserved for future use
+ * b'26 - rx_offset_valid (OV): flag to indicate rx offsets
+ * configuration fields are valid
+ * b'27 - drop_thresh_valid (DT): flag to indicate if the
+ * rx_drop_threshold field is valid
+ * b'28 - rx_mon_global_en: Enable/Disable global register
+ * configuration in Rx monitor module.
+ * b'29:31 - rsvd1: reserved for future use
* dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring,
* in byte units.
* Valid only for HW_TO_SW_RING and SW_TO_HW_RING
@@ -1790,6 +1798,18 @@ enum vdev_stats_offload_timer_duration {
ATH12K_STATS_TIMER_DUR_2SEC = 3,
};
+#define ATH12K_HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define ATH12K_HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define ATH12K_HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define ATH12K_HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
+struct htt_mac_addr {
+ __le32 mac_addr_l32;
+ __le32 mac_addr_h16;
+} __packed;
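
The masks above split a MAC address across two little-endian words, octet 0 in the low byte of mac_addr_l32, which is what the memcpy() in ath12k_dp_get_mac_addr() below relies on. A sketch of the packing direction (htt_mac_addr_pack() is hypothetical; u32_encode_bits() is from <linux/bitfield.h>):

	static inline void htt_mac_addr_pack(struct htt_mac_addr *out,
					     const u8 *addr)
	{
		u32 l32 = u32_encode_bits(addr[0], ATH12K_HTT_MAC_ADDR_L32_0) |
			  u32_encode_bits(addr[1], ATH12K_HTT_MAC_ADDR_L32_1) |
			  u32_encode_bits(addr[2], ATH12K_HTT_MAC_ADDR_L32_2) |
			  u32_encode_bits(addr[3], ATH12K_HTT_MAC_ADDR_L32_3);
		u32 h16 = u32_encode_bits(addr[4], ATH12K_HTT_MAC_ADDR_H16_0) |
			  u32_encode_bits(addr[5], ATH12K_HTT_MAC_ADDR_H16_1);

		out->mac_addr_l32 = cpu_to_le32(l32);
		out->mac_addr_h16 = cpu_to_le32(h16);
	}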
+
static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
{
memcpy(addr, &addr_l32, 4);
@@ -1804,8 +1824,9 @@ void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif);
void ath12k_dp_free(struct ath12k_base *ab);
int ath12k_dp_alloc(struct ath12k_base *ab);
void ath12k_dp_cc_config(struct ath12k_base *ab);
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab);
int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
void ath12k_dp_pdev_free(struct ath12k_base *ab);
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type);
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 494984133a91..5a21961cfd46 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -10,11 +10,10 @@
#include "dp_tx.h"
#include "peer.h"
-static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
- struct hal_rx_user_status *rx_user_status)
+static void
+ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
+ struct hal_rx_user_status *rx_user_status)
{
- struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv;
-
rx_user_status->ul_ofdma_user_v0_word0 =
__le32_to_cpu(ppdu_end_user->usr_resp_ref);
rx_user_status->ul_ofdma_user_v0_word1 =
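
The rest of this file repeats a single transformation: each TLV parser takes a properly typed const pointer instead of u8 *, and because the dispatcher now passes const void *, C's implicit pointer conversion removes every cast. Distilled to a sketch (types and tag values illustrative):

	struct foo_tlv {
		__le32 info0;
	};

	static void parse_foo(const struct foo_tlv *foo, u32 *out)
	{
		*out = le32_get_bits(foo->info0, GENMASK(7, 0));
	}

	static void dispatch(u32 tag, const void *tlv_data, u32 *out)
	{
		switch (tag) {
		case 1:
			/* const void * converts implicitly, no cast */
			parse_foo(tlv_data, out);
			break;
		}
	}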
@@ -35,7 +34,7 @@ ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *st
}
static void
-ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
+ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *rx_user_status)
{
@@ -73,11 +72,9 @@ ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
-static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_vht_sig_a_info *vht_sig =
- (struct hal_rx_vht_sig_a_info *)tlv_data;
u32 nsts, group_id, info0, info1;
u8 gi_setting;
@@ -119,11 +116,9 @@ static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
}
-static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
+static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_ht_sig_info *ht_sig =
- (struct hal_rx_ht_sig_info *)tlv_data;
u32 info0 = __le32_to_cpu(ht_sig->info0);
u32 info1 = __le32_to_cpu(ht_sig->info1);
@@ -136,11 +131,9 @@ static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_lsig_b_info *lsigb =
- (struct hal_rx_lsig_b_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsigb->info0);
u8 rate;
@@ -170,11 +163,9 @@ static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_lsig_a_info *lsiga =
- (struct hal_rx_lsig_a_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsiga->info0);
u8 rate;
@@ -212,14 +203,13 @@ static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
- (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
u32 info0, value;
- info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+ info0 = __le32_to_cpu(ofdma->info0);
ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
@@ -250,11 +240,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
}
-static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
- (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
u32 info0, value;
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
@@ -277,11 +266,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
}
-static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
- (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
u16 ru_tones;
@@ -292,11 +280,10 @@ static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
-static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
- (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
u32 info0, info1, value;
u16 he_gi = 0, he_ltf = 0;
@@ -427,11 +414,9 @@ static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
-static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
+static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_a_su_info *he_sig_a =
- (struct hal_rx_he_sig_a_su_info *)tlv_data;
u32 info0, info1, value;
u32 dcm;
u8 he_dcm = 0, he_stbc = 0;
@@ -580,15 +565,15 @@ static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
- u32 tlv_tag, u8 *tlv_data, u32 userid)
+ u32 tlv_tag, const void *tlv_data,
+ u32 userid)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
u32 info[7];
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
- struct hal_rx_ppdu_start *ppdu_start =
- (struct hal_rx_ppdu_start *)tlv_data;
+ const struct hal_rx_ppdu_start *ppdu_start = tlv_data;
u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32,
ppdu_start->ppdu_start_ts_31_0);
@@ -615,8 +600,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_PPDU_END_USER_STATS: {
- struct hal_rx_ppdu_end_user_stats *eu_stats =
- (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+ const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data;
+ u32 tid_bitmap;
info[0] = __le32_to_cpu(eu_stats->info0);
info[1] = __le32_to_cpu(eu_stats->info1);
@@ -629,10 +614,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
ppdu_info->fc_valid =
u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
- ppdu_info->tid =
- ffs(u32_get_bits(info[6],
- HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)
- - 1);
+ tid_bitmap = u32_get_bits(info[6],
+ HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP);
+ ppdu_info->tid = ffs(tid_bitmap) - 1;
ppdu_info->tcp_msdu_count =
u32_get_bits(info[4],
HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
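
Besides the readability win, the rewrite above fixes operator placement: the subtraction previously sat inside ffs(), i.e. ffs(bitmap - 1) instead of ffs(bitmap) - 1. Since ffs() is 1-based, the results usually differ; a standalone userspace check:

	#include <assert.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int tid_bitmap = 1u << 3;	/* only TID 3 set */

		assert(ffs(tid_bitmap) - 1 == 3);	/* fixed: TID 3 */
		assert(ffs(tid_bitmap - 1) == 1);	/* old: bogus TID 1 */
		return 0;
	}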
@@ -673,8 +657,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
&ppdu_info->userstats[userid];
ppdu_info->num_users += 1;
- ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
- ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
+ ath12k_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info,
rxuser_stats);
}
ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
@@ -682,8 +666,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_PPDU_END_USER_STATS_EXT: {
- struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
- (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data;
+
ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
@@ -729,8 +713,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_PHYRX_RSSI_LEGACY: {
- struct hal_rx_phyrx_rssi_legacy_info *rssi =
- (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+ const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
info[0] = __le32_to_cpu(rssi->info0);
info[1] = __le32_to_cpu(rssi->info1);
@@ -748,8 +731,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RXPCU_PPDU_END_INFO: {
- struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
- (struct hal_rx_ppdu_end_duration *)tlv_data;
+ const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
ppdu_info->rx_duration =
@@ -760,8 +742,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_MPDU_START: {
- struct hal_rx_mpdu_start *mpdu_start =
- (struct hal_rx_mpdu_start *)tlv_data;
+ const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
u16 peer_id;
@@ -790,8 +771,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_MON_BUF_ADDR: {
struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
- struct dp_mon_packet_info *packet_info =
- (struct dp_mon_packet_info *)tlv_data;
+ const struct dp_mon_packet_info *packet_info = tlv_data;
int buf_id = u32_get_bits(packet_info->cookie,
DP_RXDMA_BUF_COOKIE_BUF_ID);
struct sk_buff *msdu;
@@ -823,8 +803,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_MSDU_END: {
- struct rx_msdu_end_qcn9274 *msdu_end =
- (struct rx_msdu_end_qcn9274 *)tlv_data;
+ const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data;
bool is_first_msdu_in_mpdu;
u16 msdu_end_info;
@@ -1093,8 +1072,14 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
- if (peer && peer->sta)
+ if (peer && peer->sta) {
pubsta = peer->sta;
+ if (pubsta->valid_links) {
+ status->link_valid = 1;
+ status->link_id = peer->link_id;
+ }
+ }
+
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
@@ -1199,19 +1184,19 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
struct sk_buff *skb)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- struct hal_tlv_hdr *tlv;
+ struct hal_tlv_64_hdr *tlv;
enum hal_rx_mon_status hal_status;
- u32 tlv_userid = 0;
+ u32 tlv_userid;
u16 tlv_tag, tlv_len;
u8 *ptr = skb->data;
memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
do {
- tlv = (struct hal_tlv_hdr *)ptr;
- tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
- tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
- tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+ tlv = (struct hal_tlv_64_hdr *)ptr;
+ tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+ tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@@ -1226,7 +1211,7 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
- ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+ ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
break;
@@ -1603,7 +1588,7 @@ ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
static enum dp_mon_tx_tlv_status
ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
- u16 tlv_tag, u8 *tlv_data, u32 userid)
+ u16 tlv_tag, const void *tlv_data, u32 userid)
{
struct dp_mon_tx_ppdu_info *tx_ppdu_info;
enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
@@ -1613,8 +1598,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
switch (tlv_tag) {
case HAL_TX_FES_SETUP: {
- struct hal_tx_fes_setup *tx_fes_setup =
- (struct hal_tx_fes_setup *)tlv_data;
+ const struct hal_tx_fes_setup *tx_fes_setup = tlv_data;
info[0] = __le32_to_cpu(tx_fes_setup->info0);
tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
@@ -1625,8 +1609,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_END: {
- struct hal_tx_fes_status_end *tx_fes_status_end =
- (struct hal_tx_fes_status_end *)tlv_data;
+ const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data;
u32 tst_15_0, tst_31_16;
info[0] = __le32_to_cpu(tx_fes_status_end->info0);
@@ -1643,8 +1626,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_RX_RESPONSE_REQUIRED_INFO: {
- struct hal_rx_resp_req_info *rx_resp_req_info =
- (struct hal_rx_resp_req_info *)tlv_data;
+ const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1689,8 +1671,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_PCU_PPDU_SETUP_INIT: {
- struct hal_tx_pcu_ppdu_setup_init *ppdu_setup =
- (struct hal_tx_pcu_ppdu_setup_init *)tlv_data;
+ const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1736,8 +1717,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_QUEUE_EXTENSION: {
- struct hal_tx_queue_exten *tx_q_exten =
- (struct hal_tx_queue_exten *)tlv_data;
+ const struct hal_tx_queue_exten *tx_q_exten = tlv_data;
info[0] = __le32_to_cpu(tx_q_exten->info0);
@@ -1749,8 +1729,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_START: {
- struct hal_tx_fes_status_start *tx_fes_start =
- (struct hal_tx_fes_status_start *)tlv_data;
+ const struct hal_tx_fes_status_start *tx_fes_start = tlv_data;
info[0] = __le32_to_cpu(tx_fes_start->info0);
@@ -1761,8 +1740,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_PROT: {
- struct hal_tx_fes_status_prot *tx_fes_status =
- (struct hal_tx_fes_status_prot *)tlv_data;
+ const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data;
u32 start_timestamp;
u32 end_timestamp;
@@ -1789,8 +1767,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
case HAL_TX_FES_STATUS_START_PPDU:
case HAL_TX_FES_STATUS_START_PROT: {
- struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
- (struct hal_tx_fes_status_start_prot *)tlv_data;
+ const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data;
u64 ppdu_ts;
info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
@@ -1805,8 +1782,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_USER_PPDU: {
- struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
- (struct hal_tx_fes_status_user_ppdu *)tlv_data;
+ const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data;
info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
@@ -1849,8 +1825,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_RX_FRAME_BITMAP_ACK: {
- struct hal_rx_frame_bitmap_ack *fbm_ack =
- (struct hal_rx_frame_bitmap_ack *)tlv_data;
+ const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1868,8 +1843,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_MACTX_PHY_DESC: {
- struct hal_tx_phy_desc *tx_phy_desc =
- (struct hal_tx_phy_desc *)tlv_data;
+ const struct hal_tx_phy_desc *tx_phy_desc = tlv_data;
info[0] = __le32_to_cpu(tx_phy_desc->info0);
info[1] = __le32_to_cpu(tx_phy_desc->info1);
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 9ae579e50557..dad35bfd83f6 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -740,15 +740,22 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(lower_32_bits(paddr),
BUFFER_ADDR_INFO0_ADDR);
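
Both qref helpers now share the same addressing rule: strip ATH12K_PEER_ML_ID_VALID to select the ML LUT, then index at a per-peer stride of IEEE80211_NUM_TIDS + 1 (17 entries, one per TID plus one extra slot). The duplicated selection could be factored into one lookup; a hypothetical helper, not part of the patch:

	static struct ath12k_reo_queue_ref *
	reoq_lut_entry(struct ath12k_dp *dp, u16 peer_id, u8 tid)
	{
		struct ath12k_reo_q_addr_lut *lut = &dp->reoq_lut;

		if (peer_id & ATH12K_PEER_ML_ID_VALID) {
			peer_id &= ~ATH12K_PEER_ML_ID_VALID;
			lut = &dp->ml_reoq_lut;
		}

		return (struct ath12k_reo_queue_ref *)lut->vaddr +
		       peer_id * (IEEE80211_NUM_TIDS + 1) + tid;
	}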
@@ -761,15 +768,22 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
@@ -802,7 +816,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->vaddr = NULL;
}
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
+ else
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
}
@@ -940,7 +957,13 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
- if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ if (ab->hw_params->reoq_lut_support &&
+ (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
@@ -1021,7 +1044,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+ else
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
@@ -1038,15 +1065,25 @@ err_mem_free:
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
int ret;
- ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
+ ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->ahsta->pn_type);
if (ret)
@@ -1056,19 +1093,29 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
bool active;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+ peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@@ -1650,7 +1697,11 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+ /* It is possible that the ar is not yet active (started).
+ * The above function only searches active pdevs, so a
+ * %NULL return is possible. Just silently discard this
+ * message.
+ */
goto exit;
}
@@ -2427,6 +2478,11 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
pubsta = peer ? peer->sta : NULL;
+ if (pubsta && pubsta->valid_links) {
+ status->link_valid = 1;
+ status->link_id = peer->link_id;
+ }
+
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
@@ -2548,11 +2604,14 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct sk_buff_head *msdu_list,
int ring_id)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ieee80211_rx_status rx_status = {0};
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath12k *ar;
- u8 mac_id, pdev_id;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, pdev_id;
int ret;
if (skb_queue_empty(msdu_list))
@@ -2562,15 +2621,18 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
while ((msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
- if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
+ hw_link_id = rxcb->hw_link_id;
+ partner_ab = ath12k_ag_to_ab(ag,
+ hw_links[hw_link_id].device_id);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+ if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
@@ -2615,23 +2677,29 @@ static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
- LIST_HEAD(rx_desc_used_list);
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct hal_reo_dest_ring *desc;
- int num_buffs_reaped = 0;
+ struct ath12k_base *partner_ab;
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
+ u8 hw_link_id, device_id;
struct hal_srng *srng;
struct sk_buff *msdu;
bool done = false;
- int mac_id;
u64 desc_va;
__skb_queue_head_init(&msdu_list);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
@@ -2648,18 +2716,29 @@ try_again:
cookie = le32_get_bits(desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
- mac_id = le32_get_bits(desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ hw_link_id = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
le32_to_cpu(desc->buf_va_lo));
desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ if (desc_info->skb) {
+ dev_kfree_skb_any(desc_info->skb);
+ desc_info->skb = NULL;
+ }
+
+ continue;
+ }
+
/* retry manual desc retrieval */
if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
cookie);
continue;
}
@@ -2671,14 +2750,14 @@ try_again:
msdu = desc_info->skb;
desc_info->skb = NULL;
- list_add_tail(&desc_info->list, &rx_desc_used_list);
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
@@ -2698,7 +2777,7 @@ try_again:
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->mac_id = mac_id;
+ rxcb->hw_link_id = hw_link_id;
rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
mpdu_info->peer_meta_data);
rxcb->tid = le32_get_bits(mpdu_info->info0,
@@ -2735,8 +2814,17 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
- num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
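
The reworked loop above shows the pattern this series applies to every rx path: reaped descriptors are binned by the device that owns them, since each buffer must be unmapped against and replenished into its own device's refill ring. The distilled shape (replenish() and partner() are hypothetical stand-ins for ath12k_dp_rx_bufs_replenish() and ath12k_ag_to_ab()):

	void replenish(struct ath12k_base *ab, struct list_head *used, int n);
	struct ath12k_base *partner(u8 device_id);

	static void reap_and_replenish(void)
	{
		struct list_head used[ATH12K_MAX_SOCS];
		int reaped[ATH12K_MAX_SOCS] = {};
		u8 device_id;

		for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
			INIT_LIST_HEAD(&used[device_id]);

		/* reap loop (elided): bin each descriptor by owner,
		 *   list_add_tail(&desc->list, &used[desc->device_id]);
		 *   reaped[desc->device_id]++;
		 */

		for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
			if (reaped[device_id])
				replenish(partner(device_id),
					  &used[device_id],
					  reaped[device_id]);
	}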
@@ -2781,6 +2869,12 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
return -ENOENT;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
+ return 0;
+ }
+
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
@@ -3390,7 +3484,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
goto exit;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
goto exit;
}
@@ -3420,7 +3514,10 @@ exit:
int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@@ -3428,11 +3525,11 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct hal_reo_dest_ring *reo_desc;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
- LIST_HEAD(rx_desc_used_list);
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
u32 desc_bank, num_msdus;
struct hal_srng *srng;
- struct ath12k_dp *dp;
- int mac_id;
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
@@ -3442,9 +3539,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
tot_n_bufs_reaped = 0;
quota = budget;
- dp = &ab->dp;
- reo_except = &dp->reo_except_ring;
- link_desc_banks = dp->link_desc_banks;
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+ reo_except = &ab->dp.reo_except_ring;
srng = &ab->hal.srng_list[reo_except->ring_id];
@@ -3464,16 +3562,27 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
ret);
continue;
}
+
+ hw_link_id = le32_get_bits(reo_desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+
+ link_desc_banks = partner_ab->dp.link_desc_banks;
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
- if (rbm != dp->idle_link_rbm &&
+ if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
- rbm != ab->hw_params->hal_params->rx_buf_rbm) {
+ rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
@@ -3483,26 +3592,26 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
/* Process only rx fragments with one msdu per link desc below, and drop
* msdu's indicated due to error reasons.
+ * Dynamic fragmentation is not supported for multi-link clients, so
+ * drop the partner device buffers.
*/
- if (!is_frag || num_msdus > 1) {
+ if (!is_frag || num_msdus > 1 ||
+ partner_ab->device_id != ab->device_id) {
drop = true;
+
/* Return the link desc back to wbm idle list */
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
for (i = 0; i < num_msdus; i++) {
- mac_id = le32_get_bits(reo_desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
-
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
-
if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
- &rx_desc_used_list,
+ &rx_desc_used_list[device_id],
drop,
- msdu_cookies[i]))
+ msdu_cookies[i])) {
+ num_buffs_reaped[device_id]++;
tot_n_bufs_reaped++;
+ }
}
if (tot_n_bufs_reaped >= quota) {
@@ -3518,10 +3627,17 @@ exit:
spin_unlock_bh(&srng->lock);
- rx_ring = &dp->rx_refill_buf_ring;
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
- tot_n_bufs_reaped);
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
return tot_n_bufs_reaped;
}
@@ -3738,7 +3854,8 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
- LIST_HEAD(rx_desc_used_list);
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
@@ -3748,17 +3865,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- u8 mac_id;
- int num_buffs_reaped = 0;
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
int ret, pdev_id;
struct hal_rx_desc *msdu_data;
__skb_queue_head_init(&msdu_list);
__skb_queue_head_init(&scatter_msdu_list);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
- rx_ring = &dp->rx_refill_buf_ring;
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
@@ -3794,14 +3916,27 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
msdu = desc_info->skb;
desc_info->skb = NULL;
- list_add_tail(&desc_info->list, &rx_desc_used_list);
+ device_id = desc_info->device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ dev_kfree_skb_any(msdu);
+
+ /* If the continuation bit was set in the
+ * previous record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
+ total_num_buffs_reaped++;
if (!err_info.continuation)
budget--;
@@ -3825,9 +3960,9 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
continue;
}
- mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
- msdu_data);
- if (mac_id >= MAX_RADIOS) {
+ hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
+ msdu_data);
+ if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
dev_kfree_skb_any(msdu);
/* In any case continuation bit is set
@@ -3842,7 +3977,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
skb_queue_walk(&scatter_msdu_list, msdu) {
rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->mac_id = mac_id;
+ rxcb->hw_link_id = hw_link_id;
}
skb_queue_splice_tail_init(&scatter_msdu_list,
@@ -3850,7 +3985,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->mac_id = mac_id;
+ rxcb->hw_link_id = hw_link_id;
__skb_queue_tail(&msdu_list, msdu);
}
@@ -3863,26 +3998,46 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
spin_unlock_bh(&srng->lock);
- if (!num_buffs_reaped)
+ if (!total_num_buffs_reaped)
goto done;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
- num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
rcu_read_lock();
while ((msdu = __skb_dequeue(&msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
+ hw_link_id = rxcb->hw_link_id;
+
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
+ hw_link_id, device_id);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
@@ -3890,7 +4045,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
rcu_read_unlock();
done:
- return num_buffs_reaped;
+ return total_num_buffs_reaped;
}
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
@@ -3912,7 +4067,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
ath12k_hal_srng_access_begin(ab, srng);
while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+ tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index bfd4f814553e..1ce82088c954 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -85,9 +85,11 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
index 3ff041f15fa0..273c003eff3b 100644
--- a/drivers/net/wireless/ath/ath12k/fw.h
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -23,6 +23,9 @@ enum ath12k_fw_features {
*/
ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
+ /* The firmware supports MLO capability */
+ ATH12K_FW_FEATURE_MLO,
+
/* keep last */
ATH12K_FW_FEATURE_COUNT,
};
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index fd98fac16dd5..cd59ff8e6c7b 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -181,7 +181,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
},
[HAL_TX_MONITOR_BUF] = {
- .start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 8a78bb9a10bc..94e2e8735958 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -485,8 +485,8 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
- HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+ HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
HAL_SRNG_RING_ID_PMAC1_ID_END,
};
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 739f73370015..7b0403d245e5 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -522,7 +522,7 @@ enum hal_tlv_tag {
HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */,
HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */,
HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */,
- HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */,
+ HAL_PHYRX_GENERIC_EHT_SIG = 471 /* 0x1d7 */,
HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */,
HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */,
HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */,
@@ -579,9 +579,11 @@ struct hal_tlv_hdr {
#define HAL_TLV_64_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
+#define HAL_TLV_64_USR_ID GENMASK(31, 26)
+#define HAL_TLV_64_ALIGN 8
struct hal_tlv_64_hdr {
- u64 tl;
+ __le64 tl;
u8 value[];
} __packed;
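
Typing tl as __le64 lets sparse flag any access that bypasses the le64_* helpers, which is exactly the conversion the hal_rx.c hunks below make. Composing and decoding the 64-bit TLV word then looks like this (values illustrative):

	u16 tag = 0x1d6, len = 64;
	struct hal_tlv_64_hdr hdr;

	hdr.tl = le64_encode_bits(tag, HAL_TLV_64_HDR_TAG) |
		 le64_encode_bits(len, HAL_TLV_64_HDR_LEN);

	tag = le64_get_bits(hdr.tl, HAL_TLV_64_HDR_TAG);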
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index f7c1aaa3b5d4..ac17d6223fa7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -26,8 +26,8 @@ static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_get_queue_stats *desc;
- tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
@@ -59,8 +59,8 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
hal->current_blk_index = avail_slot;
}
- tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_flush_cache *)tlv->value;
memset_startat(desc, 0, cache_addr_lo);
@@ -97,8 +97,8 @@ static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_update_rx_queue *desc;
- tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index 2de7b0eba9f2..b08aa2e79f41 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -19,7 +19,7 @@ struct hal_rx_wbm_rel_info {
bool hw_cc_done;
};
-#define HAL_INVALID_PEERID 0xffff
+#define HAL_INVALID_PEERID 0x3fff
#define VHT_SIG_SU_NSS_MASK 0x7
#define HAL_RX_MAX_MCS 12
@@ -245,6 +245,8 @@ struct hal_rx_ppdu_start {
__le32 rsvd[2];
} __packed;
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID GENMASK(13, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_DEVICE_ID GENMASK(15, 14)
#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(26, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(10, 0)
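
These hunks belong together: the peer-id word in the PPDU end user stats now carries a 14-bit peer id with the top two bits repurposed as a device id, which is why HAL_INVALID_PEERID shrinks from 0xffff to 0x3fff (all 14 peer-id bits set). Decoding the split word (illustrative):

	u32 info0 = __le32_to_cpu(eu_stats->info0);
	u16 peer_id = u32_get_bits(info0,
				   HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
	u8 device_id = u32_get_bits(info0,
				    HAL_RX_PPDU_END_USER_STATS_INFO0_DEVICE_ID);

	if (peer_id == HAL_INVALID_PEERID)	/* 0x3fff */
		return;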
@@ -299,6 +301,7 @@ struct hal_rx_ppdu_end_user_stats_ext {
__le32 info4;
__le32 info5;
__le32 info6;
+ __le32 rsvd;
} __packed;
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
@@ -395,11 +398,9 @@ struct hal_rx_he_sig_a_su_info {
#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15)
@@ -425,7 +426,7 @@ struct hal_rx_he_sig_b2_mu_info {
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
-#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
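
The TXBF correction above matters: at BIT(19) the old define aliased STA_DCM, so any code testing TXBF would really have tested DCM. BIT(14) restores a contiguous layout (STA_ID 10:0, NSTS 13:11, TXBF 14, MCS 18:15, DCM 19, CODING 20). A compile-time overlap check one could add (illustrative, not in the patch):

	static_assert(!(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF &
			HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM),
		      "TXBF and DCM bit definitions overlap");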
@@ -453,7 +454,8 @@ struct hal_rx_phyrx_rssi_legacy_info {
} __packed;
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
-#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(29, 16)
+#define HAL_RX_MPDU_START_INFO1_DEVICE_ID GENMASK(31, 30)
#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_start {
__le32 rsvd0[9];
@@ -468,7 +470,7 @@ struct hal_rx_mpdu_start {
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
__le32 info0;
- __le32 rsvd1[4];
+ __le32 rsvd1[18];
} __packed;
struct hal_rx_rxpcu_classification_overview {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index d493ec812055..2d062b5904a8 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
+#include <net/cfg80211.h>
#include <linux/etherdevice.h>
#include "mac.h"
@@ -501,6 +502,41 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
+static struct ieee80211_bss_conf *
+ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ vif->link_conf[arvif->link_id]);
+
+ return link_conf;
+}
+
+static struct ieee80211_link_sta *ath12k_mac_get_link_sta(struct ath12k_link_sta *arsta)
+{
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+
+ lockdep_assert_wiphy(ahsta->ahvif->ah->hw->wiphy);
+
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_sta = wiphy_dereference(ahsta->ahvif->ah->hw->wiphy,
+ sta->link[arsta->link_id]);
+
+ return link_sta;
+}
+
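
Every per-link hunk later in this file funnels through these two accessors with the same guard. The canonical caller shape, condensed from the hunks below:

	link_conf = ath12k_mac_get_link_bss_conf(arvif);
	if (!link_conf) {
		ath12k_warn(ar->ab,
			    "unable to access bss link conf for vif %pM link %u\n",
			    vif->addr, arvif->link_id);
		return -ENOLINK;
	}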
static bool ath12k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -648,6 +684,18 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
}
+static bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (ahvif->vif->valid_links & BIT(arvif->link_id))
+ return true;
+
+ return false;
+}
+
static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
struct ieee80211_channel *channel)
{
@@ -661,8 +709,8 @@ static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
return ar;
for_each_ar(ah, ar, i) {
- if (channel->center_freq >= ar->freq_low &&
- channel->center_freq <= ar->freq_high)
+ if (channel->center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
+ channel->center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
return ar;
}
return NULL;
@@ -678,11 +726,14 @@ static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
}
static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u8 link_id)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(hw->wiphy);
/* If there is one pdev within ah, then we return
* ar directly.
@@ -690,12 +741,27 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
if (ah->num_radio == 1)
return ah->radio;
- if (arvif->is_created)
+ if (!(ahvif->links_map & BIT(link_id)))
+ return NULL;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (arvif && arvif->is_created)
return arvif->ar;
return NULL;
}
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct ath12k_mac_get_any_chanctx_conf_arg *arg = data;
+ struct ath12k *ctx_ar = ath12k_get_ar_by_ctx(hw, conf);
+
+ if (ctx_ar == arg->ar)
+ arg->chanctx_conf = conf;
+}
+
static struct ath12k_link_vif *ath12k_mac_get_vif_up(struct ath12k *ar)
{
struct ath12k_link_vif *arvif;
@@ -1239,7 +1305,7 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ath12k *ar = arvif->ar;
@@ -1269,8 +1335,8 @@ static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
ahvif->vif->addr, arvif->vdev_id);
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
arvif->vdev_id);
}
@@ -1486,7 +1552,7 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu
static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_bss_conf *bss_conf = &ahvif->vif->bss_conf;
+ struct ieee80211_bss_conf *bss_conf;
struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
struct ieee80211_ema_beacons *beacons;
struct ath12k_link_vif *tx_arvif;
@@ -1495,10 +1561,19 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
int ret = 0;
u8 i;
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
tx_arvif = &tx_ahvif->deflink;
beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
- tx_ahvif->vif, 0);
+ tx_ahvif->vif,
+ tx_arvif->link_id);
if (!beacons || !beacons->cnt) {
ath12k_warn(arvif->ar->ab,
"failed to get ema beacon templates from mac80211\n");
@@ -1540,6 +1615,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *tx_arvif = arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
@@ -1552,18 +1628,25 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
if (ahvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf to set bcn tmpl for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
if (vif->mbssid_tx_vif) {
tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
tx_arvif = &tx_ahvif->deflink;
if (tx_arvif != arvif && arvif->is_up)
return 0;
- if (vif->bss_conf.ema_ap)
+ if (link_conf->ema_ap)
return ath12k_mac_setup_bcn_tmpl_ema(arvif);
}
bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif,
- &offs, 0);
+ &offs, tx_arvif->link_id);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
@@ -1573,7 +1656,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
} else {
ath12k_mac_set_arvif_ies(arvif, bcn,
- ahvif->vif->bss_conf.bssid_index,
+ link_conf->bssid_index,
&nontx_profile_found);
if (!nontx_profile_found)
ath12k_warn(ab,
@@ -1644,7 +1727,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
ahvif->aid = 0;
- ether_addr_copy(arvif->bssid, info->bssid);
+ ether_addr_copy(arvif->bssid, info->addr);
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
@@ -1749,6 +1832,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ieee80211_bss_conf *bss_conf;
u32 aid;
lockdep_assert_wiphy(hw->wiphy);
@@ -1758,14 +1842,22 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
else
aid = sta->aid;
- ether_addr_copy(arg->peer_mac, sta->addr);
+ ether_addr_copy(arg->peer_mac, arsta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
- arg->peer_caps = vif->bss_conf.assoc_capability;
+
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ arg->peer_caps = bss_conf->assoc_capability;
}
static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
@@ -1775,7 +1867,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct ieee80211_bss_conf *info;
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
@@ -1784,6 +1876,13 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
lockdep_assert_wiphy(hw->wiphy);
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc crypto for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
@@ -1839,6 +1938,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
@@ -1853,9 +1953,16 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc rates for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
band = def.chan->band;
sband = hw->wiphy->bands[band];
- ratemask = sta->deflink.supp_rates[band];
+ ratemask = link_sta->supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -1902,7 +2009,8 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -1915,6 +2023,14 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ ht_cap = &link_sta->ht_cap;
if (!ht_cap->ht_supported)
return;
@@ -1938,7 +2054,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
@@ -1988,7 +2104,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -2064,7 +2180,8 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@@ -2078,6 +2195,14 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ vht_cap = &link_sta->vht_cap;
if (!vht_cap->vht_supported)
return;
@@ -2110,10 +2235,10 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
/* Calculate peer NSS capability from VHT capabilities if STA
@@ -2127,7 +2252,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -2150,7 +2275,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
arg->tx_max_mcs_nss = 0xFF;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ arsta->addr, arg->peer_max_mpdu, arg->peer_flags);
/* TODO: rxnss_override */
}
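The peer NSS derivation above walks the VHT RX MCS map two bits per spatial stream, where the value 0x3 marks an unsupported stream; a self-contained worked example of the same decode (without the driver's additional vht_mcs_mask filtering):

static u8 example_vht_mcs_map_to_nss(u16 mcs_map)
{
	u8 nss, max_nss = 1;

	for (nss = 0; nss < 8; nss++)
		if (((mcs_map >> (2 * nss)) & 0x3) != 0x3)
			max_nss = nss + 1;

	/* e.g. 0xfffa (streams 1-2 capped at MCS 0-9, the rest
	 * unsupported) returns 2
	 */
	return max_nss;
}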
@@ -2162,7 +2287,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
int i;
u8 ampdu_factor, max_nss;
u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
@@ -2171,6 +2298,21 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
bool support_160;
u16 v;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_cap = &link_sta->he_cap;
if (!he_cap->has_he)
return;
@@ -2208,13 +2350,13 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
else
max_nss = rx_mcs_80;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
sizeof(he_cap->he_cap_elem.mac_cap_info));
memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
sizeof(he_cap->he_cap_elem.phy_cap_info));
- arg->peer_he_ops = vif->bss_conf.he_oper.params;
+ arg->peer_he_ops = link_conf->he_oper.params;
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
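A worked example of the masking above: with he_oper.params == 0x3f00abcd, the top byte 0x3f carries the BSS color and is stripped, so the firmware receives 0x00abcd:

	u32 params = 0x3f00abcd;
	u32 peer_he_ops = params & 0xffffff;	/* == 0x00abcd */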
@@ -2235,10 +2377,10 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
- if (sta->deflink.vht_cap.vht_supported)
+ if (link_sta->vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
- else if (sta->deflink.ht_cap.ht_supported)
+ else if (link_sta->ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
@@ -2279,7 +2421,7 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
@@ -2319,7 +2461,8 @@ static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 ampdu_factor, mpdu_density;
@@ -2329,22 +2472,31 @@ static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
band = def.chan->band;
- if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he 6ghz for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
return;
+ }
+
+ he_cap = &link_sta->he_cap;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa)
+ return;
+
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
arg->bw_40 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
arg->bw_320 = true;
- arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+ arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa);
mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -2388,10 +2540,23 @@ static void ath12k_peer_assoc_h_smps(struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa;
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ struct ath12k_link_vif *arvif = arsta->arvif;
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k *ar = arvif->ar;
int smps;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc smps for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_6ghz_capa = &link_sta->he_6ghz_capa;
+ ht_cap = &link_sta->ht_cap;
+
if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
return;
@@ -2446,7 +2611,7 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
- sta->addr, arg->qos_flag);
+ arsta->addr, arg->qos_flag);
}
static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
@@ -2486,26 +2651,26 @@ static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
arg.value = uapsd;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
arg.value = max_sp;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
/* TODO: revisit during testing */
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
@@ -2517,17 +2682,17 @@ err:
return ret;
}
-static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_link_sta *sta)
{
- return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
ATH12K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->deflink.vht_cap.cap &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (link_sta->vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
@@ -2539,74 +2704,74 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
}
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
- else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
- if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
+ if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] &
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
return MODE_11BE_EHT320;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11BE_EHT160;
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11BE_EHT80_80;
ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
- sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]);
+ link_sta->he_cap.he_cap_elem.phy_cap_info[0]);
return MODE_11BE_EHT160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11BE_EHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11BE_EHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11BE_EHT20;
return MODE_UNKNOWN;
@@ -2617,6 +2782,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -2635,33 +2801,40 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc phymode for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->deflink.eht_cap.has_eht) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->eht_cap.has_eht) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11BE_EHT40_2G;
else
phymode = MODE_11BE_EHT20_2G;
- } else if (sta->deflink.he_cap.has_he) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ } else if (link_sta->he_cap.has_he) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
- else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
- } else if (sta->deflink.vht_cap.vht_supported &&
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->deflink.ht_cap.ht_supported &&
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+ } else if (ath12k_mac_sta_has_ofdm_only(link_sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
@@ -2670,16 +2843,16 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check EHT first */
- if (sta->deflink.eht_cap.has_eht) {
- phymode = ath12k_mac_get_phymode_eht(ar, sta);
- } else if (sta->deflink.he_cap.has_he) {
- phymode = ath12k_mac_get_phymode_he(ar, sta);
- } else if (sta->deflink.vht_cap.vht_supported &&
+ if (link_sta->eht_cap.has_eht) {
+ phymode = ath12k_mac_get_phymode_eht(ar, link_sta);
+ } else if (link_sta->he_cap.has_he) {
+ phymode = ath12k_mac_get_phymode_he(ar, link_sta);
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- phymode = ath12k_mac_get_phymode_vht(ar, sta);
- } else if (sta->deflink.ht_cap.ht_supported &&
+ phymode = ath12k_mac_get_phymode_vht(ar, link_sta);
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -2692,7 +2865,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
- sta->addr, ath12k_mac_phymode_str(phymode));
+ arsta->addr, ath12k_mac_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
@@ -2767,15 +2940,25 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20;
const struct ieee80211_eht_mcs_nss_supp_bw *bw;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
u32 *rx_mcs, *tx_mcs;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht)
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ eht_cap = &link_sta->eht_cap;
+ he_cap = &link_sta->he_cap;
+ if (!he_cap->has_he || !eht_cap->has_eht)
return;
arg->eht_flag = true;
@@ -2794,7 +2977,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
rx_mcs = arg->peer_eht_rx_mcs_set;
tx_mcs = arg->peer_eht_tx_mcs_set;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_320:
bw = &eht_cap->eht_mcs_nss_supp.bw._320;
ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
@@ -2846,6 +3029,67 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
arg->punct_bitmap = ~arvif->punct_bitmap;
}
+static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct peer_assoc_mlo_params *ml = &arg->ml;
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ath12k_link_sta *arsta_p;
+ struct ath12k_link_vif *arvif;
+ unsigned long links;
+ u8 link_id;
+ int i;
+
+ if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID)
+ return;
+
+ ml->enabled = true;
+ ml->assoc_link = arsta->is_assoc_link;
+
+	/* For now, choose the primary umac based on the assoc link */
+ ml->primary_umac = arsta->is_assoc_link;
+ ml->peer_id_valid = true;
+ ml->logical_link_idx_valid = true;
+
+ ether_addr_copy(ml->mld_addr, sta->addr);
+ ml->logical_link_idx = arsta->link_idx;
+ ml->ml_peer_id = ahsta->ml_peer_id;
+ ml->ieee_link_id = arsta->link_id;
+ ml->num_partner_links = 0;
+ links = ahsta->links_map;
+
+ rcu_read_lock();
+
+ i = 0;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (i >= ATH12K_WMI_MLO_MAX_LINKS)
+ break;
+
+ arsta_p = rcu_dereference(ahsta->link[link_id]);
+ arvif = rcu_dereference(ahsta->ahvif->link[link_id]);
+
+		/* skip links without a published arsta/arvif */
+		if (!arsta_p || !arvif || arsta_p == arsta)
+			continue;
+
+ if (!arvif->is_started)
+ continue;
+
+ ml->partner_info[i].vdev_id = arvif->vdev_id;
+ ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id;
+ ml->partner_info[i].assoc_link = arsta_p->is_assoc_link;
+ ml->partner_info[i].primary_umac = arsta_p->is_assoc_link;
+ ml->partner_info[i].logical_link_idx_valid = true;
+ ml->partner_info[i].logical_link_idx = arsta_p->link_idx;
+ ml->num_partner_links++;
+
+ i++;
+ }
+
+ rcu_read_unlock();
+}
+
static void ath12k_peer_assoc_prepare(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta,
@@ -2870,6 +3114,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg);
ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg);
ath12k_peer_assoc_h_smps(arsta, arg);
+ ath12k_peer_assoc_h_mlo(arsta, arg);
/* TODO: amsdu_disable req? */
}
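ath12k_peer_assoc_h_mlo() above walks ahsta->links_map, a bitmap with one bit per live link station; a minimal illustration of the iteration (values invented):

static void example_walk_links(void)
{
	unsigned long links = 0x5;	/* links 0 and 2 present */
	u8 link_id;

	/* visits link_id 0, then 2 */
	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS)
		pr_info("would fill partner_info[] for link %u\n",
			link_id);
}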
@@ -2900,7 +3145,8 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_up_params params = {};
- struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_link_sta *link_sta;
+ u8 link_id = bss_conf->link_id;
struct ath12k_link_sta *arsta;
struct ieee80211_sta *ap_sta;
struct ath12k_sta *ahsta;
@@ -2910,32 +3156,48 @@ static void ath12k_bss_assoc(struct ath12k *ar,
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
- arvif->vdev_id, arvif->bssid, ahvif->aid);
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %i link id %u assoc bssid %pM aid %d\n",
+ arvif->vdev_id, link_id, arvif->bssid, ahvif->aid);
rcu_read_lock();
- ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ /* During ML connection, cfg.ap_addr has the MLD address. For
+ * non-ML connection, it has the BSSID.
+ */
+ ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!ap_sta) {
ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
- bss_conf->bssid, arvif->vdev_id);
+ vif->cfg.ap_addr, arvif->vdev_id);
rcu_read_unlock();
return;
}
ahsta = ath12k_sta_to_ahsta(ap_sta);
- arsta = &ahsta->deflink;
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
if (WARN_ON(!arsta)) {
rcu_read_unlock();
return;
}
- ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, false);
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (WARN_ON(!link_sta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, false);
rcu_read_unlock();
- ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
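peer_arg is now heap-allocated via the scoped-cleanup attribute from <linux/cleanup.h>, which keeps the large WMI argument off the stack while freeing it automatically on every exit path; a minimal sketch of the idiom (the struct name here is a placeholder):

#include <linux/cleanup.h>
#include <linux/slab.h>

struct example_big_arg { u8 payload[512]; };

static int example_scoped_alloc(void)
{
	struct example_big_arg *arg __free(kfree) =
		kzalloc(sizeof(*arg), GFP_KERNEL);

	if (!arg)
		return -ENOMEM;

	/* ... use arg; kfree(arg) runs implicitly when arg goes
	 * out of scope, including on early returns.
	 */
	return 0;
}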
@@ -2949,8 +3211,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->deflink.ht_cap,
- &ap_sta->deflink.he_6ghz_capa);
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3058,6 +3319,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
+ struct ieee80211_bss_conf *bss_conf;
u8 basic_rate_idx;
int hw_rate_code;
u32 vdev_param;
@@ -3066,8 +3328,15 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
lockdep_assert_wiphy(hw->wiphy);
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in mgmt rate calc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
sband = hw->wiphy->bands[def->chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ basic_rate_idx = ffs(bss_conf->basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
@@ -3151,6 +3420,7 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
+ struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
u8 link_id;
@@ -3171,10 +3441,15 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
ar = arvif->ar;
- if (vif->cfg.assoc)
- ath12k_bss_assoc(ar, arvif, &vif->bss_conf);
- else
+ if (vif->cfg.assoc) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info)
+ continue;
+
+ ath12k_bss_assoc(ar, arvif, info);
+ } else {
ath12k_bss_disassoc(ar, arvif);
+ }
}
}
}
@@ -3185,6 +3460,7 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
struct ieee80211_vif *vif = arvif->ahvif->vif;
struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
@@ -3202,8 +3478,15 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
/* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
}
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
@@ -3314,8 +3597,8 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
ath12k_control_beaconing(arvif, info);
- if (arvif->is_up && vif->bss_conf.he_support &&
- vif->bss_conf.he_oper.params) {
+ if (arvif->is_up && info->he_support &&
+ info->he_oper.params) {
/* TODO: Extend to support 1024 BA Bitmap size */
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
@@ -3326,7 +3609,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
arvif->vdev_id);
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
- param_value = vif->bss_conf.he_oper.params;
+ param_value = info->he_oper.params;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
@@ -3418,12 +3701,12 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) {
band = def.chan->band;
- mcast_rate = vif->bss_conf.mcast_rate[band];
+ mcast_rate = info->mcast_rate[band];
if (mcast_rate > 0)
rateidx = mcast_rate - 1;
else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(info->basic_rates) - 1;
if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
@@ -3537,6 +3820,9 @@ static void ath12k_ahvif_put_link_key_cache(struct ath12k_vif_cache *cache)
static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id)
{
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return;
+
ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]);
kfree(ahvif->cache[link_id]);
ahvif->cache[link_id] = NULL;
@@ -3597,9 +3883,9 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
arvif = &ahvif->deflink;
} else {
/* If this is the first link arvif being created for an ML VIF
- * use the preallocated deflink memory
+ * use the preallocated deflink memory except for scan arvifs
*/
- if (!ahvif->links_map) {
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
arvif = &ahvif->deflink;
} else {
arvif = (struct ath12k_link_vif *)
@@ -3730,22 +4016,9 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
ieee80211_remain_on_channel_expired(hw);
fallthrough;
case ATH12K_SCAN_STARTING:
- if (!ar->scan.is_roc) {
- struct cfg80211_scan_info info = {
- .aborted = ((ar->scan.state ==
- ATH12K_SCAN_ABORTING) ||
- (ar->scan.state ==
- ATH12K_SCAN_STARTING)),
- };
-
- ieee80211_scan_completed(hw, &info);
- }
-
- ar->scan.state = ATH12K_SCAN_IDLE;
- ar->scan_channel = NULL;
- ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
complete(&ar->scan.completed);
+ wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk);
break;
}
}
@@ -3786,15 +4059,15 @@ static int ath12k_scan_stop(struct ath12k *ar)
}
out:
- /* Scan state should be updated upon scan completion but in case
- * firmware fails to deliver the event (for whatever reason) it is
- * desired to clean up scan state anyway. Firmware may have just
- * dropped the scan completion event delivery due to transport pipe
- * being overflown with data and/or it can recover on its own before
- * next scan request is submitted.
+ /* Scan state should be updated in scan completion worker but in
+ * case firmware fails to deliver the event (for whatever reason)
+ * it is desired to clean up scan state anyway. Firmware may have
+ * just dropped the scan completion event delivery due to transport
+	 * pipe being overflowed with data and/or it can recover on its own
+ * before next scan request is submitted.
*/
spin_lock_bh(&ar->data_lock);
- if (ar->scan.state != ATH12K_SCAN_IDLE)
+ if (ret)
__ath12k_mac_scan_finish(ar);
spin_unlock_bh(&ar->data_lock);
@@ -3845,6 +4118,53 @@ static void ath12k_scan_timeout_work(struct work_struct *work)
wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
+static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ scan.vdev_clean_wk);
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(wiphy);
+
+ arvif = ar->scan.arvif;
+
+	/* The scan vdev may already have been deleted. This can occur when
+	 * a new scan request is made on the same vif with a different
+	 * frequency, causing the scan arvif to move from one radio to
+	 * another, or when the scan was aborted and the arvif was already
+	 * deleted via the remove-interface path. Also, if the scan vdev
+	 * has been started as an actual vdev, do not delete it.
+	 */
+ if (!arvif || arvif->is_started)
+ goto work_complete;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac clean scan vdev (link id %u)",
+ arvif->link_id);
+
+ ath12k_mac_remove_link_interface(ah->hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+
+work_complete:
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.arvif = NULL;
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = ((ar->scan.state ==
+ ATH12K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH12K_SCAN_STARTING)),
+ };
+
+ ieee80211_scan_completed(ar->ah->hw, &info);
+ }
+
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
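The completion path now only queues ar->scan.vdev_clean_wk; cfg80211 invokes wiphy_work handlers with the wiphy mutex held, which is what lets the worker above call the interface-removal helpers. A minimal sketch of the pattern (names illustrative):

static void example_clean_wk(struct wiphy *wiphy, struct wiphy_work *work)
{
	lockdep_assert_wiphy(wiphy);	/* runs under the wiphy mutex */
	/* ... teardown that needs wiphy-locked context ... */
}

static void example_arm_and_queue(struct ieee80211_hw *hw,
				  struct wiphy_work *wk)
{
	wiphy_work_init(wk, example_clean_wk);
	/* queueing is safe from atomic context, e.g. under a spinlock */
	wiphy_work_queue(hw->wiphy, wk);
}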
static int ath12k_start_scan(struct ath12k *ar,
struct ath12k_wmi_scan_req_arg *arg)
{
@@ -3899,10 +4219,10 @@ ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
return link_id;
}
- /* input ar is not assigned to any of the links, use link id
- * 0 for scan vdev creation.
+	/* The input ar is not assigned to any of the links of the ML VIF;
+	 * use the scan link (15) for scan vdev creation.
+	 */
- return 0;
+ return ATH12K_DEFAULT_SCAN_LINK;
}
static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
@@ -3933,11 +4253,14 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
/* check if any of the links of ML VIF is already started on
	 * radio (ar) corresponding to the given scan frequency and use it,
- * if not use deflink(link 0) for scan purpose.
+ * if not use scan link (link 15) for scan purpose.
*/
link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac link ID %d selected for scan",
+ arvif->link_id);
+
/* If the vif is already assigned to a specific vdev of an ar,
* check whether its already started, vdev which is started
* are not allowed to switch to a new radio.
@@ -3961,6 +4284,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
create = false;
}
}
+
if (create) {
/* Previous arvif would've been cleared in radio switch block
* above, assign arvif again for create.
@@ -3981,7 +4305,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
reinit_completion(&ar->scan.completed);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = false;
- ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.arvif = arvif;
ret = 0;
break;
case ATH12K_SCAN_STARTING:
@@ -4043,6 +4367,15 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
+
+	/* As per cfg80211/mac80211 scan design, only one scan is
+	 * allowed at a time. Hence last_scan_link is used to track
+	 * the link id on which the scan is being done on this vif.
+	 */
+ ahvif->last_scan_link = arvif->link_id;
+
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg->max_scan_time +
@@ -4062,14 +4395,14 @@ static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ u16 link_id = ahvif->last_scan_link;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
-
- if (!arvif->is_created)
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || arvif->is_started)
return;
ar = arvif->ar;
@@ -4203,6 +4536,7 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_sta *sta = NULL;
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
@@ -4219,12 +4553,19 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
if (sta)
- peer_addr = sta->addr;
+ peer_addr = arsta->addr;
else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
- peer_addr = vif->bss_conf.bssid;
+ peer_addr = link_conf->bssid;
else
- peer_addr = vif->addr;
+ peer_addr = link_conf->addr;
key->hw_key_idx = key->keyidx;
@@ -4316,7 +4657,23 @@ static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath12k_key_conf *key_conf = NULL, *tmp;
+ struct ath12k_key_conf *key_conf, *tmp;
+
+ list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
+ if (key_conf->key != key)
+ continue;
+
+ /* If SET key entry is already present in cache, nothing to do,
+ * just return
+ */
+ if (cmd == SET_KEY)
+ return 0;
+
+ /* DEL key for an old SET key which driver hasn't flushed yet.
+ */
+ list_del(&key_conf->list);
+ kfree(key_conf);
+ }
if (cmd == SET_KEY) {
key_conf = kzalloc(sizeof(*key_conf), GFP_KERNEL);
@@ -4330,17 +4687,7 @@ static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
list_add_tail(&key_conf->list,
&cache->key_conf.list);
}
- if (list_empty(&cache->key_conf.list))
- return 0;
- list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
- if (key_conf->key == key) {
- /* DEL key for an old SET key which driver hasn't flushed yet.
- */
- list_del(&key_conf->list);
- kfree(key_conf);
- break;
- }
- }
+
return 0;
}
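The reworked cache update relies on the _safe list iterator so matching entries can be unlinked and freed mid-walk; a self-contained sketch of that pattern (types illustrative):

struct example_cached_key {
	struct list_head list;
	void *key;
};

static void example_drop_stale(struct list_head *head, void *key)
{
	struct example_cached_key *c, *tmp;

	/* tmp caches the next node, so list_del() + kfree() of the
	 * current entry cannot derail the iteration.
	 */
	list_for_each_entry_safe(c, tmp, head, list) {
		if (c->key != key)
			continue;
		list_del(&c->list);
		kfree(c);
	}
}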
@@ -4372,6 +4719,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta) {
ahsta = ath12k_sta_to_ahsta(sta);
+
/* For an ML STA Pairwise key is same for all associated link Stations,
* hence do set key for all link STAs which are active.
*/
@@ -4394,41 +4742,47 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (ret)
break;
}
- } else {
- arsta = &ahsta->deflink;
- arvif = arsta->arvif;
- if (WARN_ON(!arvif)) {
- ret = -EINVAL;
- goto out;
- }
- ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
- }
- } else {
- if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
- link_id = key->link_id;
- arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- } else {
- link_id = 0;
- arvif = &ahvif->deflink;
+ return 0;
}
- if (!arvif || !arvif->is_created) {
- cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
- if (!cache)
- return -ENOSPC;
+ arsta = &ahsta->deflink;
+ arvif = arsta->arvif;
+ if (WARN_ON(!arvif))
+ return -EINVAL;
- ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+ if (ret)
+ return ret;
+ return 0;
+ }
+
+ if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_id = key->link_id;
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ } else {
+ link_id = 0;
+ arvif = &ahvif->deflink;
+ }
+
+ if (!arvif || !arvif->is_created) {
+ cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
+ if (!cache)
+ return -ENOSPC;
+
+ ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
+ if (ret)
return ret;
- }
- ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
+ return 0;
}
-out:
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
static int
@@ -4451,7 +4805,6 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
- struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k *ar = arvif->ar;
u8 vht_rate, nss;
u32 rate_code;
@@ -4470,67 +4823,76 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
if (!nss) {
ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
- sta->addr);
+ arsta->addr);
return -EINVAL;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
- sta->addr);
+ arsta->addr);
rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
WMI_RATE_PREAMBLE_VHT);
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath12k_warn(ar->ab,
"failed to update STA %pM Fixed Rate %d: %d\n",
- sta->addr, rate_code, ret);
+ arsta->addr, rate_code, ret);
return ret;
}
-static int ath12k_station_assoc(struct ath12k *ar,
- struct ath12k_link_vif *arvif,
- struct ath12k_link_sta *arsta,
- bool reassoc)
+static int ath12k_mac_station_assoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ bool reassoc)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_link_sta *link_sta;
int ret;
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
u8 num_vht_rates;
+ u8 link_id = arvif->link_id;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return -EPERM;
+ if (WARN_ON(!rcu_access_pointer(sta->link[link_id])))
+ return -EINVAL;
+
band = def.chan->band;
mask = &arvif->bitrate_mask;
- ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, reassoc);
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return -ENOMEM;
+
+ ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, reassoc);
- if (peer_arg.peer_nss < 1) {
+ if (peer_arg->peer_nss < 1) {
ath12k_warn(ar->ab,
- "invalid peer NSS %d\n", peer_arg.peer_nss);
+ "invalid peer NSS %d\n", peer_arg->peer_nss);
return -EINVAL;
}
- ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return -ETIMEDOUT;
}
@@ -4541,7 +4903,13 @@ static int ath12k_station_assoc(struct ath12k *ar,
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in station assoc\n");
+ return -EINVAL;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
if (ret)
@@ -4554,9 +4922,8 @@ static int ath12k_station_assoc(struct ath12k *ar,
if (reassoc)
return 0;
- ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->deflink.ht_cap,
- &sta->deflink.he_6ghz_capa);
+ ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr,
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -4574,7 +4941,7 @@ static int ath12k_station_assoc(struct ath12k *ar,
ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta);
if (ret) {
ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
}
@@ -4582,33 +4949,25 @@ static int ath12k_station_assoc(struct ath12k *ar,
return 0;
}
-static int ath12k_station_disassoc(struct ath12k *ar,
- struct ath12k_link_vif *arvif,
- struct ath12k_link_sta *arsta)
+static int ath12k_mac_station_disassoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (!sta->wme) {
arvif->num_legacy_stations--;
- ret = ath12k_recalc_rtscts_prot(arvif);
- if (ret)
- return ret;
+ return ath12k_recalc_rtscts_prot(arvif);
}
- ret = ath12k_clear_peer_keys(arvif, sta->addr);
- if (ret) {
- ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
return 0;
}
static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
+ struct ieee80211_link_sta *link_sta;
struct ath12k *ar;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -4619,7 +4978,6 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
u32 changed, bw, nss, smps, bw_prev;
int err, num_vht_rates;
const struct cfg80211_bitrate_mask *mask;
- struct ath12k_wmi_peer_assoc_arg peer_arg;
enum wmi_phy_mode peer_phymode;
struct ath12k_link_sta *arsta;
struct ieee80211_vif *vif;
@@ -4655,9 +5013,14 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
ath12k_mac_max_vht_nss(vht_mcs_mask)));
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return;
+
if (changed & IEEE80211_RC_BW_CHANGED) {
- ath12k_peer_assoc_h_phymode(ar, arvif, arsta, &peer_arg);
- peer_phymode = peer_arg.peer_phymode;
+ ath12k_peer_assoc_h_phymode(ar, arvif, arsta, peer_arg);
+ peer_phymode = peer_arg->peer_phymode;
if (bw > bw_prev) {
/* Phymode shows maximum supported channel width, if we
@@ -4666,65 +5029,65 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* WMI_PEER_CHWIDTH
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
+ arsta->addr, peer_phymode, err);
return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
- sta->addr, bw, err);
+ arsta->addr, bw, err);
} else {
/* When we downgrade bandwidth this will conflict with phymode
* and cause to trigger firmware crash. In this case we send
* WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
- sta->addr, bw, err);
+ arsta->addr, bw, err);
return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
+ arsta->addr, peer_phymode, err);
}
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
- sta->addr, nss);
+ arsta->addr, nss);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_NSS, nss);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
- sta->addr, nss, err);
+ arsta->addr, nss, err);
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
- sta->addr, smps);
+ arsta->addr, smps);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE, smps);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
- sta->addr, smps, err);
+ arsta->addr, smps, err);
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
@@ -4743,7 +5106,14 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* TODO: Check RATEMASK_CMDID to support auto rates selection
* across HT/VHT and for multiple VHT MCS support.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+			ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
} else {
@@ -4752,20 +5122,49 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* other rates using peer_assoc command.
*/
ath12k_peer_assoc_prepare(ar, arvif, arsta,
- &peer_arg, true);
+ peer_arg, true);
- err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ err = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (err)
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, err);
+ arsta->addr, arvif->vdev_id, err);
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
}
}
}
+static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ u8 link_id)
+{
+ struct ath12k_link_sta *arsta;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (WARN_ON(!arsta))
+ return;
+
+ ahsta->links_map &= ~BIT(link_id);
+ rcu_assign_pointer(ahsta->link[link_id], NULL);
+ synchronize_rcu();
+
+ if (arsta == &ahsta->deflink) {
+ arsta->link_id = ATH12K_INVALID_LINK_ID;
+ arsta->ahsta = NULL;
+ arsta->arvif = NULL;
+ return;
+ }
+
+ kfree(arsta);
+}
+
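ath12k_mac_free_unassign_link_sta() above follows the standard RCU unpublish-then-free order: clear the published pointer, wait a grace period, then reclaim, so no reader can still hold the link sta when it is freed. A generic sketch of the rule (not driver code):

static DEFINE_MUTEX(example_lock);

struct example_obj { int x; };

static void example_rcu_remove(struct example_obj __rcu **slot)
{
	struct example_obj *old;

	old = rcu_dereference_protected(*slot,
					lockdep_is_held(&example_lock));
	rcu_assign_pointer(*slot, NULL);	/* 1) unpublish */
	synchronize_rcu();			/* 2) wait out readers */
	kfree(old);				/* 3) reclaim */
}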
static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
{
@@ -4799,6 +5198,144 @@ static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif,
ar->num_stations--;
}
+static void ath12k_mac_station_post_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_peer *peer;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ath12k_mac_dec_num_stations(arvif, arsta);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer && peer->sta == sta) {
+		ath12k_warn(ar->ab, "Found peer entry %pM in vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+}
+
+static int ath12k_mac_station_unauthorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ /* Driver must clear the keys during the state change from
+ * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after
+ * returning from here, mac80211 is going to delete the keys
+ * in __sta_info_destroy_part2(). This will ensure that the driver does
+ * not retain stale key references after mac80211 deletes the keys.
+ */
+ ret = ath12k_clear_peer_keys(arvif, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_authorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ arsta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ int ret = 0;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ wiphy_work_cancel(ar->ah->hw->wiphy, &arsta->update_wk);
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (sta->mlo)
+ return ret;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ if (sta->valid_links)
+ ath12k_mac_free_unassign_link_sta(ahvif->ah,
+ arsta->ahsta, arsta->link_id);
+
+ return ret;
+}
+
static int ath12k_mac_station_add(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
@@ -4806,7 +5343,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -4824,34 +5361,35 @@ static int ath12k_mac_station_add(struct ath12k *ar,
}
peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
+ peer_param.peer_addr = arsta->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ peer_param.ml_enabled = sta->mlo;
ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
if (ret) {
ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
goto free_peer;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
if (ieee80211_vif_is_mesh(vif)) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
goto free_peer;
}
}
- ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, arsta->addr);
if (ret) {
ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
goto free_peer;
}
@@ -4868,7 +5406,9 @@ static int ath12k_mac_station_add(struct ath12k *ar,
return 0;
free_peer:
- ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
dec_num_station:
ath12k_mac_dec_num_stations(arvif, arsta);
exit:
@@ -4906,101 +5446,131 @@ static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
return bw;
}
-static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_link_sta *arsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
{
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k *ar;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return -EINVAL;
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (!arvif)
+ return -EINVAL;
+
+ memset(arsta, 0, sizeof(*arsta));
+
+ link_sta = wiphy_dereference(ah->hw->wiphy, sta->link[link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
+ ether_addr_copy(arsta->addr, link_sta->addr);
+
+ /* logical index of the link sta in order of creation */
+ arsta->link_idx = ahsta->num_peer++;
+
+ arsta->link_id = link_id;
+ ahsta->links_map |= BIT(arsta->link_id);
+ arsta->arvif = arvif;
+ arsta->ahsta = ahsta;
+ ahsta->ahvif = ahvif;
+
+ wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+ rcu_assign_pointer(ahsta->link[link_id], arsta);
+
+ return 0;
+}
+
+static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif,
+ struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
- struct ath12k_peer *peer;
+ unsigned long links;
+ struct ath12k *ar;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ ath12k_peer_mlo_link_peers_delete(ahvif, ahsta);
+
+ /* validate link station removal and clear arsta links */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ }
+
+ ath12k_peer_ml_delete(ah, sta);
+}
+
+static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k *ar = arvif->ar;
int ret = 0;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
- arsta = &ahsta->deflink;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
+ arsta->link_id, arsta->addr, old_state, new_state);
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar) {
- WARN_ON_ONCE(1);
- return -EINVAL;
+ /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
+ * from the driver
+ */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath12k_mac_station_remove(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ goto exit;
+ }
}
+ /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: Add new station to driver */
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- rcu_assign_pointer(ahsta->link[0], arsta);
- /* TODO use appropriate link id once MLO support is added */
- arsta->link_id = ATH12K_DEFAULT_LINK_ID;
- ahsta->links_map = BIT(arsta->link_id);
- arsta->ahsta = ahsta;
- arsta->arvif = arvif;
- wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
-
- synchronize_rcu();
-
ret = ath12k_mac_station_add(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- wiphy_work_cancel(hw->wiphy, &arsta->update_wk);
+ arsta->addr, arvif->vdev_id);
- if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ath12k_bss_disassoc(ar, arvif);
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
- }
- ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
- ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- ath12k_mac_dec_num_stations(arvif, arsta);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
- ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
-
- if (arsta->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
- rcu_assign_pointer(ahsta->link[arsta->link_id], NULL);
- synchronize_rcu();
- ahsta->links_map &= ~(BIT(arsta->link_id));
- arsta->link_id = ATH12K_INVALID_LINK_ID;
- arsta->ahsta = NULL;
- }
+ /* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for
+ * a peer associated with an AP/Mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_assoc(ar, arvif, arsta, false);
+ ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
if (ret)
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
+ arsta->addr);
spin_lock_bh(&ar->data_lock);
@@ -5008,45 +5578,154 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
arsta->bw_prev = sta->deflink.bandwidth;
spin_unlock_bh(&ar->data_lock);
+
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
+ * authorized
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
+ ret = ath12k_mac_station_authorize(ar, arvif, arsta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
+ arsta->addr);
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
+ /* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: the station may be
+ * about to be removed, deauthorize it.
+ */
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
+ ath12k_mac_station_unauthorize(ar, arvif, arsta);
- spin_unlock_bh(&ar->ab->base_lock);
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH: disassoc peer connected to
+ * AP/mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_disassoc(ar, arvif, arsta);
+ ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
+ arsta->addr);
+ }
+
+exit:
+ return ret;
+}
+
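ath12k_mac_handle_link_sta_state() dispatches purely on (old_state, new_state) pairs along mac80211's one-step station state ladder. A standalone sketch of that dispatch shape (illustrative enum and print statements, not the driver's actual handlers):

    #include <stdio.h>

    enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };

    /* models the (old, new) pair dispatch above */
    static void handle_transition(enum sta_state old_state, enum sta_state new_state)
    {
            if (old_state == NOTEXIST && new_state == NONE)
                    printf("add station\n");
            else if (old_state == NONE && new_state == NOTEXIST)
                    printf("remove station\n");
            else if (old_state == AUTH && new_state == ASSOC)
                    printf("send assoc (AP/mesh/adhoc only)\n");
            else if (old_state == ASSOC && new_state == AUTHORIZED)
                    printf("authorize peer\n");
            else if (old_state == AUTHORIZED && new_state == ASSOC)
                    printf("deauthorize peer\n");
            else if (old_state == ASSOC && new_state == AUTH)
                    printf("send disassoc (AP/mesh/adhoc only)\n");
            /* all other one-step transitions need no driver action */
    }

    int main(void)
    {
            /* mac80211 always walks the ladder one step at a time */
            handle_transition(NOTEXIST, NONE);
            handle_transition(NONE, AUTH);
            handle_transition(AUTH, ASSOC);
            handle_transition(ASSOC, AUTHORIZED);
            return 0;
    }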
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ u8 link_id = 0;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (ieee80211_vif_is_mld(vif) && sta->valid_links) {
+ WARN_ON(!sta->mlo && hweight16(sta->valid_links) != 1);
+ link_id = ffs(sta->valid_links) - 1;
}
+ /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE:
+ * New station add received. If this is a ML station then
+ * ahsta->links_map will be zero and sta->valid_links will have
+ * exactly one bit set.
+ * Assign default link to the first link sta.
+ */
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(ahsta, 0, sizeof(*ahsta));
+
+ arsta = &ahsta->deflink;
+
+ /* ML sta */
+ if (sta->mlo && !ahsta->links_map &&
+ (hweight16(sta->valid_links) == 1)) {
+ ret = ath12k_peer_ml_create(ah, sta);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to create ML peer for sta %pM",
+ sta->addr);
+ goto exit;
+ }
+ }
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif,
+ link_id);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable assign link %d for sta %pM",
+ link_id, sta->addr);
+ goto exit;
+ }
+
+ /* arsta gets memset during link assignment above, hence set
+ * these fields only after assigning the link sta
+ */
+ if (sta->mlo) {
+ arsta->is_assoc_link = true;
+ ahsta->assoc_link_id = link_id;
+ }
+ }
+
+ /* In the ML station scenario, activate all partner links once the
+ * client is transitioning to the associated state.
+ *
+ * FIXME: Ideally, this activation should occur when the client
+ * transitions to the authorized state. However, there are some
+ * issues with handling this in the firmware. Until the firmware
+ * can manage it properly, activate the links when the client is
+ * about to move to the associated state.
+ */
+ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
+ old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC)
+ ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif));
+
+ /* Handle all the other state transitions in a generic way */
+ valid_links = ahsta->links_map;
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
+ /* should never happen: links_map guarantees a valid arvif and arsta */
+ if (WARN_ON(!arvif || !arsta))
+ continue;
+
+ /* vdev might have been deleted already */
+ if (WARN_ON(!arvif->ar))
+ continue;
+
+ ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta,
+ old_state, new_state);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d",
+ link_id, arsta->addr, old_state, new_state);
+ goto exit;
+ }
+ }
+
+ /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
+ * Remove the station from the driver (handle the ML sta here since
+ * it needs special handling; a normal sta was already handled by the
+ * generic handler above).
+ */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST && sta->mlo)
+ ath12k_mac_ml_station_remove(ahvif, ahsta);
+
+ ret = 0;
+
+exit:
+ /* update the state if everything went well */
+ if (!ret)
+ ahsta->state = new_state;
+
return ret;
}
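For a fresh ML station, the assoc link chosen above is the lowest set bit of sta->valid_links, computed with ffs(). A small standalone sketch of that selection (assuming POSIX ffs() from <strings.h>, which the kernel's ffs() mirrors):

    #include <stdio.h>
    #include <strings.h> /* ffs() */

    int main(void)
    {
            unsigned int valid_links = 0x4; /* only link 2 valid at insertion */

            /* ffs() is 1-based and returns 0 when no bit is set */
            int link_id = valid_links ? ffs((int)valid_links) - 1 : 0;

            printf("assoc link id: %d\n", link_id); /* prints 2 */
            return 0;
    }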
@@ -5054,16 +5733,22 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k *ar;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ u8 link_id;
int ret;
s16 txpwr;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
+ /* TODO: use link id from mac80211 once that's implemented */
+ link_id = 0;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
@@ -5080,9 +5765,9 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
goto out;
}
- ar = ath12k_ah_to_ar(ah, 0);
+ ar = arvif->ar;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_USE_FIXED_PWR, txpwr);
if (ret) {
ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
@@ -5094,62 +5779,69 @@ out:
return ret;
}
-static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_sta *link_sta,
- u32 changed)
+static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
struct ieee80211_sta *sta = link_sta->sta;
struct ath12k *ar;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_link_sta *arsta;
struct ath12k_link_vif *arvif;
struct ath12k_peer *peer;
u32 bw, smps;
- /* TODO: use proper link id once link sta specific rc update support is
- * available in mac80211.
- */
- u8 link_id = ATH12K_DEFAULT_LINK_ID;
-
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar) {
- WARN_ON_ONCE(1);
- return;
- }
rcu_read_lock();
- arvif = rcu_dereference(ahvif->link[link_id]);
+ arvif = rcu_dereference(ahvif->link[link_sta->link_id]);
if (!arvif) {
- ath12k_warn(ar->ab, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
- link_id, sta->addr);
+ ath12k_hw_warn(ah, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
+ link_sta->link_id, sta->addr);
rcu_read_unlock();
return;
}
- arsta = rcu_dereference(ahsta->link[link_id]);
+
+ ar = arvif->ar;
+
+ arsta = rcu_dereference(ahsta->link[link_sta->link_id]);
if (!arsta) {
rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to fetch link sta on link id %u for peer %pM\n",
- link_id, sta->addr);
+ link_sta->link_id, sta->addr);
return;
}
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return;
}
spin_unlock_bh(&ar->ab->base_lock);
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ rcu_read_unlock();
+ return;
+ }
+
+ link_sta = rcu_dereference(sta->link[arsta->link_id]);
+ if (!link_sta) {
+ rcu_read_unlock();
+ ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
- sta->deflink.smps_mode);
+ arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss,
+ link_sta->smps_mode);
spin_lock_bh(&ar->data_lock);
@@ -5160,12 +5852,12 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->deflink.rx_nss;
+ arsta->nss = link_sta->rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->deflink.smps_mode) {
+ switch (link_sta->smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -5177,8 +5869,8 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
smps = WMI_PEER_SMPS_DYNAMIC;
break;
default:
- ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
- sta->deflink.smps_mode, sta->addr);
+ ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM link %u\n",
+ link_sta->smps_mode, arsta->addr, link_sta->link_id);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -5195,6 +5887,110 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
rcu_read_unlock();
}
+static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
+{
+ struct ath12k_link_sta *arsta;
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (arsta)
+ return NULL;
+
+ arsta = kmalloc(sizeof(*arsta), GFP_KERNEL);
+ if (!arsta)
+ return NULL;
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id);
+ if (ret) {
+ kfree(arsta);
+ return NULL;
+ }
+
+ return arsta;
+}
+
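ath12k_mac_alloc_assign_link_sta() follows the allocate, try to register, free-on-failure idiom so callers get a single NULL check. A generic standalone sketch of the same pattern (hypothetical names, not the driver's types):

    #include <stdlib.h>

    struct link_obj { int id; };

    /* stand-in for the assignment step that may fail */
    static int register_link(struct link_obj *obj, int id)
    {
            if (id < 0)
                    return -1;
            obj->id = id;
            return 0;
    }

    static struct link_obj *alloc_and_register(int id)
    {
            struct link_obj *obj = malloc(sizeof(*obj));

            if (!obj)
                    return NULL;

            if (register_link(obj, id)) {
                    free(obj); /* never leak on a failed registration */
                    return NULL;
            }

            return obj;
    }

    int main(void)
    {
            struct link_obj *obj = alloc_and_register(3);

            if (!obj)
                    return 1;
            free(obj);
            return 0;
    }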
+static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ struct ath12k *ar;
+ u8 link_id;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (!sta->valid_links)
+ return -EINVAL;
+
+ /* Firmware does not support removing individual link stations. All
+ * link stations are removed during the ML STA delete in sta_state(),
+ * hence link sta removal is not handled here.
+ */
+ if (new_links < old_links)
+ return 0;
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to add link for ml sta %pM", sta->addr);
+ return -EINVAL;
+ }
+
+ /* this op is expected only after initial sta insertion with default link */
+ if (WARN_ON(ahsta->links_map == 0))
+ return -EINVAL;
+
+ valid_links = new_links;
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (ahsta->links_map & BIT(link_id))
+ continue;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id);
+
+ if (!arvif || !arsta) {
+ ath12k_hw_warn(ah, "Failed to alloc/assign link sta");
+ continue;
+ }
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_mac_station_add(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
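Since firmware cannot remove individual link stations, change_sta_links() only acts on additions, and the links to add are exactly the bits set in new_links but not in old_links. A one-liner bitmask sketch (standalone, illustrative values):

    #include <stdio.h>

    int main(void)
    {
            unsigned int old_links = 0x5; /* links 0 and 2 */
            unsigned int new_links = 0x7; /* links 0, 1 and 2 */

            unsigned int added   = new_links & ~old_links; /* 0x2: link 1 */
            unsigned int removed = old_links & ~new_links; /* 0x0: none */

            printf("added 0x%x removed 0x%x\n", added, removed);
            return 0;
    }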
+static bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links)
+{
+ /* TODO: Handle recovery case */
+
+ return true;
+}
+
static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif,
u16 ac, bool enable)
{
@@ -6054,6 +6850,8 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -6115,6 +6913,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
int buf_id;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ATH12K_SKB_CB(skb)->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
@@ -6169,15 +6969,18 @@ static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
-static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_skb_cb *skb_cb;
struct ath12k_vif *ahvif;
struct ath12k_link_vif *arvif;
struct sk_buff *skb;
int ret;
+ lockdep_assert_wiphy(wiphy);
+
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
skb_cb = ATH12K_SKB_CB(skb);
if (!skb_cb->vif) {
@@ -6187,7 +6990,15 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
ahvif = ath12k_vif_to_ahvif(skb_cb->vif);
- arvif = &ahvif->deflink;
+ if (!(ahvif->links_map & BIT(skb_cb->link_id))) {
+ ath12k_warn(ar->ab,
+ "invalid linkid %u in mgmt over wmi tx with linkmap 0x%x\n",
+ skb_cb->link_id, ahvif->links_map);
+ ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ continue;
+ }
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[skb_cb->link_id]);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
@@ -6197,8 +7008,9 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
} else {
ath12k_warn(ar->ab,
- "dropping mgmt frame for vdev %d, is_started %d\n",
+ "dropping mgmt frame for vdev %d link %u is_started %d\n",
arvif->vdev_id,
+ skb_cb->link_id,
arvif->is_started);
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
@@ -6232,7 +7044,7 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
+ wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
return 0;
}
@@ -6258,6 +7070,105 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
}
+/* Note: called under rcu_read_lock() */
+static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 link, struct sk_buff *skb, u32 info_flags)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath12k_sta *ahsta;
+
+ /* Use the link id passed or the default vif link */
+ if (!sta) {
+ if (link != IEEE80211_LINK_UNSPECIFIED)
+ return link;
+
+ return ahvif->deflink.link_id;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+
+ /* The translation below ensures we pass the proper A2 & A3 for non-ML
+ * clients. For now it also assumes only an MLO AP is supported in this path.
+ */
+ if (!sta->mlo) {
+ link = ahsta->deflink.link_id;
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return link;
+
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ if (bss_conf) {
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+ if (!ieee80211_has_tods(hdr->frame_control) &&
+ !ieee80211_has_fromds(hdr->frame_control))
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+ }
+
+ /* enqueue eth encap & data frames on the primary link, FW does link
+ * selection and address translation.
+ */
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control))
+ return ahsta->assoc_link_id;
+
+ /* 802.11 frame cases */
+ if (link == IEEE80211_LINK_UNSPECIFIED)
+ link = ahsta->deflink.link_id;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ return link;
+
+ /* Perform address conversion for ML STA Tx */
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ link_sta = rcu_dereference(sta->link[link]);
+
+ if (bss_conf && link_sta) {
+ ether_addr_copy(hdr->addr1, link_sta->addr);
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+
+ return link;
+ }
+
+ if (bss_conf) {
+ /* An ML sta may have associated with only a subset of the links on
+ * which the ML AP is active, and may now send a frame (e.g. a probe
+ * request) on a different link that is active in our MLD but was not
+ * added during the previous association. We can still honor the Tx
+ * to that ML STA via the requested link.
+ * Control reaches here in such a case only when that link's address
+ * equals the MLD address, or in the worst case when the client
+ * wrongly used the MLD address as TA, which still lets us identify
+ * the ML sta object and pass it here.
+ * If that STA's link address differed from the MLD address, the sta
+ * object would be NULL and control would have returned at the !sta
+ * check at the start of the function. This also means hdr->addr1
+ * needs no MLD-to-link translation, since the RA is already the MLD
+ * address (ideally the same as that link address).
+ */
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+}
+
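The address rewriting in ath12k_mac_get_tx_link() swaps MLD-level addresses for per-link ones in the 802.11 header: addr1 carries the receiver (link STA address), addr2 the transmitter (link BSS address), and addr3 the BSSID. A standalone model of that copy (an illustrative struct, not the real ieee80211_hdr):

    #include <stdio.h>
    #include <string.h>

    struct hdr_model { /* illustrative only, not struct ieee80211_hdr */
            unsigned char addr1[6]; /* RA: receiver */
            unsigned char addr2[6]; /* TA: transmitter */
            unsigned char addr3[6]; /* BSSID */
    };

    int main(void)
    {
            const unsigned char link_sta_addr[6] = { 2, 0, 0, 0, 0, 1 };
            const unsigned char link_bss_addr[6] = { 2, 0, 0, 0, 0, 2 };
            struct hdr_model hdr = { 0 };

            /* mgmt Tx to an ML peer: rewrite MLD addresses as link addresses */
            memcpy(hdr.addr1, link_sta_addr, 6);
            memcpy(hdr.addr2, link_bss_addr, 6);
            memcpy(hdr.addr3, link_bss_addr, 6); /* AP case: addr3 = link BSS addr */

            printf("addr2 ends in %02x\n", hdr.addr2[5]);
            return 0;
    }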
+/* Note: called under rcu_read_lock() */
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -6267,13 +7178,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif = &ahvif->deflink;
- struct ath12k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_sta *sta = control->sta;
u32 info_flags = info->flags;
+ struct ath12k *ar;
bool is_prb_rsp;
+ u8 link_id;
int ret;
+ link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
@@ -6282,6 +7196,27 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ /* handle only for MLO case, use deflink for non MLO case */
+ if (ieee80211_vif_is_mld(vif)) {
+ link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags);
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ } else {
+ link_id = 0;
+ }
+
+ arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!arvif || !arvif->ar) {
+ ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission",
+ link_id);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ ar = arvif->ar;
+ skb_cb->link_id = link_id;
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
@@ -6309,10 +7244,12 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
void ath12k_mac_drain_tx(struct ath12k *ar)
{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
- cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
ath12k_mgmt_over_wmi_tx_purge(ar);
}
@@ -6429,6 +7366,8 @@ static void ath12k_drain_tx(struct ath12k_hw *ah)
struct ath12k *ar;
int i;
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
for_each_ar(ah, ar, i)
ath12k_mac_drain_tx(ar);
}
@@ -6554,9 +7493,10 @@ static void ath12k_mac_stop(struct ath12k *ar)
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
cancel_delayed_work_sync(&ar->scan.timeout);
+ wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->rfkill_work);
@@ -6622,6 +7562,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
struct ath12k_link_vif *tx_arvif;
struct ath12k_vif *tx_ahvif;
@@ -6629,10 +7570,17 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
if (!tx_vif)
return 0;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in set mbssid params for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
tx_arvif = &tx_ahvif->deflink;
- if (ahvif->vif->bss_conf.nontransmitted) {
+ if (link_conf->nontransmitted) {
if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
return -EINVAL;
@@ -6644,7 +7592,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -EINVAL;
}
- if (ahvif->vif->bss_conf.ema_ap)
+ if (link_conf->ema_ap)
*flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
return 0;
@@ -6658,6 +7606,8 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
struct ath12k_vif *ahvif = arvif->ahvif;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
arg->if_id = arvif->vdev_id;
arg->type = ahvif->vdev_type;
arg->subtype = ahvif->vdev_subtype;
@@ -6689,6 +7639,17 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+
+ if (ath12k_mac_is_ml_arvif(arvif)) {
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) {
+ ath12k_warn(ar->ab, "too many MLO links during setting up vdev: %d",
+ ahvif->vif->valid_links);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(arg->mld_addr, ahvif->vif->addr);
+ }
+
return 0;
}
@@ -6839,16 +7800,25 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
struct ieee80211_bss_conf *link_conf;
u32 param_id, param_value;
u16 nss;
int i;
int ret, vdev_id;
+ u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[arvif->link_id]);
+ /* If no link is active and a scan vdev is requested,
+ * use a default link conf for the scan address.
+ */
+ if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
+ link_id = ffs(vif->valid_links) - 1;
+ else
+ link_id = arvif->link_id;
+
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
if (!link_conf) {
ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
vif->addr, arvif->link_id);
@@ -6999,7 +7969,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
break;
}
- arvif->txpower = vif->bss_conf.txpower;
+ arvif->txpower = link_conf->txpower;
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
@@ -7034,8 +8004,7 @@ err_peer_del:
ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
arvif->bssid);
if (ret)
- /* KVALO: why not goto err? */
- return ret;
+ goto err_vdev_del;
ar->num_peers--;
}
@@ -7129,7 +8098,9 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_link_vif *scan_arvif;
struct ath12k_hw *ah = hw->priv;
struct ath12k *ar;
struct ath12k_base *ab;
@@ -7148,6 +8119,19 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
if (!ar)
return NULL;
+ /* Clean up the scan vdev if scanning on that ar is done
+ * and we now want to create a vdev for actual usage.
+ */
+ if (ieee80211_vif_is_mld(vif)) {
+ scan_arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[ATH12K_DEFAULT_SCAN_LINK]);
+ if (scan_arvif && scan_arvif->ar == ar) {
+ ar->scan.arvif = NULL;
+ ath12k_mac_remove_link_interface(hw, scan_arvif);
+ ath12k_mac_unassign_link_vif(scan_arvif);
+ }
+ }
+
if (arvif->ar) {
/* This is not expected really */
if (WARN_ON(!arvif->is_created)) {
@@ -7173,9 +8157,6 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
ab = ar->ab;
- if (arvif->is_created)
- goto flush;
-
/* Assign arvif again here since previous radio switch block
* would've unassigned and cleared it.
*/
@@ -7186,6 +8167,9 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
goto unlock;
}
+ if (arvif->is_created)
+ goto flush;
+
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
TARGET_NUM_VDEVS);
@@ -7243,14 +8227,9 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
- /* For non-ml vifs, vif->addr is the actual vdev address but for
- * ML vif link(link BSSID) address is the vdev address and it can be a
- * different one from vif->addr (i.e ML address).
- * Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
+ /* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
* will not know if this interface is an ML vif at this point.
*/
- ath12k_mac_assign_vif_to_vdev(hw, arvif, NULL);
-
return 0;
}
@@ -7348,11 +8327,12 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
+ struct ath12k *ar;
u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ for (link_id = 0; link_id < ATH12K_NUM_MAX_LINKS; link_id++) {
/* if we cached some config but never received assign chanctx,
* free the allocated cache.
*/
@@ -7361,6 +8341,31 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
if (!arvif || !arvif->is_created)
continue;
+ ar = arvif->ar;
+
+ /* Scan abort is already in progress since cancel_hw_scan() is
+ * expected to have run before this. Since the link is going to
+ * be removed now anyway, just cancel the worker and report the
+ * scan as aborted to user space.
+ */
+ if (ar->scan.arvif == arvif) {
+ wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.arvif = NULL;
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(ar->ah->hw, &info);
+ }
+
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
ath12k_mac_remove_link_interface(hw, arvif);
ath12k_mac_unassign_link_vif(arvif);
}
@@ -7453,20 +8458,26 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
return ret;
}
-static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
- struct ath12k *ar = arvif->ar;
+ struct ath12k *ar;
int ret = -EINVAL;
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+ if (!ar)
+ return -EINVAL;
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
- ret = ath12k_dp_rx_ampdu_start(ar, params);
+ ret = ath12k_dp_rx_ampdu_start(ar, params, link_id);
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = ath12k_dp_rx_ampdu_stop(ar, params);
+ ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id);
break;
case IEEE80211_AMPDU_TX_START:
case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -7480,6 +8491,10 @@ static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif,
break;
}
+ if (ret)
+ ath12k_warn(ar->ab, "unable to perform ampdu action %d for vif %pM link %u ret %d\n",
+ params->action, vif->addr, link_id, ret);
+
return ret;
}
@@ -7487,27 +8502,24 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta = params->sta;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ unsigned long links_map = ahsta->links_map;
int ret = -EINVAL;
+ u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar)
- return -EINVAL;
-
- ar = ath12k_ah_to_ar(ah, 0);
- arvif = &ahvif->deflink;
+ if (WARN_ON(!links_map))
+ return ret;
- ret = ath12k_mac_ampdu_action(arvif, params);
- if (ret)
- ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
- ar->pdev_idx, params->action, ret);
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ ret = ath12k_mac_ampdu_action(hw, vif, params, link_id);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
@@ -7627,6 +8639,58 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
return down_mode;
}
+static void
+ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
+ struct wmi_ml_arg *ml_arg)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct wmi_ml_partner_info *partner_info;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k_link_vif *arvif_p;
+ unsigned long links;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (!ath12k_mac_is_ml_arvif(arvif))
+ return;
+
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS)
+ return;
+
+ ml_arg->enabled = true;
+
+ /* The driver always adds a new link via VDEV START; FW takes
+ * care of internally adding this link to the existing
+ * link vdevs which are advertised as partners below
+ */
+ ml_arg->link_add = true;
+ partner_info = ml_arg->partner_info;
+
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif_p = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+
+ if (WARN_ON(!arvif_p))
+ continue;
+
+ if (arvif == arvif_p)
+ continue;
+
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ ahvif->vif->link_conf[arvif_p->link_id]);
+
+ if (!link_conf)
+ continue;
+
+ partner_info->vdev_id = arvif_p->vdev_id;
+ partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id;
+ ether_addr_copy(partner_info->addr, link_conf->addr);
+ ml_arg->num_partner_links++;
+ partner_info++;
+ }
+}
+
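Partner-info gathering above walks every link vdev of the ML vif and records all of them except the link being started. A standalone sketch of that skip-self bitmap walk (illustrative values):

    #include <stdio.h>

    #define MAX_LINKS 15

    int main(void)
    {
            unsigned int links_map = 0x7; /* links 0, 1 and 2 exist */
            int starting_link = 1;        /* the link being started now */
            int partners = 0;

            for (int id = 0; id < MAX_LINKS; id++) {
                    if (!(links_map & (1u << id)))
                            continue;
                    if (id == starting_link)
                            continue; /* a link is never its own partner */
                    printf("partner link %d\n", id);
                    partners++;
            }

            printf("num_partner_links = %d\n", partners);
            return 0;
    }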
static int
ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx,
@@ -7636,11 +8700,20 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
struct ath12k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
const struct cfg80211_chan_def *chandef = &ctx->def;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ath12k_vif *ahvif = arvif->ahvif;
- int he_support = ahvif->vif->bss_conf.he_support;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int dfs_cac_time;
int ret;
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in vdev start for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
reinit_completion(&ar->vdev_setup_done);
@@ -7658,9 +8731,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
chandef->chan->band,
ahvif->vif->type);
arg.min_power = 0;
- arg.max_power = chandef->chan->max_power * 2;
- arg.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+ arg.max_power = chandef->chan->max_power;
+ arg.max_reg_power = chandef->chan->max_reg_power;
+ arg.max_antenna_gain = chandef->chan->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
@@ -7693,7 +8766,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
spin_unlock_bh(&ab->base_lock);
/* TODO: Notify if secondary 80Mhz also needs radar detection */
- if (he_support) {
+ if (link_conf->he_support) {
ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
@@ -7705,6 +8778,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+ if (!restart)
+ ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml);
+
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n",
arg.vdev_id, arg.freq,
@@ -7728,20 +8804,20 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
ahvif->vif->addr, arvif->vdev_id);
- /* Enable CAC Flag in the driver by checking the channel DFS cac time,
- * i.e dfs_cac_ms value which will be valid only for radar channels
- * and state as NL80211_DFS_USABLE which indicates CAC needs to be
- * done before channel usage. This flags is used to drop rx packets.
+ /* Enable the CAC running flag in the driver by checking all sub-channels'
+ * DFS state as NL80211_DFS_USABLE, which indicates CAC needs to be
+ * done before channel usage. This flag is used to drop rx packets
* during CAC.
*/
/* TODO: Set the flag for other interface types as required */
- if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP &&
- chandef->chan->dfs_cac_ms &&
- chandef->chan->dfs_state == NL80211_DFS_USABLE) {
- set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled &&
+ cfg80211_chandef_dfs_usable(hw->wiphy, chandef)) {
+ set_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
+ dfs_cac_time = cfg80211_chandef_dfs_cac_time(hw->wiphy, chandef);
+
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "CAC Started in chan_freq %d for vdev %d\n",
- arg.freq, arg.vdev_id);
+ "CAC started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n",
+ dfs_cac_time, arg.freq, arg.band_center_freq1, arg.vdev_id);
}
ret = ath12k_mac_set_txbf_conf(arvif);
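The reworked CAC check keys off the whole chandef instead of a single channel: CAC starts only when radar detection is enabled on the context and the chandef's DFS state still requires it. A simplified standalone model of that decision (the real per-sub-channel rules live in cfg80211_chandef_dfs_usable() and are more nuanced than this sketch):

    #include <stdbool.h>
    #include <stdio.h>

    enum dfs_state { DFS_UNAVAILABLE, DFS_USABLE, DFS_AVAILABLE };

    /* simplified stand-in for cfg80211_chandef_dfs_usable() */
    static bool chandef_dfs_usable(const enum dfs_state *sub, int n)
    {
            bool needs_cac = false;

            for (int i = 0; i < n; i++) {
                    if (sub[i] == DFS_UNAVAILABLE)
                            return false; /* radar seen recently: unusable */
                    if (sub[i] == DFS_USABLE)
                            needs_cac = true; /* CAC still pending here */
            }
            return needs_cac;
    }

    int main(void)
    {
            enum dfs_state sub80[4] = { DFS_USABLE, DFS_USABLE,
                                        DFS_AVAILABLE, DFS_USABLE };
            bool radar_enabled = true;

            if (radar_enabled && chandef_dfs_usable(sub80, 4))
                    printf("start CAC before using the channel\n");
            else
                    printf("no CAC needed\n");
            return 0;
    }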
@@ -7778,19 +8854,32 @@ ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
- arvif = &ahvif->deflink;
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
- if (arvif->ar != arg->ar)
- return;
+ if (arvif->ar != arg->ar)
+ continue;
- if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
- return;
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
+
+ if (rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx)
+ continue;
- arg->n_vifs++;
+ arg->n_vifs++;
+ }
}
static void
@@ -7799,27 +8888,41 @@ ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_chanctx_conf *ctx;
struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
- arvif = &ahvif->deflink;
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
- if (arvif->ar != arg->ar)
- return;
+ if (arvif->ar != arg->ar)
+ continue;
- ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
- if (ctx != arg->ctx)
- return;
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[arvif->link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
- if (WARN_ON(arg->next_vif == arg->n_vifs))
- return;
+ ctx = rcu_access_pointer(link_conf->chanctx_conf);
+ if (ctx != arg->ctx)
+ continue;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
- arg->vifs[arg->next_vif].vif = vif;
- arg->vifs[arg->next_vif].old_ctx = ctx;
- arg->vifs[arg->next_vif].new_ctx = ctx;
- arg->next_vif++;
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->vifs[arg->next_vif].link_conf = link_conf;
+ arg->next_vif++;
+ }
}
static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width)
@@ -7879,10 +8982,12 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
int n_vifs)
{
struct ath12k_wmi_vdev_up_params params = {};
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_base *ab = ar->ab;
struct ath12k_link_vif *arvif;
struct ieee80211_vif *vif;
struct ath12k_vif *ahvif;
+ u8 link_id;
int ret;
int i;
bool monitor_vif = false;
@@ -7892,7 +8997,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
for (i = 0; i < n_vifs; i++) {
vif = vifs[i].vif;
ahvif = ath12k_vif_to_ahvif(vif);
- arvif = &ahvif->deflink;
+ link_conf = vifs[i].link_conf;
+ link_id = link_conf->link_id;
+ arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahvif->link[link_id]);
if (vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
@@ -7945,13 +9053,13 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
if (vif->mbssid_tx_vif) {
- struct ath12k_vif *ahvif =
+ struct ath12k_vif *tx_ahvif =
ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
+ struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
- params.tx_bssid = arvif->bssid;
- params.nontx_profile_idx = vif->bss_conf.bssid_index;
- params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator;
+ params.tx_bssid = tx_arvif->bssid;
+ params.nontx_profile_idx = link_conf->bssid_index;
+ params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
}
ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
@@ -8099,11 +9207,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
return -ENOMEM;
}
- if (!arvif->is_started) {
- ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
- if (!ar)
- return -EINVAL;
- } else {
+ ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
+ if (!ar) {
ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started",
vif->addr, link_id);
return -EINVAL;
@@ -8348,6 +9453,8 @@ static int ath12k_mac_flush(struct ath12k *ar)
int ath12k_mac_wait_tx_complete(struct ath12k *ar)
{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ath12k_mac_drain_tx(ar);
return ath12k_mac_flush(ar);
}
@@ -8356,7 +9463,11 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
u32 queues, bool drop)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif *ahvif;
+ unsigned long links;
struct ath12k *ar;
+ u8 link_id;
int i;
lockdep_assert_wiphy(hw->wiphy);
@@ -8371,12 +9482,18 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
return;
}
- ar = ath12k_get_ar_by_vif(hw, vif);
+ for_each_ar(ah, ar, i)
+ wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
- if (!ar)
- return;
+ ahvif = ath12k_vif_to_ahvif(vif);
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!(arvif && arvif->ar))
+ continue;
- ath12k_mac_flush(ar);
+ ath12k_mac_flush(arvif->ar);
+ }
}
static int
@@ -8575,10 +9692,11 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
{
struct ath12k_link_vif *arvif = data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
- if (arsta->arvif != arvif)
+ arsta = rcu_dereference(ahsta->link[arvif->link_id]);
+ if (!arsta || arsta->arvif != arvif)
return;
spin_lock_bh(&ar->data_lock);
@@ -8593,21 +9711,26 @@ static void ath12k_mac_disable_peer_fixed_rate(void *data,
{
struct ath12k_link_vif *arvif = data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
int ret;
- if (arsta->arvif != arvif)
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[arvif->link_id]);
+
+ if (!arsta || arsta->arvif != arvif)
return;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
WMI_FIXED_RATE_NONE);
if (ret)
ath12k_warn(ar->ab,
"failed to disable peer fixed rate for STA %pM ret %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
}
static int
@@ -8950,6 +10073,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
ath12k_scan_abort(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
+ wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
return 0;
}
@@ -8962,7 +10086,6 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k_wmi_scan_req_arg arg;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
u32 scan_time_msec;
@@ -8973,10 +10096,8 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
- if (!ar) {
- ret = -EINVAL;
- goto exit;
- }
+ if (!ar)
+ return -EINVAL;
/* check if any of the links of ML VIF is already started on
* radio(ar) corresponding to the given scan frequency and use it,
@@ -8995,15 +10116,11 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
* always on the same band for the vif
*/
if (arvif->is_created) {
- if (WARN_ON(!arvif->ar)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
- if (ar != arvif->ar && arvif->is_started) {
- ret = -EBUSY;
- goto exit;
- }
+ if (ar != arvif->ar && arvif->is_started)
+ return -EBUSY;
if (ar != arvif->ar) {
ath12k_mac_remove_link_interface(hw, arvif);
@@ -9020,7 +10137,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
if (ret) {
ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n",
ret);
- goto exit;
+ return ret;
}
}
@@ -9033,7 +10150,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
reinit_completion(&ar->scan.on_channel);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = true;
- ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.arvif = arvif;
ar->scan.roc_freq = chan->center_freq;
ar->scan.roc_notify = true;
ret = 0;
@@ -9048,37 +10165,41 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
if (ret)
- goto exit;
+ return ret;
scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
- memset(&arg, 0, sizeof(arg));
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.num_chan = 1;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
- if (!arg.chan_list) {
- ret = -ENOMEM;
- goto exit;
- }
+ struct ath12k_wmi_scan_req_arg *arg __free(kfree) =
+ kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
- arg.chan_list[0] = chan->center_freq;
- arg.dwell_time_active = scan_time_msec;
- arg.dwell_time_passive = scan_time_msec;
- arg.max_scan_time = scan_time_msec;
- arg.scan_f_passive = 1;
- arg.burst_duration = duration;
-
- ret = ath12k_start_scan(ar, &arg);
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->num_chan = 1;
+
+ u32 *chan_list __free(kfree) = kcalloc(arg->num_chan, sizeof(*chan_list),
+ GFP_KERNEL);
+ if (!chan_list)
+ return -ENOMEM;
+
+ arg->chan_list = chan_list;
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
+ arg->chan_list[0] = chan->center_freq;
+ arg->dwell_time_active = scan_time_msec;
+ arg->dwell_time_passive = scan_time_msec;
+ arg->max_scan_time = scan_time_msec;
+ arg->scan_f_passive = 1;
+ arg->burst_duration = duration;
+
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
- goto free_chan_list;
+ return ret;
}
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
@@ -9087,20 +10208,13 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
ret = ath12k_scan_stop(ar);
if (ret)
ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
- ret = -ETIMEDOUT;
- goto free_chan_list;
+ return -ETIMEDOUT;
}
ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
msecs_to_jiffies(duration));
- ret = 0;
-
-free_chan_list:
- kfree(arg.chan_list);
-
-exit:
- return ret;
+ return 0;
}
static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
@@ -9159,7 +10273,7 @@ static const struct ieee80211_ops ath12k_ops = {
.set_rekey_data = ath12k_mac_op_set_rekey_data,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
- .link_sta_rc_update = ath12k_mac_op_sta_rc_update,
+ .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update,
.conf_tx = ath12k_mac_op_conf_tx,
.set_antenna = ath12k_mac_op_set_antenna,
.get_antenna = ath12k_mac_op_get_antenna,
@@ -9178,7 +10292,8 @@ static const struct ieee80211_ops ath12k_ops = {
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
-
+ .change_sta_links = ath12k_mac_op_change_sta_links,
+ .can_activate_links = ath12k_mac_op_can_activate_links,
#ifdef CONFIG_PM
.suspend = ath12k_wow_op_suspend,
.resume = ath12k_wow_op_resume,
@@ -9201,8 +10316,8 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar,
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
- ar->freq_low = freq_low;
- ar->freq_high = freq_high;
+ ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
+ ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
}
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -9334,14 +10449,20 @@ static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
{
struct ath12k *ar;
int i;
- u16 interface_modes, mode;
- bool is_enable = true;
+ u16 interface_modes, mode = 0;
+ bool is_enable = false;
+
+ if (type == NL80211_IFTYPE_MESH_POINT) {
+ if (IS_ENABLED(CONFIG_MAC80211_MESH))
+ mode = BIT(type);
+ } else {
+ mode = BIT(type);
+ }
- mode = BIT(type);
for_each_ar(ah, ar, i) {
interface_modes = ar->ab->hw_params->interface_modes;
- if (!(interface_modes & mode)) {
- is_enable = false;
+ if (interface_modes & mode) {
+ is_enable = true;
break;
}
}
@@ -9349,23 +10470,20 @@ static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
return is_enable;
}
-static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+static int
+ath12k_mac_setup_radio_iface_comb(struct ath12k *ar,
+ struct ieee80211_iface_combination *comb)
{
- struct wiphy *wiphy = ah->hw->wiphy;
- struct ieee80211_iface_combination *combinations;
+ u16 interface_modes = ar->ab->hw_params->interface_modes;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
bool ap, mesh, p2p;
- ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
- p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+ ap = interface_modes & BIT(NL80211_IFTYPE_AP);
+ p2p = interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
-
- combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
- if (!combinations)
- return -ENOMEM;
+ (interface_modes & BIT(NL80211_IFTYPE_MESH_POINT));
if ((ap || mesh) && !p2p) {
n_limits = 2;
@@ -9382,10 +10500,8 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
}
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
- if (!limits) {
- kfree(combinations);
+ if (!limits)
return -ENOMEM;
- }
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
@@ -9401,26 +10517,181 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
if (p2p) {
limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_P2P_GO);
limits[2].max = 1;
limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
}
- combinations[0].limits = limits;
- combinations[0].n_limits = n_limits;
- combinations[0].max_interfaces = max_interfaces;
- combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+ comb[0].limits = limits;
+ comb[0].n_limits = n_limits;
+ comb[0].max_interfaces = max_interfaces;
+ comb[0].num_different_channels = 1;
+ comb[0].beacon_int_infra_match = true;
+ comb[0].beacon_int_min_gcd = 100;
+ comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+
+ return 0;
+}
+
+static int
+ath12k_mac_setup_global_iface_comb(struct ath12k_hw *ah,
+ struct wiphy_radio *radio,
+ u8 n_radio,
+ struct ieee80211_iface_combination *comb)
+{
+ const struct ieee80211_iface_combination *iter_comb;
+ struct ieee80211_iface_limit *limits;
+ int i, j, n_limits;
+ bool ap, mesh, p2p;
+
+ if (!n_radio)
+ return 0;
+
+ ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+ p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+ mesh = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
+
+ if ((ap || mesh) && !p2p)
+ n_limits = 2;
+ else if (p2p)
+ n_limits = 3;
+ else
+ n_limits = 1;
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits)
+ return -ENOMEM;
+
+ for (i = 0; i < n_radio; i++) {
+ iter_comb = radio[i].iface_combinations;
+ for (j = 0; j < iter_comb->n_limits && j < n_limits; j++) {
+ limits[j].types |= iter_comb->limits[j].types;
+ limits[j].max += iter_comb->limits[j].max;
+ }
+
+ comb->max_interfaces += iter_comb->max_interfaces;
+ comb->num_different_channels += iter_comb->num_different_channels;
+ comb->radar_detect_widths |= iter_comb->radar_detect_widths;
+ }
+
+ comb->limits = limits;
+ comb->n_limits = n_limits;
+ comb->beacon_int_infra_match = true;
+ comb->beacon_int_min_gcd = 100;
+
+ return 0;
+}
+
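The global combination built above is the per-radio ones summed field-wise: limit types of the same slot are OR-ed, max counts are added, and max_interfaces/num_different_channels accumulate across radios. A small standalone model of that aggregation (hypothetical two-radio input, simplified structs):

    #include <stdio.h>

    struct limit { unsigned int types; int max; };
    struct comb  { struct limit limits[3]; int n_limits; int max_interfaces; };

    int main(void)
    {
            /* hypothetical per-radio combinations: 1 STA + 16 AP each */
            struct comb radio[2] = {
                    { .limits = { { 0x1, 1 }, { 0x2, 16 } }, .n_limits = 2,
                      .max_interfaces = 17 },
                    { .limits = { { 0x1, 1 }, { 0x2, 16 } }, .n_limits = 2,
                      .max_interfaces = 17 },
            };
            struct comb global = { .n_limits = 2 };

            for (int i = 0; i < 2; i++) {
                    for (int j = 0; j < global.n_limits; j++) {
                            global.limits[j].types |= radio[i].limits[j].types;
                            global.limits[j].max   += radio[i].limits[j].max;
                    }
                    global.max_interfaces += radio[i].max_interfaces;
            }

            printf("global: %d interfaces, AP limit %d\n",
                   global.max_interfaces, global.limits[1].max);
            return 0;
    }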
+static
+void ath12k_mac_cleanup_iface_comb(const struct ieee80211_iface_combination *iface_comb)
+{
+ kfree(iface_comb[0].limits);
+ kfree(iface_comb);
+}
+
+static void ath12k_mac_cleanup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct wiphy *wiphy = ah->hw->wiphy;
+ const struct wiphy_radio *radio;
+ int i;
+
+ if (wiphy->n_radio > 0) {
+ radio = wiphy->radio;
+ for (i = 0; i < wiphy->n_radio; i++)
+ ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+ kfree(wiphy->radio);
+ }
+
+ ath12k_mac_cleanup_iface_comb(wiphy->iface_combinations);
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct ieee80211_iface_combination *combinations, *comb;
+ struct wiphy *wiphy = ah->hw->wiphy;
+ struct wiphy_radio *radio;
+ struct ath12k *ar;
+ int i, ret;
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ if (ah->num_radio == 1) {
+ ret = ath12k_mac_setup_radio_iface_comb(&ah->radio[0],
+ combinations);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup radio interface combinations for one radio: %d",
+ ret);
+ goto err_free_combinations;
+ }
+
+ goto out;
+ }
+
+ /* there are multiple radios */
+
+ radio = kcalloc(ah->num_radio, sizeof(*radio), GFP_KERNEL);
+ if (!radio) {
+ ret = -ENOMEM;
+ goto err_free_combinations;
+ }
+
+ for_each_ar(ah, ar, i) {
+ comb = kzalloc(sizeof(*comb), GFP_KERNEL);
+ if (!comb) {
+ ret = -ENOMEM;
+ goto err_free_radios;
+ }
+
+ ret = ath12k_mac_setup_radio_iface_comb(ar, comb);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup radio interface combinations for radio %d: %d",
+ i, ret);
+ kfree(comb);
+ goto err_free_radios;
+ }
+
+ radio[i].freq_range = &ar->freq_range;
+ radio[i].n_freq_range = 1;
+
+ radio[i].iface_combinations = comb;
+ radio[i].n_iface_combinations = 1;
+ }
+
+ ret = ath12k_mac_setup_global_iface_comb(ah, radio, ah->num_radio, combinations);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup global interface combinations: %d",
+ ret);
+ goto err_free_all_radios;
+ }
+ wiphy->radio = radio;
+ wiphy->n_radio = ah->num_radio;
+
+out:
wiphy->iface_combinations = combinations;
wiphy->n_iface_combinations = 1;
return 0;
+
+err_free_all_radios:
+ i = ah->num_radio;
+
+err_free_radios:
+ while (i--)
+ ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+ kfree(radio);
+
+err_free_combinations:
+ kfree(combinations);
+
+ return ret;
}
static const u8 ath12k_if_types_ext_capa[] = {
@@ -9444,7 +10715,7 @@ static const u8 ath12k_if_types_ext_capa_ap[] = {
[10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
};
-static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
+static struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
{
.extended_capabilities = ath12k_if_types_ext_capa,
.extended_capabilities_mask = ath12k_if_types_ext_capa,
@@ -9461,6 +10732,8 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
.extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
.extended_capabilities_len =
sizeof(ath12k_if_types_ext_capa_ap),
+ .eml_capabilities = 0,
+ .mld_capa_and_ops = 0,
},
};
@@ -9477,7 +10750,6 @@ static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
- struct wiphy *wiphy = hw->wiphy;
struct ath12k *ar;
int i;
@@ -9491,8 +10763,7 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
for_each_ar(ah, ar, i)
ath12k_mac_cleanup_unregister(ar);
- kfree(wiphy->iface_combinations[0].limits);
- kfree(wiphy->iface_combinations);
+ ath12k_mac_cleanup_iface_combinations(ah);
SET_IEEE80211_DEV(hw, NULL);
}
@@ -9567,7 +10838,10 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
if (ret)
goto err_cleanup_unregister;
- ht_cap &= ht_cap_info;
+ /* 6 GHz does not support HT Cap, hence do not consider it */
+ if (!ar->supports_6ghz)
+ ht_cap &= ht_cap_info;
+
wiphy->max_ap_assoc_sta += ar->max_num_stations;
/* Advertise the max antenna support of all radios, driver can handle
@@ -9631,7 +10905,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
- if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -9647,7 +10921,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
* handle it when the ht capability different for each band.
*/
if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
- (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
+ (is_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -9669,6 +10943,15 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
*/
wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ /* Copy over the MLO related capabilities received from
+ * WMI_SERVICE_READY_EXT2_EVENT when the hw group is MLO capable.
+ */
+ if (ab->ag->mlo_capable) {
+ ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap;
+ ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap;
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ }
+
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
@@ -9722,13 +11005,13 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ret = ath12k_wow_init(ar);
if (ret) {
ath12k_warn(ar->ab, "failed to init wow: %d\n", ret);
- goto err_free_if_combs;
+ goto err_cleanup_if_combs;
}
ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
- goto err_free_if_combs;
+ goto err_cleanup_if_combs;
}
if (is_monitor_disable)
@@ -9758,9 +11041,8 @@ err_unregister_hw:
ieee80211_unregister_hw(hw);
-err_free_if_combs:
- kfree(wiphy->iface_combinations[0].limits);
- kfree(wiphy->iface_combinations);
+err_cleanup_if_combs:
+ ath12k_mac_cleanup_iface_combinations(ah);
err_complete_cleanup_unregister:
i = ah->num_radio;
@@ -9794,6 +11076,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+ ar->scan.arvif = NULL;
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->arvifs);
@@ -9808,40 +11091,179 @@ static void ath12k_mac_setup(struct ath12k *ar)
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
+ init_completion(&ar->mlo_setup_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work);
INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
}
-int ath12k_mac_register(struct ath12k_base *ab)
+static int __ath12k_mac_mlo_setup(struct ath12k *ar)
{
- struct ath12k_hw *ah;
- int i;
+ u8 num_link = 0, partner_link_id[ATH12K_GROUP_MAX_RADIO] = {};
+ struct ath12k_base *partner_ab, *ab = ar->ab;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct wmi_mlo_setup_arg mlo = {};
+ struct ath12k_pdev *pdev;
+ unsigned long time_left;
+ int i, j, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ reinit_completion(&ar->mlo_setup_done);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+
+ for (j = 0; j < partner_ab->num_radios; j++) {
+ pdev = &partner_ab->pdevs[j];
+
+ /* Avoid the self link */
+ if (ar == pdev->ar)
+ continue;
+
+ partner_link_id[num_link] = pdev->hw_link_id;
+ num_link++;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "device %d pdev %d hw_link_id %d num_link %d\n",
+ i, j, pdev->hw_link_id, num_link);
+ }
+ }
+
+ mlo.group_id = cpu_to_le32(ag->id);
+ mlo.partner_link_id = partner_link_id;
+ mlo.num_partner_links = num_link;
+ ar->mlo_setup_status = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "group id %d num_link %d\n", ag->id, num_link);
+
+ ret = ath12k_wmi_mlo_setup(ar, &mlo);
+ if (ret) {
+ ath12k_err(ab, "failed to send setup MLO WMI command for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->mlo_setup_done,
+ WMI_MLO_CMD_TIMEOUT_HZ);
+
+ if (!time_left || ar->mlo_setup_status)
+ return ar->mlo_setup_status ? : -ETIMEDOUT;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo setup done for pdev %d\n", ar->pdev_idx);
+
+ return 0;
+}
+
+static int __ath12k_mac_mlo_teardown(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
int ret;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
return 0;
- /* Initialize channel counters frequency value in hertz */
- ab->cc_freq_hz = 320000;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ ret = ath12k_wmi_mlo_teardown(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo teardown for pdev %d\n", ar->pdev_idx);
+
+ return 0;
+}
+
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret;
+ int i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_setup(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to setup MLO: %d\n", ret);
+ goto err_setup;
+ }
+ }
+ }
+
+ return 0;
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+err_setup:
+ for (i = i - 1; i >= 0; i--) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for (j = j - 1; j >= 0; j--) {
+ ar = &ah->radio[j];
+ if (!ar)
+ continue;
+
+ __ath12k_mac_mlo_teardown(ar);
+ }
+ }
+
+ return ret;
+}
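+
The error path here is the usual set-up-all-or-unwind pattern: on the first failure, every radio that was already set up is torn down in reverse order. A self-contained model of the pattern, with hypothetical setup()/teardown() callbacks standing in for __ath12k_mac_mlo_setup()/__ath12k_mac_mlo_teardown() and the hw/radio nesting flattened:

#include <stdio.h>

static int setup(int i)
{
	return i == 3 ? -1 : 0;		/* pretend the 4th radio fails */
}

static void teardown(int i)
{
	printf("teardown %d\n", i);
}

static int setup_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = setup(i);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)			/* unwind in reverse order */
		teardown(i);
	return ret;
}

int main(void)
{
	return setup_all(5) ? 1 : 0;	/* tears down 2, 1, 0 */
}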
+
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret, i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_teardown(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to teardown MLO: %d\n", ret);
+ break;
+ }
+ }
+ }
+}
+
+int ath12k_mac_register(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab = ag->ab[0];
+ struct ath12k_hw *ah;
+ int i;
+ int ret;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
ret = ath12k_mac_hw_register(ah);
if (ret)
goto err;
}
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -9851,13 +11273,16 @@ err:
return ret;
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_hw_group *ag)
{
+ struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
- for (i = ab->num_hw - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ for (i = ag->num_hw - 1; i >= 0; i--) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -9870,12 +11295,13 @@ static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
ieee80211_free_hw(ah->hw);
}
-static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag,
struct ath12k_pdev_map *pdev_map,
u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
int i;
@@ -9891,6 +11317,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ah->num_radio = num_pdev_map;
mutex_init(&ah->hw_mutex);
+ INIT_LIST_HEAD(&ah->ml_peers);
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
@@ -9905,54 +11332,116 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ar->pdev_idx = pdev_idx;
pdev->ar = ar;
+ ag->hw_links[ar->hw_link_id].device_id = ab->device_id;
+ ag->hw_links[ar->hw_link_id].pdev_idx = pdev_idx;
+
ath12k_mac_setup(ar);
+ ath12k_dp_pdev_pre_alloc(ar);
}
return ah;
}
-void ath12k_mac_destroy(struct ath12k_base *ab)
+void ath12k_mac_destroy(struct ath12k_hw_group *ag)
{
struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_base *ab = ag->ab[0];
+ int i, j;
+ struct ath12k_hw *ah;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- if (!pdev->ar)
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
continue;
- pdev->ar = NULL;
+ for (j = 0; j < ab->num_radios; j++) {
+ pdev = &ab->pdevs[j];
+ if (!pdev->ar)
+ continue;
+ pdev->ar = NULL;
+ }
}
- for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ag_set_ah(ag, i, NULL);
}
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+static void ath12k_mac_set_device_defaults(struct ath12k_base *ab)
{
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = 320000;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+}
+
+int ath12k_mac_allocate(struct ath12k_hw_group *ag)
+{
+ struct ath12k_pdev_map pdev_map[ATH12K_GROUP_MAX_RADIO];
+ int mac_id, device_id, total_radio, num_hw;
+ struct ath12k_base *ab;
struct ath12k_hw *ah;
- struct ath12k_pdev_map pdev_map[MAX_RADIOS];
int ret, i, j;
u8 radio_per_hw;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ total_radio = 0;
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_mac_set_device_defaults(ab);
+ total_radio += ab->num_radios;
+ }
- ab->num_hw = ab->num_radios;
- radio_per_hw = 1;
+ if (!total_radio)
+ return -EINVAL;
- for (i = 0; i < ab->num_hw; i++) {
+ if (WARN_ON(total_radio > ATH12K_GROUP_MAX_RADIO))
+ return -ENOSPC;
+
+ /* All pdevs in a hardware group that participates in multi-link
+ * operation are combined and registered as a single wiphy;
+ * otherwise each pdev is registered separately.
+ */
+ if (ag->mlo_capable)
+ radio_per_hw = total_radio;
+ else
+ radio_per_hw = 1;
+
+ num_hw = total_radio / radio_per_hw;
+
+ ag->num_hw = 0;
+ device_id = 0;
+ mac_id = 0;
+ for (i = 0; i < num_hw; i++) {
for (j = 0; j < radio_per_hw; j++) {
+ if (device_id >= ag->num_devices || !ag->ab[device_id]) {
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ ab = ag->ab[device_id];
pdev_map[j].ab = ab;
- pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ pdev_map[j].pdev_idx = mac_id;
+ mac_id++;
+
+ /* If mac_id goes beyond the current device's MACs, move on
+ * to the next device.
+ */
+ if (mac_id >= ab->num_radios) {
+ mac_id = 0;
+ device_id++;
+ }
}
- ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+ ab = pdev_map->ab;
+
+ ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw);
if (!ah) {
ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
i);
@@ -9960,20 +11449,22 @@ int ath12k_mac_allocate(struct ath12k_base *ab)
goto err;
}
- ab->ah[i] = ah;
- }
+ ah->dev = ab->dev;
- ath12k_dp_pdev_pre_alloc(ab);
+ ag->ah[i] = ah;
+ ag->num_hw++;
+ }
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- if (!ab->ah[i])
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ag_set_ah(ag, i, NULL);
}
return ret;
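
The allocation loop above distributes the radios of all devices in the group over mac80211 hw instances: when the group is MLO capable, every radio folds into a single hw; otherwise each radio becomes its own hw. A standalone model of that pdev mapping, with made-up radio counts:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int num_radios[] = { 2, 1 };		/* two devices in the group */
	int total_radio = 3;
	bool mlo_capable = true;

	int radio_per_hw = mlo_capable ? total_radio : 1;
	int num_hw = total_radio / radio_per_hw;
	int device_id = 0, mac_id = 0;

	for (int i = 0; i < num_hw; i++)
		for (int j = 0; j < radio_per_hw; j++) {
			printf("hw %d slot %d -> device %d pdev %d\n",
			       i, j, device_id, mac_id);
			if (++mac_id >= num_radios[device_id]) {
				mac_id = 0;
				device_id++;
			}
		}

	return 0;
}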
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index d382337ba649..3594729b6397 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -14,6 +14,7 @@
struct ath12k;
struct ath12k_base;
struct ath12k_hw;
+struct ath12k_hw_group;
struct ath12k_pdev_map;
struct ath12k_generic_iter {
@@ -44,6 +45,12 @@ struct ath12k_generic_iter {
#define ATH12K_DEFAULT_LINK_ID 0
#define ATH12K_INVALID_LINK_ID 255
+/* Default link after the IEEE802.11 defined Max link id limit
+ * for driver usage purpose.
+ */
+#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
+#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
+
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
@@ -52,12 +59,17 @@ enum ath12k_supported_bw {
ATH12K_BW_320 = 4,
};
+struct ath12k_mac_get_any_chanctx_conf_arg {
+ struct ath12k *ar;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+};
+
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
-void ath12k_mac_destroy(struct ath12k_base *ab);
-void ath12k_mac_unregister(struct ath12k_base *ab);
-int ath12k_mac_register(struct ath12k_base *ab);
-int ath12k_mac_allocate(struct ath12k_base *ab);
+void ath12k_mac_destroy(struct ath12k_hw_group *ag);
+void ath12k_mac_unregister(struct ath12k_hw_group *ag);
+int ath12k_mac_register(struct ath12k_hw_group *ag);
+int ath12k_mac_allocate(struct ath12k_hw_group *ag);
int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate);
u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
@@ -89,5 +101,12 @@ int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag);
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag);
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag);
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index cf907550e6a4..06cff3849ab8 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1123,6 +1123,9 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
+ if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
+
__ath12k_pci_ext_irq_disable(ab);
ath12k_pci_sync_ext_irqs(ab);
}
@@ -1147,6 +1150,11 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab)
void ath12k_pci_stop(struct ath12k_base *ab)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+ return;
+
ath12k_pci_ce_irq_disable_sync(ab);
ath12k_ce_cleanup_pipes(ab);
}
@@ -1717,6 +1725,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
+ ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
@@ -1725,6 +1734,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
+ ath12k_fw_unmap(ab);
qmi_fail:
ath12k_mhi_unregister(ab_pci);
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index 7a62665b8af9..792cca8a3fb1 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -8,6 +8,22 @@
#include "peer.h"
#include "debug.h"
+static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
+{
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ list_for_each_entry(ml_peer, &ah->ml_peers, list) {
+ if (!ether_addr_equal(ml_peer->addr, addr))
+ continue;
+
+ return ml_peer;
+ }
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
const u8 *addr)
{
@@ -63,6 +79,20 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
return NULL;
}
+static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
+ int ml_peer_id)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (ml_peer_id == peer->ml_id)
+ return peer;
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
int peer_id)
{
@@ -70,6 +100,9 @@ struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
lockdep_assert_held(&ab->base_lock);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID)
+ return ath12k_peer_find_by_ml_id(ab, peer_id);
+
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
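
The dispatch above works because an MLD-level peer id carries a tag bit inside the id itself (ATH12K_PEER_ML_ID_VALID, BIT(13), added to peer.h further down); ids with the bit set are routed to the ML lookup. A minimal sketch of the tagging scheme, noting that the driver keeps the tag bit set in the stored ml_id, so comparisons there include it:

#include <stdio.h>

#define PEER_ML_ID_VALID (1U << 13)	/* mirrors ATH12K_PEER_ML_ID_VALID */

int main(void)
{
	unsigned int ml_peer_id = 7;
	unsigned int peer_id = ml_peer_id | PEER_ML_ID_VALID;

	if (peer_id & PEER_ML_ID_VALID)
		printf("MLD-level lookup, ml id %u\n",
		       peer_id & ~PEER_ML_ID_VALID);
	else
		printf("per-link lookup, peer id %u\n", peer_id);

	return 0;
}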
@@ -231,8 +264,9 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
return 0;
}
-int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
+ struct ath12k_base *ab = ar->ab;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -241,12 +275,25 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
+ return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ret = ath12k_peer_delete_send(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
@@ -266,7 +313,11 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_link_sta *arsta;
+ u8 link_id = arvif->link_id;
struct ath12k_peer *peer;
+ struct ath12k_sta *ahsta;
+ u16 ml_peer_id;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -332,6 +383,29 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
arvif->ast_idx = peer->hw_peer_id;
}
+ if (sta) {
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+
+ peer->link_id = arsta->link_id;
+
+ /* Fill ML info into created peer */
+ if (sta->mlo) {
+ ml_peer_id = ahsta->ml_peer_id;
+ peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
+ ether_addr_copy(peer->ml_addr, sta->addr);
+
+ /* the assoc link is considered primary for now */
+ peer->primary_link = arsta->is_assoc_link;
+ peer->mlo = true;
+ } else {
+ peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
+ peer->primary_link = true;
+ peer->mlo = false;
+ }
+ }
+
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
@@ -341,3 +415,150 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
return 0;
}
+
+static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
+{
+ u16 ml_peer_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
+ if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
+ continue;
+
+ set_bit(ml_peer_id, ah->free_ml_peer_id_map);
+ break;
+ }
+
+ if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
+ ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ return ml_peer_id;
+}
+
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (ml_peer) {
+ ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
+ ml_peer->id, sta->addr);
+ return -EEXIST;
+ }
+
+ ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
+ if (!ml_peer)
+ return -ENOMEM;
+
+ ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
+ sta->addr);
+ kfree(ml_peer);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(ml_peer->addr, sta->addr);
+ ml_peer->id = ahsta->ml_peer_id;
+ list_add(&ml_peer->list, &ah->ml_peers);
+
+ return 0;
+}
+
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (!ml_peer) {
+ ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
+ return -EINVAL;
+ }
+
+ list_del(&ml_peer->list);
+ kfree(ml_peer);
+
+ return 0;
+}
+
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long links;
+ struct ath12k *ar;
+ int ret, err_ret = 0;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ /* FW expects all link peers to be deleted at once, before the host
+ * waits for the peer unmap or delete responses.
+ */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ arvif->vdev_id, arsta->addr, ret);
+ err_ret = ret;
+ continue;
+ }
+ }
+
+ /* Ensure all link peers are deleted and unmapped */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ err_ret = ret;
+ continue;
+ }
+ ar->num_peers--;
+ }
+
+ return err_ret;
+}
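+
The two loops implement a two-phase teardown: every link peer delete is issued first, and only then does the host wait for the unmap/delete completions, collecting the first error without aborting the remaining waits. A generic self-contained model of that pattern, with hypothetical send/wait helpers:

#include <stdio.h>

static int send_delete(int link)
{
	printf("delete sent for link %d\n", link);
	return 0;
}

static int wait_delete_done(int link)
{
	printf("link %d delete confirmed\n", link);
	return 0;
}

static int delete_all_links(int n)
{
	int i, ret, err = 0;

	/* phase 1: issue every delete before waiting on any response */
	for (i = 0; i < n; i++) {
		ret = send_delete(i);
		if (ret)
			err = ret;	/* remember the error, keep going */
	}

	/* phase 2: wait for all unmap/delete completions */
	for (i = 0; i < n; i++) {
		ret = wait_delete_done(i);
		if (ret)
			err = ret;
	}

	return err;
}

int main(void)
{
	return delete_all_links(3);
}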
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index b955f0cdf598..5870ee11a8c7 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -19,6 +19,8 @@ struct ppdu_user_delayba {
u32 resp_rate_flags;
};
+#define ATH12K_PEER_ML_ID_VALID BIT(13)
+
struct ath12k_peer {
struct list_head list;
struct ieee80211_sta *sta;
@@ -44,9 +46,28 @@ struct ath12k_peer {
struct ppdu_user_delayba ppdu_stats_delayba;
bool delayba_flag;
bool is_authorized;
-
+ bool mlo;
/* protected by ab->data_lock */
bool dp_setup_done;
+
+ u16 ml_id;
+
+ /* any other ML info common to all partners can be added
+ * here and will be the same for all partner peers.
+ */
+ u8 ml_addr[ETH_ALEN];
+
+ /* To ensure only certain work related to dp is done once */
+ bool primary_link;
+
+ /* for reference to ath12k_link_sta */
+ u8 link_id;
+};
+
+struct ath12k_ml_peer {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u16 id;
};
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
@@ -66,5 +87,8 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr);
bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index b93ce9f87f61..5c3563383fab 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -2016,26 +2016,57 @@ static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
-static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
- struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static void ath12k_host_cap_hw_link_id_init(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab, *partner_ab;
+ int i, j, hw_id_base;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ hw_id_base = 0;
+ ab = ag->ab[i];
+
+ for (j = 0; j < ag->num_devices; j++) {
+ partner_ab = ag->ab[j];
+
+ if (partner_ab->wsi_info.index >= ab->wsi_info.index)
+ continue;
+
+ hw_id_base += partner_ab->qmi.num_radios;
+ }
+
+ ab->wsi_info.hw_link_id_base = hw_id_base;
+ }
+
+ ag->hw_link_id_init_done = true;
+}
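+
Each device's hw_link_id_base is simply the sum of the radio counts of all group members with a lower WSI index, which keeps hardware link ids globally unique across the group. A standalone worked example with made-up radio counts:

#include <stdio.h>

struct dev {
	int wsi_index;
	int num_radios;
	int hw_link_id_base;
};

int main(void)
{
	struct dev devs[] = {		/* made-up three-device group */
		{ .wsi_index = 0, .num_radios = 2 },
		{ .wsi_index = 1, .num_radios = 1 },
		{ .wsi_index = 2, .num_radios = 2 },
	};
	int n = 3;

	for (int i = 0; i < n; i++) {
		devs[i].hw_link_id_base = 0;
		for (int j = 0; j < n; j++)
			if (devs[j].wsi_index < devs[i].wsi_index)
				devs[i].hw_link_id_base += devs[j].num_radios;
	}

	/* prints bases 0, 2 and 3: link ids 0-1, 2 and 3-4, all unique */
	for (int i = 0; i < n; i++)
		printf("device %d: hw_link_id_base %d\n",
		       i, devs[i].hw_link_id_base);

	return 0;
}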
+
+static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+ struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
struct wlfw_host_mlo_chip_info_s_v01 *info;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
u8 hw_link_id = 0;
- int i;
+ int i, j, ret;
- if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) {
+ if (!ag->mlo_capable) {
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "intra device MLO is disabled hence skip QMI MLO cap");
- return;
+ "MLO is disabled hence skip QMI MLO cap");
+ return 0;
}
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
- ab->mlo_capable_flags = 0;
+ ab->single_chip_mlo_supp = false;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
- return;
+ return 0;
+ }
+
+ if (ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+ ath12k_err(ab, "failed to send MLO cap due to invalid device id\n");
+ return -EINVAL;
}
req->mlo_capable_valid = 1;
@@ -2043,30 +2074,88 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_chip_id_valid = 1;
req->mlo_chip_id = ab->device_id;
req->mlo_group_id_valid = 1;
- req->mlo_group_id = 0;
+ req->mlo_group_id = ag->id;
req->max_mlo_peer_valid = 1;
/* Max peer number generally won't change for the same device
* but needs to be synced with host driver.
*/
req->max_mlo_peer = ab->hw_params->max_mlo_peer;
req->mlo_num_chips_valid = 1;
- req->mlo_num_chips = 1;
+ req->mlo_num_chips = ag->num_devices;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d",
+ req->mlo_chip_id, req->mlo_group_id, req->mlo_num_chips);
- info = &req->mlo_chip_info[0];
- info->chip_id = ab->device_id;
- info->num_local_links = ab->qmi.num_radios;
+ mutex_lock(&ag->mutex);
- for (i = 0; i < info->num_local_links; i++) {
- info->hw_link_id[i] = hw_link_id;
- info->valid_mlo_link_id[i] = 1;
+ if (!ag->hw_link_id_init_done)
+ ath12k_host_cap_hw_link_id_init(ag);
- hw_link_id++;
+ for (i = 0; i < ag->num_devices; i++) {
+ info = &req->mlo_chip_info[i];
+ partner_ab = ag->ab[i];
+
+ if (partner_ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+ ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n");
+ ret = -EINVAL;
+ goto device_cleanup;
+ }
+
+ info->chip_id = partner_ab->device_id;
+ info->num_local_links = partner_ab->qmi.num_radios;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n",
+ info->chip_id, info->num_local_links);
+
+ for (j = 0; j < info->num_local_links; j++) {
+ info->hw_link_id[j] = partner_ab->wsi_info.hw_link_id_base + j;
+ info->valid_mlo_link_id[j] = 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n",
+ info->hw_link_id[j]);
+
+ hw_link_id++;
+ }
}
+ if (!hw_link_id)
+ ag->mlo_capable = false;
+
req->mlo_chip_info_valid = 1;
+
+ mutex_unlock(&ag->mutex);
+
+ return 0;
+
+device_cleanup:
+ for (i = i - 1; i >= 0; i--) {
+ info = &req->mlo_chip_info[i];
+
+ memset(info, 0, sizeof(*info));
+ }
+
+ req->mlo_num_chips = 0;
+ req->mlo_num_chips_valid = 0;
+
+ req->max_mlo_peer = 0;
+ req->max_mlo_peer_valid = 0;
+ req->mlo_group_id = 0;
+ req->mlo_group_id_valid = 0;
+ req->mlo_chip_id = 0;
+ req->mlo_chip_id_valid = 0;
+ req->mlo_capable = 0;
+ req->mlo_capable_valid = 0;
+
+ ag->mlo_capable = false;
+
+ mutex_unlock(&ag->mutex);
+
+ return ret;
}
-static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
@@ -2111,7 +2200,9 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
}
- ath12k_host_cap_parse_mlo(ab, &req);
+ ret = ath12k_host_cap_parse_mlo(ab, &req);
+ if (ret < 0)
+ goto out;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
@@ -2174,12 +2265,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
- if (resp.single_chip_mlo_support_valid) {
- if (resp.single_chip_mlo_support)
- ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT;
- else
- ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT;
- }
+ if (resp.single_chip_mlo_support_valid &&
+ resp.single_chip_mlo_support)
+ ab->single_chip_mlo_supp = true;
if (!resp.num_phy_valid) {
ret = -ENODATA;
@@ -2275,7 +2363,9 @@ resp_out:
return ret;
}
-static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
@@ -2350,30 +2440,125 @@ out:
return ret;
}
+static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
+ struct target_mem_chunk *chunk,
+ int idx)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct target_mem_chunk *mlo_chunk;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done || ag->num_started)
+ return;
+
+ if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
+ ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx);
+ return;
+ }
+
+ mlo_chunk = &ag->mlo_mem.chunk[idx];
+ if (mlo_chunk->v.addr) {
+ dma_free_coherent(ab->dev,
+ mlo_chunk->size,
+ mlo_chunk->v.addr,
+ mlo_chunk->paddr);
+ mlo_chunk->v.addr = NULL;
+ }
+
+ mlo_chunk->paddr = 0;
+ mlo_chunk->size = 0;
+ chunk->v.addr = NULL;
+ chunk->paddr = 0;
+ chunk->size = 0;
+}
+
static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
{
- int i;
+ struct ath12k_hw_group *ag = ab->ag;
+ int i, mlo_idx;
- for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
if (!ab->qmi.target_mem[i].v.addr)
continue;
- dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].prev_size,
- ab->qmi.target_mem[i].v.addr,
- ab->qmi.target_mem[i].paddr);
- ab->qmi.target_mem[i].v.addr = NULL;
+ if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+ ath12k_qmi_free_mlo_mem_chunk(ab,
+ &ab->qmi.target_mem[i],
+ mlo_idx++);
+ } else {
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].v.addr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].v.addr = NULL;
+ }
+ }
+
+ if (!ag->num_started && ag->mlo_mem.init_done) {
+ ag->mlo_mem.init_done = false;
+ ag->mlo_mem.mlo_mem_size = 0;
}
}
+static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
+ struct target_mem_chunk *chunk)
+{
+ /* Firmware reloads in recovery/resume.
+ * In such cases, no need to allocate memory for FW again.
+ */
+ if (chunk->v.addr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ goto this_chunk_done;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->v.addr, chunk->paddr);
+ chunk->v.addr = NULL;
+ }
+
+ chunk->v.addr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunk->v.addr) {
+ if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
+ ab->qmi.target_mem_delayed = true;
+ ath12k_warn(ab,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath12k_qmi_free_target_mem_chunk(ab);
+ return -EAGAIN;
+ }
+ ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
+ chunk->type, chunk->size);
+ return -ENOMEM;
+ }
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+this_chunk_done:
+ return 0;
+}
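+
The helper reuses an existing chunk only when both type and size are unchanged across a firmware restart; otherwise it frees and reallocates. A userspace model of that policy, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent():

#include <stdlib.h>

struct chunk {
	void *addr;
	size_t size, prev_size;
	int type, prev_type;
};

static int alloc_chunk(struct chunk *c)
{
	if (c->addr) {
		if (c->prev_type == c->type && c->prev_size == c->size)
			return 0;	/* firmware restart: reuse as-is */
		free(c->addr);		/* stand-in for dma_free_coherent() */
		c->addr = NULL;
	}

	c->addr = malloc(c->size);	/* stand-in for dma_alloc_coherent() */
	if (!c->addr)
		return -1;

	c->prev_type = c->type;
	c->prev_size = c->size;
	return 0;
}

int main(void)
{
	struct chunk c = { .size = 4096, .type = 1 };

	if (alloc_chunk(&c))		/* first boot: fresh allocation */
		return 1;
	if (alloc_chunk(&c))		/* same type/size again: reused */
		return 1;
	c.size = 8192;
	if (alloc_chunk(&c))		/* size changed: freed, reallocated */
		return 1;
	free(c.addr);
	return 0;
}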
+
static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
{
- int i;
- struct target_mem_chunk *chunk;
+ struct target_mem_chunk *chunk, *mlo_chunk;
+ struct ath12k_hw_group *ag = ab->ag;
+ int i, mlo_idx, ret;
+ int mlo_size = 0;
+
+ mutex_lock(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done) {
+ memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+ ag->mlo_mem.init_done = true;
+ }
ab->qmi.target_mem_delayed = false;
- for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
/* Allocate memory for the region and the functionality supported
@@ -2385,42 +2570,41 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
- /* Firmware reloads in recovery/resume.
- * In such cases, no need to allocate memory for FW again.
- */
- if (chunk->v.addr) {
- if (chunk->prev_type == chunk->type &&
- chunk->prev_size == chunk->size)
- goto this_chunk_done;
-
- /* cannot reuse the existing chunk */
- dma_free_coherent(ab->dev, chunk->prev_size,
- chunk->v.addr, chunk->paddr);
- chunk->v.addr = NULL;
+ ret = ath12k_qmi_alloc_chunk(ab, chunk);
+ if (ret)
+ goto err;
+ break;
+ case MLO_GLOBAL_MEM_REGION_TYPE:
+ mlo_size += chunk->size;
+ if (ag->mlo_mem.mlo_mem_size &&
+ mlo_size > ag->mlo_mem.mlo_mem_size) {
+ ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d",
+ mlo_size, ag->mlo_mem.mlo_mem_size);
+ ret = -EINVAL;
+ goto err;
}
- chunk->v.addr = dma_alloc_coherent(ab->dev,
- chunk->size,
- &chunk->paddr,
- GFP_KERNEL | __GFP_NOWARN);
- if (!chunk->v.addr) {
- if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
- ab->qmi.target_mem_delayed = true;
- ath12k_warn(ab,
- "qmi dma allocation failed (%d B type %u), will try later with small size\n",
- chunk->size,
- chunk->type);
- ath12k_qmi_free_target_mem_chunk(ab);
- return 0;
+ mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+ if (mlo_chunk->paddr) {
+ if (chunk->size != mlo_chunk->size) {
+ ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d",
+ mlo_idx, chunk->size, mlo_chunk->size);
+ ret = -EINVAL;
+ goto err;
}
- ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
- chunk->type, chunk->size);
- return -ENOMEM;
+ } else {
+ mlo_chunk->size = chunk->size;
+ mlo_chunk->type = chunk->type;
+ ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
+ if (ret)
+ goto err;
+ memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
}
- chunk->prev_type = chunk->type;
- chunk->prev_size = chunk->size;
-this_chunk_done:
+ chunk->paddr = mlo_chunk->paddr;
+ chunk->v.addr = mlo_chunk->v.addr;
+ mlo_idx++;
+
break;
default:
ath12k_warn(ab, "memory type %u not supported\n",
@@ -2430,10 +2614,39 @@ this_chunk_done:
break;
}
}
+
+ if (!ag->mlo_mem.mlo_mem_size) {
+ ag->mlo_mem.mlo_mem_size = mlo_size;
+ } else if (ag->mlo_mem.mlo_mem_size != mlo_size) {
+ ath12k_err(ab, "QMI MLO memory size error, expected size is %d but requested size is %d",
+ ag->mlo_mem.mlo_mem_size, mlo_size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mutex_unlock(&ag->mutex);
+
return 0;
+
+err:
+ ath12k_qmi_free_target_mem_chunk(ab);
+
+ mutex_unlock(&ag->mutex);
+
+ /* The firmware will attempt to request memory in smaller chunks
+ * on the next try. However, the current caller should be notified
+ * that this instance of request parsing was successful, so report
+ * success (0) in that case.
+ */
+ if (ret == -EAGAIN)
+ ret = 0;
+
+ return ret;
}
-static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req = {};
struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
@@ -2619,8 +2832,10 @@ out:
return ret;
}
-static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
- enum ath12k_qmi_bdf_type type)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+ enum ath12k_qmi_bdf_type type)
{
struct device *dev = ab->dev;
char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
@@ -2791,7 +3006,9 @@ out:
return ret;
}
-static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
@@ -3023,6 +3240,8 @@ void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
+ clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
+
ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan mode off\n");
@@ -3079,9 +3298,69 @@ ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
return 0;
}
-static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
{
- struct ath12k_base *ab = qmi->ab;
+ struct ath12k_qmi *qmi = &ab->qmi;
+
+ spin_lock(&qmi->event_lock);
+
+ if (ath12k_qmi_get_event_block(qmi))
+ ath12k_qmi_set_event_block(qmi, false);
+
+ spin_unlock(&qmi->event_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
+ ab->device_id);
+
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+
+ if (!ab || ab->qmi.num_radios == U8_MAX)
+ return false;
+ }
+
+ return true;
+}
+
+static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ spin_lock(&ab->qmi.event_lock);
+
+ if (ath12k_qmi_get_event_block(&ab->qmi)) {
+ spin_unlock(&ab->qmi.event_lock);
+ return ab;
+ }
+
+ spin_unlock(&ab->qmi.event_lock);
+ }
+
+ return NULL;
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab, *block_ab;
+ struct ath12k_hw_group *ag = ab->ag;
int ret;
ath12k_qmi_phy_cap_send(ab);
@@ -3092,16 +3371,30 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
return ret;
}
- ret = ath12k_qmi_host_cap_send(ab);
- if (ret < 0) {
- ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
- return ret;
+ spin_lock(&qmi->event_lock);
+
+ ath12k_qmi_set_event_block(qmi, true);
+
+ spin_unlock(&qmi->event_lock);
+
+ mutex_lock(&ag->mutex);
+
+ if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
+ ath12k_core_hw_group_set_mlo_capable(ag);
+
+ block_ab = ath12k_qmi_hw_group_find_blocked(ag);
+ if (block_ab)
+ ath12k_qmi_trigger_host_cap(block_ab);
}
+ mutex_unlock(&ag->mutex);
+
return ret;
}
-static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3115,7 +3408,9 @@ static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
return ret;
}
-static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3280,6 +3575,21 @@ static const struct qmi_ops ath12k_qmi_ops = {
.del_server = ath12k_qmi_ops_del_server,
};
+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
+ ab->device_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
static void ath12k_qmi_driver_event_work(struct work_struct *work)
{
struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@@ -3306,7 +3616,6 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_REQUEST_MEM:
ret = ath12k_qmi_event_mem_request(qmi);
@@ -3320,20 +3629,28 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
if (ab->is_reset)
ath12k_hal_dump_srng_stats(ab);
+
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
clear_bit(ATH12K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
- clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
- ath12k_core_qmi_firmware_ready(ab);
- set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+ ret = ath12k_core_qmi_firmware_ready(ab);
+ if (!ret)
+ set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+ &ab->dev_flags);
break;
+ case ATH12K_QMI_EVENT_HOST_CAP:
+ ret = ath12k_qmi_event_host_cap(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
default:
ath12k_warn(ab, "invalid event type: %d", event->type);
break;
@@ -3386,11 +3703,15 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)
void ath12k_qmi_deinit_service(struct ath12k_base *ab)
{
+ if (!ab->qmi.ab)
+ return;
+
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
ath12k_qmi_m3_free(ab);
ath12k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.ab = NULL;
}
void ath12k_qmi_free_resource(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 0dfcbd8cb59b..45d7c3fcafdd 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -68,6 +68,7 @@ enum ath12k_qmi_event_type {
ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
ATH12K_QMI_EVENT_POWER_UP,
ATH12K_QMI_EVENT_POWER_DOWN,
+ ATH12K_QMI_EVENT_HOST_CAP,
ATH12K_QMI_EVENT_MAX,
};
@@ -142,6 +143,10 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+
+ /* protected with struct ath12k_qmi::event_lock */
+ bool block_event;
+
u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
@@ -167,6 +172,7 @@ enum ath12k_qmi_target_mem {
BDF_MEM_REGION_TYPE = 0x2,
M3_DUMP_REGION_TYPE = 0x3,
CALDB_MEM_REGION_TYPE = 0x4,
+ MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
PAGEABLE_MEM_REGION_TYPE = 0x9,
};
@@ -594,11 +600,26 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ qmi->block_event = block;
+}
+
+static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ return qmi->block_event;
+}
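+
These helpers implement the host-cap gating: each device blocks itself when its QMI server arrives, and once every group member has reported a valid radio count, one blocked device is unblocked and posts its HOST_CAP event. A simplified single-threaded model (the event_lock/group-mutex locking, and how the core kicks subsequent devices forward, are not modeled):

#include <stdbool.h>
#include <stdio.h>

#define NUM_DEVICES 2

static bool blocked[NUM_DEVICES];
static int num_radios[NUM_DEVICES] = { -1, -1 };	/* -1: not reported */

static bool group_host_cap_ready(void)
{
	for (int i = 0; i < NUM_DEVICES; i++)
		if (num_radios[i] < 0)
			return false;
	return true;
}

static void server_arrive(int dev, int radios)
{
	num_radios[dev] = radios;
	blocked[dev] = true;	/* block until the whole group has reported */

	if (!group_host_cap_ready())
		return;

	for (int i = 0; i < NUM_DEVICES; i++)
		if (blocked[i]) {	/* kick one blocked device forward */
			blocked[i] = false;
			printf("device %d sends host cap\n", i);
			break;
		}
}

int main(void)
{
	server_arrive(0, 2);	/* group incomplete: device 0 stays blocked */
	server_arrive(1, 1);	/* group complete: device 0 is triggered */
	return 0;
}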
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
void ath12k_qmi_deinit_service(struct ath12k_base *ab);
int ath12k_qmi_init_service(struct ath12k_base *ab);
void ath12k_qmi_free_resource(struct ath12k_base *ab);
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index dced2aa9ba1a..4dd6cdf84571 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -821,6 +821,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+ bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
+ struct wmi_vdev_create_mlo_params *ml_params;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
@@ -830,7 +832,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
* both the bands.
*/
len = sizeof(*cmd) + TLV_HDR_SIZE +
- (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
+ (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -879,6 +882,21 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
txrx_streams->supported_rx_streams =
cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
+ ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ if (is_ml_vdev) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
+ sizeof(*ml_params));
+ ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
args->if_id, args->type, args->subtype,
@@ -1020,19 +1038,27 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
+ struct wmi_vdev_start_mlo_params *ml_params;
+ struct wmi_partner_link_info *partner_info;
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan;
struct wmi_tlv *tlv;
void *ptr;
- int ret, len;
+ int ret, len, i, ml_arg_size = 0;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+ if (!restart && arg->ml.enabled) {
+ ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ len += ml_arg_size;
+ }
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1085,6 +1111,61 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
ptr += sizeof(*tlv);
+ if (ml_arg_size) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
+ sizeof(*ml_params));
+
+ ml_params->flags = le32_encode_bits(arg->ml.enabled,
+ ATH12K_WMI_FLAG_MLO_ENABLED) |
+ le32_encode_bits(arg->ml.assoc_link,
+ ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
+ le32_encode_bits(arg->ml.mcast_link,
+ ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
+ le32_encode_bits(arg->ml.link_add,
+ ATH12K_WMI_FLAG_MLO_LINK_ADD);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
+ arg->vdev_id, ml_params->flags);
+
+ ptr += sizeof(*ml_params);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ ptr += TLV_HDR_SIZE;
+
+ partner_info = ptr;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ partner_info->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ sizeof(*partner_info));
+ partner_info->vdev_id =
+ cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ ether_addr_copy(partner_info->vdev_addr.addr,
+ arg->ml.partner_info[i].addr);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
+ partner_info->vdev_id, partner_info->hw_link_id,
+ partner_info->vdev_addr.addr);
+
+ partner_info++;
+ }
+
+ ptr = partner_info;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->freq, arg->mode);
@@ -1149,9 +1230,14 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
- int ret;
+ int ret, len;
+ struct wmi_peer_create_mlo_params *ml_param;
+ void *ptr;
+ struct wmi_tlv *tlv;
- skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1163,9 +1249,23 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
cmd->peer_type = cpu_to_le32(arg->peer_type);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ptr = skb->data + sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_param));
+ ptr += TLV_HDR_SIZE;
+ ml_param = ptr;
+ ml_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
+ sizeof(*ml_param));
+ if (arg->ml_enabled)
+ ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ ptr += sizeof(*ml_param);
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
- "WMI peer create vdev_id %d peer_addr %pM\n",
- arg->vdev_id, arg->peer_addr);
+ "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
+ arg->vdev_id, arg->peer_addr, ml_param->flags);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
@@ -2001,12 +2101,15 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct ath12k_wmi_vht_rate_set_params *mcs;
struct ath12k_wmi_he_rate_set_params *he_mcs;
struct ath12k_wmi_eht_rate_set_params *eht_mcs;
+ struct wmi_peer_assoc_mlo_params *ml_params;
+ struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
+ __le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
sizeof(u32));
@@ -2018,8 +2121,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * arg->peer_he_mcs_count) +
- TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
- TLV_HDR_SIZE + TLV_HDR_SIZE;
+ TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
+
+ if (arg->ml.enabled)
+ len += TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+ else
+ len += (2 * TLV_HDR_SIZE);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -2143,12 +2251,38 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*he_mcs);
}
- /* MLO header tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? sizeof(*ml_params) : 0;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (!len)
+ goto skip_ml_params;
+
+ ml_params = ptr;
+ ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
+ len);
+ ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.assoc_link)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+ if (arg->ml.primary_umac)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.logical_link_idx_valid)
+ ml_params->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
+
+ if (arg->ml.peer_id_valid)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
+
+ ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
+ ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
+ ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
+ ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+ ptr += sizeof(*ml_params);
+
+skip_ml_params:
/* Loop through the EHT rate set */
len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
tlv = ptr;
@@ -2165,12 +2299,45 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*eht_mcs);
}
- /* ML partner links tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
+ /* fill ML Partner links */
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (len == 0)
+ goto send;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
+
+ partner_info = ptr;
+ partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
+ sizeof(*partner_info));
+ partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.partner_info[i].assoc_link)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.partner_info[i].primary_umac)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.partner_info[i].logical_link_idx_valid) {
+ v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
+ partner_info->flags |= v;
+ }
+
+ partner_info->logical_link_idx =
+ cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
+ ptr += sizeof(*partner_info);
+ }
+
+send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
@@ -4495,6 +4662,9 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
caps->eht_cap_info_internal);
}
+ pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
+ pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
+
return 0;
}
@@ -5192,6 +5362,9 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -6042,7 +6215,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
goto exit;
}
- if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
+ if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
(rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
WMI_RX_STATUS_ERR_CRC))) {
@@ -6171,7 +6344,8 @@ static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == state &&
- ar->scan.vdev_id == vdev_id) {
+ ar->scan.arvif &&
+ ar->scan.arvif->vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
}
@@ -6687,6 +6861,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const u32 *vdev_ids)
{
int i;
+ struct ieee80211_bss_conf *conf;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
@@ -6705,7 +6880,20 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
ahvif = arvif->ahvif;
- if (arvif->is_up && ahvif->vif->bss_conf.csa_active)
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
+ arvif->link_id);
+ continue;
+ }
+
+ conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+ if (!conf) {
+ ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ continue;
+ }
+
+ if (arvif->is_up && conf->csa_active)
ieee80211_csa_finish(ahvif->vif, 0);
}
rcu_read_unlock();
@@ -6750,6 +6938,7 @@ static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
const void **tb;
+ struct ath12k_mac_get_any_chanctx_conf_arg arg;
const struct ath12k_wmi_pdev_radar_event *ev;
struct ath12k *ar;
int ret;
@@ -6785,13 +6974,22 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
+ arg.ar = ar;
+ arg.chanctx_conf = NULL;
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
+ ath12k_mac_get_any_chanctx_conf_iter, &arg);
+ if (!arg.chanctx_conf) {
+ ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
+ goto exit;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
ev->pdev_id);
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
exit:
rcu_read_unlock();
@@ -7146,6 +7344,79 @@ static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
kfree(tb);
}
+static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_setup_complete_event *ev;
+ struct ath12k *ar = NULL;
+ struct ath12k_pdev *pdev;
+ const void **tb;
+ int ret, i;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
+ goto skip_lookup;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
+ ar = pdev->ar;
+ break;
+ }
+ }
+
+skip_lookup:
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
+ ev->pdev_id, ev->status);
+ goto out;
+ }
+
+ ar->mlo_setup_status = le32_to_cpu(ev->status);
+ complete(&ar->mlo_setup_done);
+
+out:
+ kfree(tb);
+}
+
+static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_teardown_complete_event *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch teardown complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7250,13 +7521,6 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_P2P_NOA_EVENTID:
ath12k_wmi_p2p_noa_event(ab, skb);
break;
- /* add Unsupported events here */
- case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
- case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
- case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "ignoring unsupported event 0x%x\n", id);
- break;
case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
break;
@@ -7272,6 +7536,25 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath12k_wmi_gtk_offload_status_event(ab, skb);
break;
+ case WMI_MLO_SETUP_COMPLETE_EVENTID:
+ ath12k_wmi_event_mlo_setup_complete(ab, skb);
+ break;
+ case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
+ ath12k_wmi_event_teardown_complete(ab, skb);
+ break;
+ /* add Unsupported events (rare) here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ /* add Unsupported events (frequent) here */
+ case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
+ case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+ /* debug output for these would flood the log, hence silently ignore */
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -8088,3 +8371,104 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar,
return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
+
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
+{
+ struct wmi_mlo_setup_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ u32 *partner_links, num_links;
+ int i, ret, buf_len, arg_len;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+
+ num_links = mlo_params->num_partner_links;
+ arg_len = num_links * sizeof(u32);
+ buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_setup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
+ sizeof(*cmd));
+ cmd->mld_group_id = mlo_params->group_id;
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+ ptr += TLV_HDR_SIZE;
+
+ partner_links = ptr;
+ for (i = 0; i < num_links; i++)
+ partner_links[i] = mlo_params->partner_link_id[i];
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_mlo_ready(struct ath12k *ar)
+{
+ struct wmi_mlo_ready_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_ready_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_mlo_teardown(struct ath12k *ar)
+{
+ struct wmi_mlo_teardown_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
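The three exported helpers above only post the WMI command; success is reported asynchronously through WMI_MLO_SETUP_COMPLETE_EVENTID. A minimal caller sketch, assuming only names this patch introduces (mlo_setup_done, mlo_setup_status, WMI_MLO_CMD_TIMEOUT_HZ); the wrapper itself is hypothetical, not driver code:

/* Hypothetical bring-up helper; all names follow this patch. */
static int example_mlo_bringup(struct ath12k *ar, struct wmi_mlo_setup_arg *arg)
{
	int ret;

	reinit_completion(&ar->mlo_setup_done);

	ret = ath12k_wmi_mlo_setup(ar, arg);
	if (ret)
		return ret;

	/* ath12k_wmi_event_mlo_setup_complete() fires this completion */
	if (!wait_for_completion_timeout(&ar->mlo_setup_done,
					 WMI_MLO_CMD_TIMEOUT_HZ))
		return -ETIMEDOUT;

	if (ar->mlo_setup_status)
		return -EINVAL;

	return ath12k_wmi_mlo_ready(ar);
}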
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 6f55dbdf629d..b6a197389277 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -285,6 +285,7 @@ enum wmi_cmd_group {
WMI_GRP_TWT = 0x3e,
WMI_GRP_MOTION_DET = 0x3f,
WMI_GRP_SPATIAL_REUSE = 0x40,
+ WMI_GRP_MLO = 0x48,
};
#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
@@ -665,6 +666,10 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+ WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_TLV_CMD(WMI_GRP_MLO),
+ WMI_MLO_SETUP_CMDID,
+ WMI_MLO_READY_CMDID,
+ WMI_MLO_TEARDOWN_CMDID,
};
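As a worked example of the group arithmetic (a sketch, assuming WMI_TLV_CMD() maps straight onto the WMI_CMD_GRP() macro defined above):

/*
 * With WMI_GRP_MLO == 0x48:
 *   WMI_MLO_LINK_SET_ACTIVE_CMDID = (0x48 << 12) | 0x1 = 0x48001
 *   WMI_MLO_SETUP_CMDID           = 0x48002
 *   WMI_MLO_READY_CMDID           = 0x48003
 *   WMI_MLO_TEARDOWN_CMDID        = 0x48004
 */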
enum wmi_tlv_event_id {
@@ -706,6 +711,8 @@ enum wmi_tlv_event_id {
WMI_PDEV_RAP_INFO_EVENTID,
WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID =
+ WMI_SERVICE_READY_EXT2_EVENTID + 4,
WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_STOPPED_EVENTID,
WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@@ -747,6 +754,7 @@ enum wmi_tlv_event_id {
WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_MGMT_RX_FW_CONSUMED_EVENTID = WMI_HOST_FILS_DISCOVERY_EVENTID + 3,
WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
WMI_TX_ADDBA_COMPLETE_EVENTID,
WMI_BA_RSP_SSN_EVENTID,
@@ -845,6 +853,8 @@ enum wmi_tlv_event_id {
WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+ WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
WMI_DCC_GET_STATS_RESP_EVENTID,
@@ -874,6 +884,9 @@ enum wmi_tlv_event_id {
WMI_TWT_DEL_DIALOG_EVENTID,
WMI_TWT_PAUSE_DIALOG_EVENTID,
WMI_TWT_RESUME_DIALOG_EVENTID,
+ WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO),
+ WMI_MLO_SETUP_COMPLETE_EVENTID,
+ WMI_MLO_TEARDOWN_COMPLETE_EVENTID,
};
enum wmi_tlv_pdev_param {
@@ -1929,6 +1942,19 @@ enum wmi_tlv_tag {
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
+ WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
+ WMI_TAG_MLO_TX_SEND_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC,
+ WMI_TAG_MLO_SETUP_CMD = 0x3C9,
+ WMI_TAG_MLO_SETUP_COMPLETE_EVENT,
+ WMI_TAG_MLO_READY_CMD,
+ WMI_TAG_MLO_TEARDOWN_CMD,
+ WMI_TAG_MLO_TEARDOWN_COMPLETE,
+ WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0,
+ WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5,
+ WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6,
+ WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
@@ -2690,6 +2716,8 @@ struct ath12k_wmi_caps_ext_params {
__le32 eht_cap_info_internal;
__le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE];
__le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE];
+ __le32 eml_capability;
+ __le32 mld_capability;
} __packed;
/* 2 word representation of MAC addr */
@@ -2740,6 +2768,7 @@ struct ath12k_wmi_vdev_create_arg {
u8 if_stats_id;
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
+ u8 mld_addr[ETH_ALEN];
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@@ -2766,6 +2795,33 @@ struct ath12k_wmi_vdev_txrx_streams_params {
__le32 supported_rx_streams;
} __packed;
+struct wmi_vdev_create_mlo_params {
+ __le32 tlv_header;
+ struct ath12k_wmi_mac_addr_params mld_macaddr;
+} __packed;
+
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV BIT(5)
+#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT BIT(6)
+#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE BIT(7)
+#define ATH12K_WMI_FLAG_MLO_LINK_ADD BIT(8)
+
+struct wmi_vdev_start_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+} __packed;
+
+struct wmi_partner_link_info {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ struct ath12k_wmi_mac_addr_params vdev_addr;
+} __packed;
+
struct wmi_vdev_delete_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -2909,6 +2965,27 @@ enum wmi_phy_mode {
MODE_MAX = 33,
};
+#define ATH12K_WMI_MLO_MAX_LINKS 4
+
+struct wmi_ml_partner_info {
+ u32 vdev_id;
+ u32 hw_link_id;
+ u8 addr[ETH_ALEN];
+ bool assoc_link;
+ bool primary_umac;
+ bool logical_link_idx_valid;
+ u32 logical_link_idx;
+};
+
+struct wmi_ml_arg {
+ bool enabled;
+ bool assoc_link;
+ bool mcast_link;
+ bool link_add;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_vdev_start_req_arg {
u32 vdev_id;
u32 freq;
@@ -2946,12 +3023,19 @@ struct wmi_vdev_start_req_arg {
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
u32 punct_bitmap;
+ struct wmi_ml_arg ml;
};
struct ath12k_wmi_peer_create_arg {
const u8 *peer_addr;
u32 peer_type;
u32 vdev_id;
+ bool ml_enabled;
+};
+
+struct wmi_peer_create_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
};
struct ath12k_wmi_pdev_set_regdomain_arg {
@@ -3618,6 +3702,24 @@ struct wmi_vdev_install_key_arg {
#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
+ (ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
+
+struct peer_assoc_mlo_params {
+ bool enabled;
+ bool assoc_link;
+ bool primary_umac;
+ bool peer_id_valid;
+ bool logical_link_idx_valid;
+ bool bridge_peer;
+ u8 mld_addr[ETH_ALEN];
+ u32 logical_link_idx;
+ u32 ml_peer_id;
+ u32 ieee_link_id;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_rate_set_arg {
u32 num_rates;
u8 rates[WMI_MAX_SUPPORTED_RATES];
@@ -3692,8 +3794,36 @@ struct ath12k_wmi_peer_assoc_arg {
u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet;
u32 punct_bitmap;
+ bool is_assoc;
+ struct peer_assoc_mlo_params ml;
};
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+
+struct wmi_peer_assoc_mlo_partner_info_params {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ __le32 flags;
+ __le32 logical_link_idx;
+} __packed;
+
+struct wmi_peer_assoc_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+ struct ath12k_wmi_mac_addr_params mld_addr;
+ __le32 logical_link_idx;
+ __le32 ml_peer_id;
+ __le32 ieee_link_id;
+ __le32 emlsr_trans_timeout_us;
+ __le32 emlsr_trans_delay_us;
+ __le32 emlsr_padding_delay_us;
+} __packed;
+
struct wmi_peer_assoc_complete_cmd {
__le32 tlv_header;
struct ath12k_wmi_mac_addr_params peer_macaddr;
@@ -4815,6 +4945,7 @@ struct wmi_probe_tmpl_cmd {
#define MAX_RADIOS 2
+#define WMI_MLO_CMD_TIMEOUT_HZ (5 * HZ)
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
@@ -4911,6 +5042,43 @@ struct wmi_twt_disable_event {
__le32 status;
} __packed;
+struct wmi_mlo_setup_cmd {
+ __le32 tlv_header;
+ __le32 mld_group_id;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_mlo_setup_arg {
+ __le32 group_id;
+ u8 num_partner_links;
+ u8 *partner_link_id;
+};
+
+struct wmi_mlo_ready_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+enum wmi_mlo_tear_down_reason_code_type {
+ WMI_MLO_TEARDOWN_SSR_REASON,
+};
+
+struct wmi_mlo_teardown_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 reason_code;
+} __packed;
+
+struct wmi_mlo_setup_complete_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
+struct wmi_mlo_teardown_complete_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@@ -5636,5 +5804,8 @@ int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
struct ath12k_link_vif *arvif);
int ath12k_wmi_sta_keepalive(struct ath12k *ar,
const struct wmi_sta_keepalive_arg *arg);
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params);
+int ath12k_wmi_mlo_ready(struct ath12k *ar);
+int ath12k_wmi_mlo_teardown(struct ath12k *ar);
#endif
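One subtlety in the peer-assoc changes earlier in this patch: both MLO TLV headers are always emitted, merely with zero payload length when MLO is disabled. A sketch of the length accounting, assuming the structures declared above (this helper is illustrative only, not part of the driver):

static size_t example_ml_tlv_len(const struct peer_assoc_mlo_params *ml)
{
	/* WMI_TAG_ARRAY_STRUCT header for the ml_params TLV, always present */
	size_t len = TLV_HDR_SIZE;

	if (ml->enabled)
		len += sizeof(struct wmi_peer_assoc_mlo_params);

	/* partner-link array header, always present (payload may be empty) */
	len += TLV_HDR_SIZE;
	if (ml->enabled)
		len += ml->num_partner_links *
		       sizeof(struct wmi_peer_assoc_mlo_partner_info_params);

	return len;
}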
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 61b2e3f15f0e..72ce321f2a77 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1441,6 +1441,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index acc84e6711b0..e5e274bc9e68 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -193,7 +193,7 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
struct ath_hw_antcomb_conf *conf)
{
- /* set alt to the conf with maximun ratio */
+ /* set alt to the conf with maximum ratio */
if (antcomb->first_ratio && antcomb->second_ratio) {
if (antcomb->rssi_second > antcomb->rssi_third) {
/* first alt*/
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d08ea0b28530..b26224480041 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -395,7 +395,7 @@ static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
ah->config.hw_hang_checks |= HW_MAC_HANG;
}
-/* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
int ar9002_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index e9bd13eeee92..6595eca74997 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1170,7 +1170,7 @@ exit:
return false;
}
-/* Sets up the AR9003 hardware familiy callbacks */
+/* Sets up the AR9003 hardware family callbacks */
void ar9003_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 2b9c07961cd7..3f0543e55d9b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -637,7 +637,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
* same time. Since BT's calibration doesn't happen
* that often, we'll let BT complete calibration, then
* continue to wait for cal_grant from BT.
- * Orginal: Wait BT_CAL_GRANT.
+ * Original: Wait BT_CAL_GRANT.
* New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
* BT_CAL_DONE -> Wait BT_CAL_GRANT.
*/
@@ -747,7 +747,7 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
* BT is sleeping. Check if BT wakes up during
* WLAN calibration. If BT wakes up during
* WLAN calibration, need to go through all
- * message exchanges again and recal.
+ * message exchanges again and recalibrate.
*/
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
(AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index ad72a30b67c3..e13873fb8e2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -246,7 +246,7 @@
/*
- * MRC Feild Definitions
+ * MRC Field Definitions
*/
#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
#define AR_PHY_SGI_DSC_MAN_S 4
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 29ca65a732a6..a728cc0387df 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -338,7 +338,7 @@ struct ath_chanctx {
struct ath_beacon_config beacon;
struct ath9k_hw_cal_data caldata;
- struct timespec64 tsf_ts;
+ ktime_t tsf_ts;
u64 tsf_val;
u32 last_beacon;
@@ -592,8 +592,8 @@ void ath_txq_schedule_all(struct ath_softc *sc);
int ath_tx_init(struct ath_softc *sc, int nbufs);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
-u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
- int width, int half_gi, bool shortPreamble);
+u32 ath_pkt_duration(u8 rix, int pktlen, int width,
+ int half_gi, bool shortPreamble);
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1011,13 +1011,15 @@ struct ath_softc {
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
struct completion go_beacon;
- struct timespec64 last_event_time;
+ ktime_t last_event_time;
#endif
unsigned long driver_data;
u8 gtt_cnt;
u32 intrstatus;
+ u32 rx_active_check_time;
+ u32 rx_active_count;
u16 ps_flags; /* PS_* */
bool ps_enabled;
bool ps_idle;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b399a7926ef5..4a27e3753c03 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -293,7 +293,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc)
/* Modify TSF as required and update the HW. */
avp->chanctx->tsf_val += tsfadjust;
if (sc->cur_chan == avp->chanctx) {
- offset = ath9k_hw_get_tsf_offset(&avp->chanctx->tsf_ts, NULL);
+ offset = ath9k_hw_get_tsf_offset(avp->chanctx->tsf_ts, 0);
ath9k_hw_settsf64(sc->sc_ah, avp->chanctx->tsf_val + offset);
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 4b331c85509c..b4ab85bd7895 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -16,29 +16,25 @@
#include "hw.h"
#include "hw-ops.h"
+#include <linux/sort.h>
#include <linux/export.h>
/* Common calibration code */
+static int rcmp_i16(const void *x, const void *y)
+{
+ /* Sort in reverse order. */
+ return *(int16_t *)y - *(int16_t *)x;
+}
static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
{
- int16_t nfval;
- int16_t sort[ATH9K_NF_CAL_HIST_MAX];
- int i, j;
-
- for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
- sort[i] = nfCalBuffer[i];
+ int16_t nfcal[ATH9K_NF_CAL_HIST_MAX];
- for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
- for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
- if (sort[j] > sort[j - 1])
- swap(sort[j], sort[j - 1]);
- }
- }
- nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
+ memcpy(nfcal, nfCalBuffer, sizeof(nfcal));
+ sort(nfcal, ATH9K_NF_CAL_HIST_MAX, sizeof(int16_t), rcmp_i16, NULL);
- return nfval;
+ return nfcal[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
}
static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
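The rewrite above swaps a hand-rolled bubble sort for the kernel's sort() with a descending comparator; the median of the odd-sized history buffer is then simply the middle element. A standalone userspace illustration of the same selection (hypothetical sample values; ATH9K_NF_CAL_HIST_MAX is 5 upstream):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ATH9K_NF_CAL_HIST_MAX 5

static int rcmp_i16(const void *x, const void *y)
{
	/* sort in reverse (descending) order, as the kernel helper does */
	return *(const int16_t *)y - *(const int16_t *)x;
}

int main(void)
{
	int16_t hist[ATH9K_NF_CAL_HIST_MAX] = { -96, -110, -98, -102, -101 };

	qsort(hist, ATH9K_NF_CAL_HIST_MAX, sizeof(int16_t), rcmp_i16);
	/* the median of an odd-sized buffer is its middle element */
	printf("median NF: %d\n", hist[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1]);
	return 0;	/* prints "median NF: -101" */
}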
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 571062f2e82a..bae24e3d3168 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -17,7 +17,7 @@
#include "ath9k.h"
/* Set/change channels. If the channel is really being changed, it's done
- * by reseting the chip. To accomplish this we must first cleanup any pending
+ * by resetting the chip. To accomplish this we must first cleanup any pending
* DMA, then restart stuff.
*/
static int ath_set_channel(struct ath_softc *sc)
@@ -232,16 +232,11 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
static u32 chanctx_event_delta(struct ath_softc *sc)
{
- u64 ms;
- struct timespec64 ts, *old;
+ ktime_t ts = ktime_get_raw();
+ s64 ms = ktime_ms_delta(ts, sc->last_event_time);
- ktime_get_raw_ts64(&ts);
- old = &sc->last_event_time;
- ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
- ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
sc->last_event_time = ts;
-
- return (u32)ms;
+ return ms;
}
void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
@@ -334,8 +329,8 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
{
struct ath_chanctx *prev, *cur;
- struct timespec64 ts;
u32 cur_tsf, prev_tsf, beacon_int;
+ ktime_t ts;
s32 offset;
beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
@@ -346,12 +341,12 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
if (!prev->switch_after_beacon)
return;
- ktime_get_raw_ts64(&ts);
+ ts = ktime_get_raw();
cur_tsf = (u32) cur->tsf_val +
- ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
+ ath9k_hw_get_tsf_offset(cur->tsf_ts, ts);
prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf;
- prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts);
+ prev_tsf -= ath9k_hw_get_tsf_offset(prev->tsf_ts, ts);
/* Adjust the TSF time of the AP chanctx to keep its beacons
* at half beacon interval offset relative to the STA chanctx.
@@ -691,7 +686,7 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
*/
tsf_time = sc->sched.switch_start_time;
tsf_time -= (u32) sc->cur_chan->tsf_val +
- ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+ ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
tsf_time += ath9k_hw_gettsf32(ah);
sc->sched.beacon_adjust = false;
@@ -1230,10 +1225,10 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_chanctx *old_ctx;
- struct timespec64 ts;
bool measure_time = false;
bool send_ps = false;
bool queues_stopped = false;
+ ktime_t ts;
spin_lock_bh(&sc->chan_lock);
if (!sc->next_chan) {
@@ -1260,7 +1255,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_unlock_bh(&sc->chan_lock);
if (sc->next_chan == &sc->offchannel.chan) {
- ktime_get_raw_ts64(&ts);
+ ts = ktime_get_raw();
measure_time = true;
}
@@ -1277,7 +1272,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_lock_bh(&sc->chan_lock);
if (sc->cur_chan != &sc->offchannel.chan) {
- ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
+ sc->cur_chan->tsf_ts = ktime_get_raw();
sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
}
}
@@ -1303,7 +1298,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
ath_set_channel(sc);
if (measure_time)
sc->sched.channel_switch_time =
- ath9k_hw_get_tsf_offset(&ts, NULL);
+ ath9k_hw_get_tsf_offset(ts, 0);
/*
* A reset will ensure that all queues are woken up,
* so there is no need to awaken them again.
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 4b27445a5fb8..628eeec4b82f 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -734,7 +734,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
ATH9K_RX_FILTER_PHYRADAR |
ATH9K_RX_FILTER_PHYERR);
- /* TODO: usually this should not be neccesary, but for some reason
+ /* TODO: usually this should not be necessary, but for some reason
* (or in some mode?) the trigger must be called after the
* configuration, otherwise the register will have its values reset
* (on my ar9220 to value 0x01002310)
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index eff894958a73..74a0134075cf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -750,6 +750,7 @@ static int read_file_reset(struct seq_file *file, void *data)
[RESET_TYPE_CALIBRATION] = "Calibration error",
[RESET_TX_DMA_ERROR] = "Tx DMA stop error",
[RESET_RX_DMA_ERROR] = "Rx DMA stop error",
+ [RESET_TYPE_RX_INACTIVE] = "Rx path inactive",
};
int i;
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 389459c04d14..cb3e75969875 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -53,6 +53,7 @@ enum ath_reset_type {
RESET_TYPE_CALIBRATION,
RESET_TX_DMA_ERROR,
RESET_RX_DMA_ERROR,
+ RESET_TYPE_RX_INACTIVE,
__RESET_TYPE_MAX
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 3689e12db9f7..2fb73a5e1d51 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -79,7 +79,7 @@ static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
const int DFS_UPPER_BIN_OFFSET = 64;
/* if detected radar on both channels, select the significant one */
if (is_ctl && is_ext) {
- /* first check wether channels have 'strong' bins */
+ /* first check whether channels have 'strong' bins */
is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 7265766cddbd..fe9abe8cd268 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1198,7 +1198,7 @@ static int ath9k_hif_request_firmware(struct hif_device_usb *hif_dev,
filename = FIRMWARE_AR9271;
/* expected fw locations:
- * - htc_9271.fw (stable version 1.3, depricated)
+ * - htc_9271.fw (stable version 1.3, deprecated)
*/
snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
"%s", filename);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e2bef099adb3..f9a774bd0e13 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1847,20 +1847,11 @@ fail:
return -EINVAL;
}
-u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
+u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur)
{
- struct timespec64 ts;
- s64 usec;
-
- if (!cur) {
- ktime_get_raw_ts64(&ts);
- cur = &ts;
- }
-
- usec = cur->tv_sec * 1000000ULL + cur->tv_nsec / 1000;
- usec -= last->tv_sec * 1000000ULL + last->tv_nsec / 1000;
-
- return (u32) usec;
+ if (cur == 0)
+ cur = ktime_get_raw();
+ return ktime_us_delta(cur, last);
}
EXPORT_SYMBOL(ath9k_hw_get_tsf_offset);
@@ -1871,7 +1862,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
- struct timespec64 tsf_ts;
+ ktime_t tsf_ts;
u32 tsf_offset;
u64 tsf = 0;
int r;
@@ -1917,7 +1908,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
- ktime_get_raw_ts64(&tsf_ts);
+ tsf_ts = ktime_get_raw();
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -1951,7 +1942,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
/* Restore TSF */
- tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+ tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0);
ath9k_hw_settsf64(ah, tsf + tsf_offset);
if (AR_SREV_9280_20_OR_LATER(ah))
@@ -1975,7 +1966,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
* value after the initvals have been applied.
*/
if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
- tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+ tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0);
ath9k_hw_settsf64(ah, tsf + tsf_offset);
}
@@ -2149,7 +2140,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
/* When chip goes into network sleep, it could be woken
* up by MCI_INT interrupt caused by BT's HW messages
- * (LNA_xxx, CONT_xxx) which chould be in a very fast
+ * (LNA_xxx, CONT_xxx) which could be in a very fast
* rate (~100us). This will cause chip to leave and
* re-enter network sleep mode frequently, which in
* consequence will have WLAN MCI HW to generate lots of
@@ -2544,7 +2535,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
- * For AR9271 we will temporarilly uses the rx chainmax as read from
+ * For AR9271 we will temporarily use the rx chainmask as read from
* the EEPROM.
*/
if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 450ab19b1d4e..eaa07d6dbde0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -282,7 +282,7 @@ enum ath9k_hw_caps {
* an exact user defined pattern or de-authentication/disassoc pattern.
* @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four
* bytes of the pattern for user defined pattern, de-authentication and
- * disassociation patterns for all types of possible frames recieved
+ * disassociation patterns for all types of possible frames received
* of those types.
*/
@@ -1066,7 +1066,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
-u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
+u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index d1e5767aab3c..d078a59d7d3c 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -50,7 +50,36 @@ reset:
"tx hung, resetting the chip\n");
ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
return false;
+}
+
+#define RX_INACTIVE_CHECK_INTERVAL (4 * MSEC_PER_SEC)
+
+static bool ath_hw_rx_inactive_check(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 interval, count;
+
+ interval = jiffies_to_msecs(jiffies - sc->rx_active_check_time);
+ count = sc->rx_active_count;
+
+ if (interval < RX_INACTIVE_CHECK_INTERVAL)
+ return true; /* too soon to check */
+ sc->rx_active_count = 0;
+ sc->rx_active_check_time = jiffies;
+
+ /* Need at least one interrupt per second, and we should only react if
+ * we are within a factor two of the expected interval
+ */
+ if (interval > RX_INACTIVE_CHECK_INTERVAL * 2 ||
+ count >= interval / MSEC_PER_SEC)
+ return true;
+
+ ath_dbg(common, RESET,
+ "RX inactivity detected. Schedule chip reset\n");
+ ath9k_queue_reset(sc, RESET_TYPE_RX_INACTIVE);
+
+ return false;
}
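In words: the watchdog samples on a roughly four-second cadence, requires at least one RX interrupt per elapsed second, and abstains when the window stretched past twice the check interval (for instance after the work item was delayed). A few worked data points, assuming only the constants above:

/*
 * interval = 4500 ms, count = 4 -> 4 >= 4500 / 1000, RX deemed active
 * interval = 4500 ms, count = 3 -> 3 <  4, queue RESET_TYPE_RX_INACTIVE
 * interval = 9000 ms, any count -> window > 2 * 4000 ms, check skipped
 * interval = 2000 ms            -> too soon, counters left untouched
 */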
void ath_hw_check_work(struct work_struct *work)
@@ -58,8 +87,8 @@ void ath_hw_check_work(struct work_struct *work)
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_check_work.work);
- if (!ath_hw_check(sc) ||
- !ath_tx_complete_check(sc))
+ if (!ath_hw_check(sc) || !ath_tx_complete_check(sc) ||
+ !ath_hw_rx_inactive_check(sc))
return;
ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index f03d792732da..16203e7ecf29 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -251,7 +251,7 @@ struct ath_desc {
* when the descriptor is specifically marked to generate
* an interrupt with this flag. Descriptors should be
* marked periodically to ensure timely replenishing of the
- * supply needed for sending frames. Defering interrupts
+ * supply needed for sending frames. Deferring interrupts
* reduces system load and potentially allows more concurrent
* work to be done but if done too aggressively can cause
* senders to back up. When the hardware queue is left too
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b92c89dad8de..a70c94564814 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -249,8 +249,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
if (sc->cur_chan->tsf_val) {
u32 offset;
- offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
- NULL);
+ offset = ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
}
@@ -453,6 +452,7 @@ void ath9k_tasklet(struct tasklet_struct *t)
ath_rx_tasklet(sc, 0, true);
ath_rx_tasklet(sc, 0, false);
+ sc->rx_active_count++;
}
if (status & ATH9K_INT_TX) {
@@ -1001,7 +1001,7 @@ static bool ath9k_uses_beacons(int type)
static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
struct ieee80211_vif *vif)
{
- /* Use the first (configured) interface, but prefering AP interfaces. */
+ /* Use the first (configured) interface, but preferring AP interfaces. */
if (!iter_data->primary_beacon_vif) {
iter_data->primary_beacon_vif = vif;
} else {
@@ -1955,7 +1955,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
tsf = ath9k_hw_gettsf64(sc->sc_ah);
} else {
tsf = sc->cur_chan->tsf_val +
- ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+ ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
}
tsf += le64_to_cpu(avp->tsf_adjust);
ath9k_ps_restore(sc);
@@ -1974,7 +1974,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
tsf -= le64_to_cpu(avp->tsf_adjust);
- ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
+ avp->chanctx->tsf_ts = ktime_get_raw();
if (sc->cur_chan == avp->chanctx)
ath9k_hw_settsf64(sc->sc_ah, tsf);
avp->chanctx->tsf_val = tsf;
@@ -1990,7 +1990,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
+ avp->chanctx->tsf_ts = ktime_get_raw();
if (sc->cur_chan == avp->chanctx)
ath9k_hw_reset_tsf(sc->sc_ah);
avp->chanctx->tsf_val = 0;
@@ -2767,7 +2767,7 @@ void ath9k_fill_chanctx_ops(void)
#endif
static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct ath_softc *sc = hw->priv;
struct ath_vif *avp = (void *)vif->drv_priv;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 0c0624a3b40d..34c74ed99b7b 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1042,8 +1042,8 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
if (!!(rxs->encoding == RX_ENC_HT)) {
/* MCS rates */
- airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
- is_40, is_sgi, is_sp);
+ airtime += ath_pkt_duration(rxs->rate_idx, len,
+ is_40, is_sgi, is_sp);
} else {
phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 8d0b1730a9d5..ed4152cd44f0 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -60,7 +60,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
/*
- * Create Dissassociate / Deauthenticate packet filter
+ * Create Disassociate / Deauthenticate packet filter
*
* 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
* +--------------+----------+---------+--------+--------+----
@@ -70,7 +70,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
* The above is the management frame format for disassociate/
* deauthenticate pattern, from this we need to match the first byte
* of 'Frame Control' and DA, SA, and BSSID fields
- * (skipping 2nd byte of FC and Duration feild.
+ * (skipping 2nd byte of FC and Duration field.
*
* Disassociate pattern
* --------------------
@@ -225,7 +225,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
ath9k_stop_btcoex(sc);
/*
- * Enable wake up on recieving disassoc/deauth
+ * Enable wake up on receiving disassoc/deauth
* frame by default.
*/
ret = ath9k_wow_add_disassoc_deauth_pattern(sc);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35aa47a9db90..db07ce6dbc08 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -67,8 +67,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok);
-static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf);
+static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
@@ -208,10 +207,10 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
ARRAY_SIZE(bf->rates));
}
-static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
- struct sk_buff *skb)
+static void ath_txq_skb_done(struct ath_softc *sc, struct sk_buff *skb)
{
struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_txq *txq;
int q = fi->txq;
if (q < 0)
@@ -224,7 +223,7 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
}
static struct ath_atx_tid *
-ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+ath_get_skb_tid(struct ath_node *an, struct sk_buff *skb)
{
u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
return ATH_AN_2_TID(an, tidno);
@@ -294,13 +293,13 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
fi = get_frame_info(skb);
bf = fi->bf;
if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
if (fi->baw_tracked) {
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
sendbar = true;
}
@@ -315,8 +314,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
}
-static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf)
+static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf)
{
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
u16 seqno = bf->bf_state.seqno;
@@ -338,8 +336,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
}
-static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf)
+static void ath_tx_addto_baw(struct ath_atx_tid *tid, struct ath_buf *bf)
{
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
u16 seqno = bf->bf_state.seqno;
@@ -452,9 +449,8 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
return tbf;
}
-static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts, int txok,
- int *nframes, int *nbad)
+static void ath_tx_count_frames(struct ath_buf *bf, struct ath_tx_status *ts,
+ int txok, int *nframes, int *nbad)
{
u16 seq_st = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -557,7 +553,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
/*
* AR5416 can become deaf/mute when BA
* issue happens. Chip needs to be reset.
- * But AP code may have sychronization issues
+ * But AP code may have synchronization issues
* when perform internal reset in this routine.
* Only enable reset in STA mode for now.
*/
@@ -568,7 +564,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
__skb_queue_head_init(&bf_pending);
- ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
+ ath_tx_count_frames(bf, ts, txok, &nframes, &nbad);
while (bf) {
u16 seqno = bf->bf_state.seqno;
@@ -621,7 +617,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -651,7 +647,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* run out of tx buf.
*/
if (!tbf) {
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, NULL, ts,
@@ -752,7 +748,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
- tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+ tid = ath_get_skb_tid(an, bf->bf_mpdu);
ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
@@ -962,7 +958,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.stale = false;
if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
@@ -1012,13 +1008,13 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
if (bf_isampdu(bf))
- ath_tx_addto_baw(sc, tid, bf);
+ ath_tx_addto_baw(tid, bf);
break;
}
@@ -1114,8 +1110,8 @@ finish:
* width - 0 for 20 MHz, 1 for 40 MHz
* half_gi - to use 4us v/s 3.6 us for symbol time
*/
-u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
- int width, int half_gi, bool shortPreamble)
+u32 ath_pkt_duration(u8 rix, int pktlen, int width,
+ int half_gi, bool shortPreamble)
{
u32 nbits, nsymbits, duration, nsymbols;
int streams;
@@ -1327,7 +1323,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
info->rates[i].Rate = rix | 0x80;
info->rates[i].ChSel = ath_txchainmask_reduction(sc,
ah->txchainmask, info->rates[i].Rate);
- info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
+ info->rates[i].PktDuration = ath_pkt_duration(rix, len,
is_40, is_sgi, is_sp);
if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
@@ -2122,7 +2118,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.bf_type = 0;
if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
bf->bf_state.bf_type = BUF_AMPDU;
- ath_tx_addto_baw(sc, tid, bf);
+ ath_tx_addto_baw(tid, bf);
}
bf->bf_next = NULL;
@@ -2368,7 +2364,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
if (txctl->sta) {
an = (struct ath_node *) sta->drv_priv;
- tid = ath_get_skb_tid(sc, an, skb);
+ tid = ath_get_skb_tid(an, skb);
}
ath_txq_lock(sc, txq);
@@ -2379,7 +2375,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
if (txctl->paprd)
dev_kfree_skb_any(skb);
else
@@ -2514,7 +2510,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
tx_info->status.status_driver_data[0] = sta;
__skb_queue_tail(&txq->complete_q, skb);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 8557d4826a46..94d08d6ae1a3 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1590,7 +1590,10 @@ static int wcn36xx_probe(struct platform_device *pdev)
}
n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
- wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL);
+ wcn->chan_survey = devm_kcalloc(wcn->dev,
+ n_channels,
+ sizeof(struct wcn36xx_chan_survey),
+ GFP_KERNEL);
if (!wcn->chan_survey) {
ret = -ENOMEM;
goto out_wq;
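The fix matters because the old call sized the allocation in bytes, not elements: devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL) reserved only n_channels bytes for an array of struct wcn36xx_chan_survey. A sketch of the difference, with hypothetical sizes:

/*
 * Hypothetical numbers: n_channels == 39, 16-byte survey struct.
 *   old: devm_kmalloc(dev, 39, ...)      ->  39 bytes (undersized)
 *   new: devm_kcalloc(dev, 39, 16, ...)  -> 624 bytes, zero-initialized
 * devm_kcalloc additionally guards the n * size multiplication
 * against overflow.
 */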
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 42d991d9f8cb..60eb95fc19a5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -455,6 +455,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
+ if (!sgl) {
+ /* out of (pre-allocated) scatterlist entries */
+ ret = -ENOMEM;
+ goto exit;
+ }
sg_set_buf(sgl, pkt_data, sg_data_sz);
sg_cnt++;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 297a7c738c01..4b70845e1a26 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -814,6 +814,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
* @name: name of the new interface.
* @params: contains mac address for AP or STA device.
* @type: interface type.
+ *
+ * Return: pointer to new vif on success, ERR_PTR(-errno) on failure
*/
static
struct wireless_dev *brcmf_apsta_add_vif(struct wiphy *wiphy, const char *name,
@@ -900,6 +902,8 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
*
* @wiphy: wiphy device of new interface.
* @name: name of the new interface.
+ *
+ * Return: pointer to new vif on success, ERR_PTR(-errno) on failure
*/
static struct wireless_dev *brcmf_mon_add_vif(struct wiphy *wiphy,
const char *name)
@@ -2676,7 +2680,7 @@ done:
static s32
brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- s32 *dbm)
+ unsigned int link_id, s32 *dbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
@@ -4999,12 +5003,16 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
static const s32 pktflags[] = {
- BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG
};
int i;
+ if (vif->wdev.iftype == NL80211_IFTYPE_AP)
+ brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG, NULL, 0);
+ else
+ brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG, NULL, 0);
+
for (i = 0; i < ARRAY_SIZE(pktflags); i++)
brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
@@ -7408,6 +7416,8 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* p2p, rsdb, and no mbss:
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
* channels = 2, 4 total
+ *
+ * Return: 0 on success, negative errno on failure
*/
static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index da72fd2d541f..3d63010ae079 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -327,8 +327,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
- brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
- brcmf_ifname(ifp), head_delta);
+ brcmf_dbg(INFO, "%s: %s headroom\n", brcmf_ifname(ifp),
+ head_delta ? "insufficient" : "unmodifiable");
atomic_inc(&drvr->bus_if->stats.pktcowed);
ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
GFP_ATOMIC);
@@ -540,6 +540,11 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
struct ethhdr *eh;
u16 type;
+ if (!ifp) {
+ brcmu_pkt_buf_free_skb(txp);
+ return;
+ }
+
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 31e080e4da66..ab3d6cfcb02b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -6,6 +6,8 @@
#ifndef _fwil_h_
#define _fwil_h_
+#include "debug.h"
+
/*******************************************************************************
* Dongle command codes that are interpreted by firmware
******************************************************************************/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index c1f18e2fe540..1681ad00f82e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -99,13 +99,13 @@ int brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root && err) {
- char *board_type;
+ char *board_type = NULL;
const char *tmp;
- of_property_read_string_index(root, "compatible", 0, &tmp);
-
/* get rid of '/' in the compatible string to be able to find the FW */
- board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!of_property_read_string_index(root, "compatible", 0, &tmp))
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+
if (!board_type) {
of_node_put(root);
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index d69879e1bd87..d362c4337616 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -23423,6 +23423,9 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
break;
}
+ if (WARN_ON(k == NPHY_IQCAL_NUMGAINS))
+ return;
+
params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2];
params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3];
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 74fc76c00ebc..4013443698a2 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1127,44 +1127,6 @@ il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
rxq->rb_stts = NULL;
}
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/* 0 1 2 3 4 5 6 7 8 9 */
- 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
- 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
- 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
- 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
- 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
- 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
- 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
- 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
- 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
- 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- * (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int
-il3945_calc_db_from_ratio(int sig_ratio)
-{
- /* 1000:1 or higher just report as 60 dB */
- if (sig_ratio >= 1000)
- return 60;
-
- /* 100:1 or higher, divide by 10 and use table,
- * add 20 dB to make up for divide by 10 */
- if (sig_ratio >= 100)
- return 20 + (int)ratio2dB[sig_ratio / 10];
-
- /* We shouldn't see this */
- if (sig_ratio < 1)
- return 0;
-
- /* Use table for ratios 1:1 - 99:1 */
- return (int)ratio2dB[sig_ratio];
-}
-
/*
* il3945_rx_handle - Main entry function for receiving responses from uCode
*
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h
index ffbe11902628..fb1e33c89d0e 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.h
+++ b/drivers/net/wireless/intel/iwlegacy/3945.h
@@ -173,7 +173,6 @@ struct il3945_ibss_seq {
* for use by iwl-*.c
*
*****************************************************************************/
-int il3945_calc_db_from_ratio(int sig_ratio);
void il3945_rx_replenish(void *data);
void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
unsigned int il3945_fill_beacon_frame(struct il_priv *il,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 958dd4f9bc69..af4f42534ea0 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -3915,37 +3915,6 @@ il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
}
EXPORT_SYMBOL(il_set_rxon_ht);
-/* Return valid, unused, channel for a passive scan to reset the RF */
-u8
-il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
-{
- const struct il_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
-
- if (band == NL80211_BAND_5GHZ) {
- min = 14;
- max = il->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
- channel = il->channel_info[i].channel;
- if (channel == le16_to_cpu(il->staging.channel))
- continue;
-
- ch_info = il_get_channel_info(il, band, channel);
- if (il_is_channel_valid(ch_info))
- break;
- }
-
- return channel;
-}
-EXPORT_SYMBOL(il_get_single_channel_number);
-
/*
* il_set_rxon_channel - Set the band and channel values in staging RXON
* @ch: requested channel as a pointer to struct ieee80211_channel
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 725c2a88ddb7..92285412ab10 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1705,7 +1705,6 @@ int il_full_rxon_required(struct il_priv *il);
int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif);
-u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
bool il_is_ht40_tx_allowed(struct il_priv *il,
struct ieee80211_sta_ht_cap *ht_cap);
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 64c123314245..19c4ce6f2465 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -5,13 +5,14 @@ iwlwifi-objs += iwl-io.o
iwlwifi-objs += iwl-drv.o
iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-nvm-utils.o
+iwlwifi-objs += iwl-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 975e8aed1526..dcba1a5d793b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -31,40 +31,21 @@
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0"
-#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0"
#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0"
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0"
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0"
-#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0"
#define IWL_MA_B_HR_B_FW_PRE "iwlwifi-ma-b0-hr-b0"
#define IWL_MA_B_GF_A_FW_PRE "iwlwifi-ma-b0-gf-a0"
#define IWL_MA_B_GF4_A_FW_PRE "iwlwifi-ma-b0-gf4-a0"
-#define IWL_MA_B_MR_A_FW_PRE "iwlwifi-ma-b0-mr-a0"
#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \
IWL_SO_A_JF_B_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \
IWL_SO_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_SO_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_TY_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_A_MR_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_B_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_B_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_B_MR_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_ax210_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -267,13 +248,6 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
.trans.low_latency_xtal = true,
};
-const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
- .fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
- .uhb_supported = false,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
const struct iwl_cfg iwl_cfg_ma = {
.fw_name_mac = "ma",
.uhb_supported = true,
@@ -289,19 +263,11 @@ const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_SO_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_TY_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_MA_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_MA_A_GF4_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-
-MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
+IWL_FW_AND_PNVM(IWL_MA_B_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_MA_B_GF4_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
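
The per-prefix MODULE_FIRMWARE()/".pnvm" pairs above collapse into the IWL_FW_AND_PNVM() helper, whose definition is outside this hunk. A sketch of what it is assumed to expand to, one .ucode entry at the maximum API version plus the matching .pnvm entry:

	#define IWL_FW_AND_PNVM(pfx, api)				\
		MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode");	\
		MODULE_FIRMWARE(pfx ".pnvm")

This keeps modinfo's firmware list consistent for devices that also load a PNVM image, instead of maintaining the .pnvm names by hand.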
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 1c43f283ac4a..efa3e0e35f79 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 94
+#define IWL_BZ_UCODE_API_MAX 96
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 92
@@ -37,20 +37,6 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \
- IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_FM_B_MODULE_FIRMWARE(api) \
- IWL_BZ_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_FM_C_MODULE_FIRMWARE(api) \
- IWL_BZ_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_FM4_B_MODULE_FIRMWARE(api) \
- IWL_BZ_A_FM4_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \
- IWL_GL_B_FM_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_GL_C_FM_C_MODULE_FIRMWARE(api) \
- IWL_GL_C_FM_C_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_bz_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -181,14 +167,11 @@ const struct iwl_cfg iwl_cfg_gl = {
.num_rbds = IWL_NUM_RBDS_BZ_EHT,
};
-
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-
-MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
+IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
new file mode 100644
index 000000000000..ab7c0f8d54f4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-prph.h"
+#include "fw/api/txq.h"
+
+/* Highest firmware API version supported */
+#define IWL_DR_UCODE_API_MAX 96
+
+/* Lowest firmware API version supported */
+#define IWL_DR_UCODE_API_MIN 96
+
+/* NVM versions */
+#define IWL_DR_NVM_VERSION 0x0a1d
+
+/* Memory offsets and lengths */
+#define IWL_DR_DCCM_OFFSET 0x800000 /* LMAC1 */
+#define IWL_DR_DCCM_LEN 0x10000 /* LMAC1 */
+#define IWL_DR_DCCM2_OFFSET 0x880000
+#define IWL_DR_DCCM2_LEN 0x8000
+#define IWL_DR_SMEM_OFFSET 0x400000
+#define IWL_DR_SMEM_LEN 0xD0000
+
+#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0"
+#define IWL_BR_A_PET_A_FW_PRE "iwlwifi-br-a0-petc-a0"
+#define IWL_BR_A_PE_A_FW_PRE "iwlwifi-br-a0-pe-a0"
+
+#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
+ IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \
+ IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \
+ IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl_dr_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+ .num_of_queues = 512,
+ .max_tfd_queue_size = 65536,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+#define IWL_DEVICE_DR_COMMON \
+ .ucode_api_max = IWL_DR_UCODE_API_MAX, \
+ .ucode_api_min = IWL_DR_UCODE_API_MIN, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = 10, \
+ .non_shared_ant = ANT_B, \
+ .dccm_offset = IWL_DR_DCCM_OFFSET, \
+ .dccm_len = IWL_DR_DCCM_LEN, \
+ .dccm2_offset = IWL_DR_DCCM2_OFFSET, \
+ .dccm2_len = IWL_DR_DCCM2_LEN, \
+ .smem_offset = IWL_DR_SMEM_OFFSET, \
+ .smem_len = IWL_DR_SMEM_LEN, \
+ .apmg_not_supported = true, \
+ .trans.mq_rx_supported = true, \
+ .vht_mu_mimo_supported = true, \
+ .mac_addr_from_csr = 0x30, \
+ .nvm_ver = IWL_DR_NVM_VERSION, \
+ .trans.rf_id = true, \
+ .trans.gen2 = true, \
+ .nvm_type = IWL_NVM_EXT, \
+ .dbgc_supported = true, \
+ .min_umac_error_event_table = 0xD0000, \
+ .d3_debug_data_base_addr = 0x401000, \
+ .d3_debug_data_length = 60 * 1024, \
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }, \
+ .trans.umac_prph_offset = 0x300000, \
+ .trans.device_family = IWL_DEVICE_FAMILY_DR, \
+ .trans.base_params = &iwl_dr_base_params, \
+ .min_txq_size = 128, \
+ .gp2_reg_addr = 0xd02c68, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = DBGC_DBGBUF_WRAP_AROUND, \
+ .mask = 0xffffffff, \
+ }, \
+ .cur_frag = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
+ }, \
+ }, \
+ .mon_dbgi_regs = { \
+ .write_ptr = { \
+ .addr = DBGI_SRAM_FIFO_POINTERS, \
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
+ }, \
+ }
+
+#define IWL_DEVICE_DR \
+ IWL_DEVICE_DR_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_DR_EHT, \
+ .ht_params = &iwl_22000_ht_params
+
+/*
+ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+ * A-MPDU, with additional overhead to account for processing time.
+ */
+#define IWL_NUM_RBDS_DR_EHT (512 * 16)
+
+const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_DR,
+ .base_params = &iwl_dr_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .gen2 = true,
+ .integrated = true,
+ .umac_prph_offset = 0x300000,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+};
+
+const char iwl_dr_name[] = "Intel(R) TBD Dr device";
+
+const struct iwl_cfg iwl_cfg_dr = {
+ .fw_name_mac = "dr",
+ IWL_DEVICE_DR,
+};
+
+const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_DR,
+ .base_params = &iwl_dr_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .gen2 = true,
+ .integrated = true,
+ .umac_prph_offset = 0x300000,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+};
+
+const char iwl_br_name[] = "Intel(R) TBD Br device";
+
+const struct iwl_cfg iwl_cfg_br = {
+ .fw_name_mac = "br",
+ IWL_DEVICE_DR,
+};
+
+MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
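
A quick check of the RBD sizing comment in the new file: 8 MSDUs inside 512 A-MSDUs gives 4096 buffers, while IWL_NUM_RBDS_DR_EHT is 512 * 16 = 8192, so the definition presumably doubles that figure to cover the processing-time overhead the comment mentions.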
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index fc5e6e44c6aa..c9eeb3f6704e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 94
+#define IWL_SC_UCODE_API_MAX 96
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 92
@@ -38,28 +38,10 @@
#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
-#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_HR_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
- IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
- IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -181,14 +163,14 @@ const struct iwl_cfg iwl_cfg_sc2f = {
IWL_DEVICE_SC,
};
-MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_SC_A_GF_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC_A_GF4_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2F_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2F_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index 931aa3f5798d..cdc05f7e75a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -676,12 +676,12 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM);
/* See if we got it */
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM,
IWL_EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(trans->dev,
@@ -697,7 +697,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM);
}
static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 74d163e56511..56d19a034c24 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1565,6 +1565,16 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static void
+iwlagn_mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ iwl_trans_finish_sw_reset(priv->trans);
+}
+
const struct ieee80211_ops iwlagn_hw_ops = {
.add_chanctx = ieee80211_emulate_add_chanctx,
.remove_chanctx = ieee80211_emulate_remove_chanctx,
@@ -1598,6 +1608,7 @@ const struct ieee80211_ops iwlagn_hw_ops = {
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
.event_callback = iwlagn_mac_event_callback,
.set_tim = iwlagn_mac_set_tim,
+ .reconfig_complete = iwlagn_mac_reconfig_complete,
};
/* This function both allocates and initializes hw and priv. */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 769b75c3fa5b..30789ba06d9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1241,7 +1241,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
STATISTICS_NOTIFICATION,
REPLY_TX,
};
- int i;
+ int i, err;
/************************
* 1. Allocating HW data
@@ -1249,6 +1249,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
hw = iwl_alloc_all();
if (!hw) {
pr_err("%s: Cannot allocate network device\n", trans->name);
+ err = -ENOMEM;
goto out;
}
@@ -1299,8 +1300,10 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
break;
}
- if (WARN_ON(!priv->lib))
+ if (WARN_ON(!priv->lib)) {
+ err = -ENODEV;
goto out_free_hw;
+ }
/*
* Populate the state variables that the transport layer needs
@@ -1377,12 +1380,14 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->trans->name, priv->trans->hw_rev);
- if (iwl_trans_start_hw(priv->trans))
+ err = iwl_trans_start_hw(priv->trans);
+ if (err)
goto out_free_hw;
/* Read the EEPROM */
- if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
- &priv->eeprom_blob_size)) {
+ err = iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
+ &priv->eeprom_blob_size);
+ if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
goto out_free_hw;
}
@@ -1393,13 +1398,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
priv->nvm_data = iwl_parse_eeprom_data(priv->trans, priv->cfg,
priv->eeprom_blob,
priv->eeprom_blob_size);
- if (!priv->nvm_data)
+ if (!priv->nvm_data) {
+ err = -ENOMEM;
goto out_free_eeprom_blob;
+ }
- if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
+ err = iwl_nvm_check_version(priv->nvm_data, priv->trans);
+ if (err)
goto out_free_eeprom;
- if (iwl_eeprom_init_hw_params(priv))
+ err = iwl_eeprom_init_hw_params(priv);
+ if (err)
goto out_free_eeprom;
/* extract MAC Address */
@@ -1446,7 +1455,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
atomic_set(&priv->queue_stop_count[i], 0);
}
- if (iwl_init_drv(priv))
+ err = iwl_init_drv(priv);
+ if (err)
goto out_free_eeprom;
/* At this point both hw and priv are initialized. */
@@ -1480,7 +1490,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
*
* 7. Setup and register with mac80211 and debugfs
**************************************************/
- if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
+ err = iwlagn_mac_setup_register(priv, &fw->ucode_capa);
+ if (err)
goto out_destroy_workqueue;
iwl_dbgfs_register(priv, dbgfs_dir);
@@ -1500,8 +1511,7 @@ out_free_eeprom:
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
- op_mode = NULL;
- return op_mode;
+ return ERR_PTR(err);
}
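
With the error-path rework above, iwl_op_mode_dvm_start() propagates the failure reason as an ERR_PTR() instead of returning a bare NULL. A hypothetical caller-side sketch of the convention this enables (IS_ERR()/PTR_ERR() come from linux/err.h):

	struct iwl_op_mode *op_mode;

	op_mode = ops->start(trans, cfg, fw, dbgfs_dir);
	if (IS_ERR(op_mode))
		return PTR_ERR(op_mode);	/* e.g. -ENOMEM or -ENODEV */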
static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
@@ -1895,17 +1905,9 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
unsigned int reload_msec;
unsigned long reload_jiffies;
- if (iwl_have_debug_level(IWL_DL_FW))
- iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
-
/* uCode is no longer loaded. */
priv->ucode_loaded = false;
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
-
- iwl_abort_notification_waits(&priv->notif_wait);
-
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
clear_bit(STATUS_READY, &priv->status);
@@ -1942,27 +1944,43 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
}
}
-static void iwl_nic_error(struct iwl_op_mode *op_mode, bool sync)
+static void iwl_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+
+ iwl_abort_notification_waits(&priv->notif_wait);
+
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL && iwl_check_for_ct_kill(priv))
+ return;
+
IWL_ERR(priv, "Loaded firmware version: %s\n",
priv->fw->fw_version);
- iwl_dump_nic_error_log(priv);
- iwl_dump_nic_event_log(priv, false, NULL);
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL) {
+ IWL_ERR(priv, "Command queue full!\n");
+ } else {
+ iwl_dump_nic_error_log(priv);
+ iwl_dump_nic_event_log(priv, false, NULL);
+ }
- iwlagn_fw_error(priv, false);
+ if (iwl_have_debug_level(IWL_DL_FW))
+ iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
}
-static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+static bool iwlagn_sw_reset(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- if (!iwl_check_for_ct_kill(priv)) {
- IWL_ERR(priv, "Restarting adapter queue is full\n");
- iwlagn_fw_error(priv, false);
- }
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL && iwl_check_for_ct_kill(priv))
+ return false;
+
+ iwlagn_fw_error(priv, false);
+ return true;
}
#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
@@ -2117,7 +2135,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.hw_rf_kill = iwl_set_hw_rfkill_state,
.free_skb = iwl_free_skb,
.nic_error = iwl_nic_error,
- .cmd_queue_full = iwl_cmd_queue_full,
+ .sw_reset = iwlagn_sw_reset,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 0bc32291815e..efa7b673ebc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -79,9 +79,9 @@ static void *iwl_acpi_get_object(struct device *dev, acpi_string method)
 * method (DSM) interface. The returned acpi object must be freed by the
 * calling function.
*/
-static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
- union acpi_object *args,
- const guid_t *guid)
+union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev,
+ int func, union acpi_object *args,
+ const guid_t *guid)
{
union acpi_object *obj;
@@ -108,7 +108,7 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
size_t expected_size)
{
union acpi_object *obj;
- int ret = 0;
+ int ret;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
if (IS_ERR(obj)) {
@@ -123,8 +123,10 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
} else if (obj->type == ACPI_TYPE_BUFFER) {
__le64 le_value = 0;
- if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
- return -EINVAL;
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value))) {
+ ret = -EINVAL;
+ goto out;
+ }
/* if the buffer size doesn't match the expected size */
if (obj->buffer.length != expected_size)
@@ -145,8 +147,9 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
}
IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method evaluated: func=%d, ret=%d\n",
- func, ret);
+ "ACPI: DSM method evaluated: func=%d, value=%lld\n",
+ func, *value);
+ ret = 0;
out:
ACPI_FREE(obj);
return ret;
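
Note the subtle leak fix in this hunk: the WARN_ON_ONCE() path previously returned -EINVAL directly and skipped ACPI_FREE(obj). A minimal sketch of the cleanup idiom being restored:

	union acpi_object *obj;
	__le64 le_value = 0;
	int ret;

	obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
	if (IS_ERR(obj))
		return PTR_ERR(obj);		/* nothing allocated yet */

	if (WARN_ON_ONCE(expected_size > sizeof(le_value))) {
		ret = -EINVAL;			/* was: return -EINVAL, leaking obj */
		goto out;
	}
	ret = 0;
out:
	ACPI_FREE(obj);
	return ret;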
@@ -259,13 +262,14 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
struct iwl_tas_data *tas_data)
{
union acpi_object *wifi_pkg, *data;
- int ret, tbl_rev, i, block_list_size, enabled;
+ int ret, tbl_rev, block_list_size, enabled;
+ u32 tas_selection;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read wtas table revision 1 or revision 0*/
+ /* try to read wtas table */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WTAS_WIFI_DATA_SIZE,
&tbl_rev);
@@ -274,27 +278,23 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
goto out_free;
}
- if (tbl_rev == 1 && wifi_pkg->package.elements[1].type ==
- ACPI_TYPE_INTEGER) {
- u32 tas_selection =
- (u32)wifi_pkg->package.elements[1].integer.value;
-
- enabled = iwl_parse_tas_selection(fwrt, tas_data,
- tas_selection);
-
- } else if (tbl_rev == 0 &&
- wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
- enabled = !!wifi_pkg->package.elements[1].integer.value;
- } else {
+ if (tbl_rev < 0 || tbl_rev > 2 ||
+ wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
- if (!enabled) {
- IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
- ret = 0;
- goto out_free;
- }
+ tas_selection = (u32)wifi_pkg->package.elements[1].integer.value;
+ enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ tas_selection);
+ tas_data->table_source = BIOS_SOURCE_ACPI;
+ tas_data->table_revision = tbl_rev;
+ tas_data->tas_selection = tas_selection;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS %s enabled\n",
+ enabled ? "is" : "not");
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
@@ -305,13 +305,14 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
ret = -EINVAL;
goto out_free;
}
+
block_list_size = wifi_pkg->package.elements[2].integer.value;
- tas_data->block_list_size = cpu_to_le32(block_list_size);
+ tas_data->block_list_size = block_list_size;
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
- for (i = 0; i < block_list_size; i++) {
- u32 country;
+ for (int i = 0; i < block_list_size; i++) {
+ u16 country;
if (wifi_pkg->package.elements[3 + i].type !=
ACPI_TYPE_INTEGER) {
@@ -322,11 +323,11 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- tas_data->block_list_array[i] = cpu_to_le32(country);
+ tas_data->block_list_array[i] = country;
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
- ret = 1;
+ ret = enabled;
out_free:
kfree(data);
return ret;
@@ -1023,3 +1024,37 @@ out_free:
kfree(data);
return ret;
}
+
+int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret = -ENOENT;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_DSBR_METHOD);
+ if (IS_ERR(data))
+ return ret;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_DSBR_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != ACPI_DSBR_WIFI_DATA_REV) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI DSBR revision:%d\n",
+ tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
+
+ *value = wifi_pkg->package.elements[1].integer.value;
+ IWL_DEBUG_RADIO(fwrt, "Loaded DSBR config from ACPI value: 0x%x\n",
+ *value);
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index bb88398a6987..e50b93472dd2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -28,6 +28,7 @@
#define ACPI_WPFC_METHOD "WPFC"
#define ACPI_GLAI_METHOD "GLAI"
#define ACPI_WBEM_METHOD "WBEM"
+#define ACPI_DSBR_METHOD "DSBR"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -76,6 +77,13 @@
#define ACPI_WBEM_WIFI_DATA_SIZE 2
/*
* One element for domain type,
+ * and one for DSBR response data
+ */
+#define ACPI_DSBR_WIFI_DATA_SIZE 2
+#define ACPI_DSBR_WIFI_DATA_REV 1
+
+/*
+ * One element for domain type,
* and one for the status
*/
#define ACPI_GLAI_WIFI_DATA_SIZE 2
@@ -101,6 +109,30 @@
#define ACPI_DSM_REV 0
+#define DSM_INTERNAL_FUNC_GET_PLAT_INFO 1
+/* TBD: VPRO is BIT(0) in the result, but what's the result? */
+
+#define DSM_INTERNAL_FUNC_PRODUCT_RESET 2
+
+/* DSM_INTERNAL_FUNC_PRODUCT_RESET - product reset (aka "PLDR") */
+enum iwl_dsm_internal_product_reset_cmds {
+ DSM_INTERNAL_PLDR_CMD_GET_MODE = 1,
+ DSM_INTERNAL_PLDR_CMD_SET_MODE = 2,
+ DSM_INTERNAL_PLDR_CMD_GET_STATUS = 3,
+};
+
+enum iwl_dsm_internal_product_reset_mode {
+ DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET = BIT(0),
+ DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR = BIT(1),
+ DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON = BIT(2),
+};
+
+struct iwl_dsm_internal_product_reset_cmd {
+ /* cmd is from enum iwl_dsm_internal_product_reset_cmds */
+ u16 cmd;
+ u16 value;
+} __packed;
+
#define IWL_ACPI_WBEM_REV0_MASK (BIT(0) | BIT(1))
#define IWL_ACPI_WBEM_REVISION 0
@@ -110,6 +142,10 @@ struct iwl_fw_runtime;
extern const guid_t iwl_guid;
+union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev,
+ int func, union acpi_object *args,
+ const guid_t *guid);
+
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
@@ -153,10 +189,14 @@ int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
enum iwl_dsm_funcs func, u32 *value);
int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
+
+int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+
#else /* CONFIG_ACPI */
-static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
- int func, union acpi_object *args)
+static inline union acpi_object *
+iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+ union acpi_object *args, const guid_t *guid)
{
return ERR_PTR(-ENOENT);
}
@@ -221,6 +261,11 @@ static inline int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
{
return -ENOENT;
}
+
+static inline int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
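
As a usage illustration of the PLDR interface declared above, a hypothetical command buffer enabling product reset plus WiFi FLR might be built like this (the surrounding DSM evaluation call is omitted, and presumably passes the __packed struct as an ACPI buffer argument):

	struct iwl_dsm_internal_product_reset_cmd cmd = {
		.cmd = DSM_INTERNAL_PLDR_CMD_SET_MODE,
		.value = DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET |
			 DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR,
	};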
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 2f40e69db318..34a1f97653c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -447,7 +447,7 @@ enum iwl_legacy_cmds {
/**
* @BA_NOTIF:
- * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif
+ * BlockAck notification, uses &struct iwl_compressed_ba_notif
* or &struct iwl_mvm_ba_notif depending on the HW
*/
BA_NOTIF = 0xc5,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 2ab38eaeb290..570a3f722510 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -391,7 +391,7 @@ enum iwl_datapath_monitor_notif_type {
struct iwl_datapath_monitor_notif {
__le32 type;
- u8 mac_id;
+ u8 link_id;
u8 reserved[3];
} __packed; /* MONITOR_NTF_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index bea0f4668cc8..aa88e91d117e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -477,6 +477,9 @@ struct iwl_mvm_tas_status_per_mac {
* @tas_status_mac: TAS status per lmac, uses
* &struct iwl_mvm_tas_status_per_mac
* @in_dual_radio: is TAS in dual radio? - TRUE/FALSE
+ * @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags.
+ * This member is valid only when fw has
+ * %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability.
* @reserved: reserved
*/
struct iwl_mvm_tas_status_resp {
@@ -486,7 +489,8 @@ struct iwl_mvm_tas_status_resp {
__le16 block_list[16];
struct iwl_mvm_tas_status_per_mac tas_status_mac[2];
u8 in_dual_radio;
- u8 reserved[3];
+ u8 uhb_allowed_flags;
+ u8 reserved[2];
} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index b23d5fc4bbe6..37bb7002c1c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -74,7 +74,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
ROC_NOTIF = 0xF8,
/**
- * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
+ * @SESSION_PROTECTION_NOTIF: &struct iwl_session_prot_notif
*/
SESSION_PROTECTION_NOTIF = 0xFB,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index d424d0126367..5cdc09d465d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -464,21 +464,30 @@ struct iwl_tas_config_cmd_v3 {
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
+ * enum iwl_tas_uhb_allowed_flags - per country TAS UHB allowed flags.
+ * @TAS_UHB_ALLOWED_CANADA: TAS UHB is allowed in Canada. This flag is valid
+ * only when fw has %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability.
+ */
+enum iwl_tas_uhb_allowed_flags {
+ TAS_UHB_ALLOWED_CANADA = BIT(0),
+};
+
+/**
* struct iwl_tas_config_cmd_v4 - configures the TAS
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
* @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
- * @reserved: reserved
-*/
+ * @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags.
+ */
struct iwl_tas_config_cmd_v4 {
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
- u8 reserved;
+ u8 uhb_allowed_flags;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
-struct iwl_tas_config_cmd {
+struct iwl_tas_config_cmd_v2_v4 {
struct iwl_tas_config_cmd_common common;
union {
struct iwl_tas_config_cmd_v3 v3;
@@ -487,6 +496,46 @@ struct iwl_tas_config_cmd {
};
/**
+ * enum bios_source - source of bios data
+ * @BIOS_SOURCE_NONE: BIOS source is not defined
+ * @BIOS_SOURCE_ACPI: BIOS source is ACPI
+ * @BIOS_SOURCE_UEFI: BIOS source is UEFI
+ */
+enum bios_source {
+ BIOS_SOURCE_NONE,
+ BIOS_SOURCE_ACPI,
+ BIOS_SOURCE_UEFI,
+};
+
+/**
+ * struct bios_value_u32 - BIOS configuration.
+ * @table_source: see &enum bios_source
+ * @table_revision: table revision.
+ * @reserved: reserved
+ * @value: value in bios.
+ */
+struct bios_value_u32 {
+ u8 table_source;
+ u8 table_revision;
+ u8 reserved[2];
+ __le32 value;
+} __packed; /* BIOS_TABLE_SOURCE_U32_S_VER_1 */
+
+/**
+ * struct iwl_tas_config_cmd - configures the TAS.
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @reserved: reserved
+ * @tas_config_info: see &struct bios_value_u32
+ */
+struct iwl_tas_config_cmd {
+ __le16 block_list_size;
+ __le16 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 reserved[2];
+ struct bios_value_u32 tas_config_info;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_5 */
+
+/**
* enum iwl_lari_config_masks - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 6a7bbfd6b2b7..9b09b835560b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -31,7 +31,7 @@ enum iwl_prot_offload_subcmd_ids {
/**
* @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif_v2 or
- * &struct iwl_stored_beacon_notif_v3
+ * &struct iwl_stored_beacon_notif
*/
STORED_BEACON_NTF = 0xFF,
};
@@ -71,18 +71,18 @@ struct iwl_stored_beacon_notif_v2 {
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
/**
- * struct iwl_stored_beacon_notif_v3 - Stored beacon notification
+ * struct iwl_stored_beacon_notif - Stored beacon notification
*
* @common: fields common for all versions
* @sta_id: station for which the beacon was received
* @reserved: reserved for alignment
* @data: beacon data, length in @byte_count
*/
-struct iwl_stored_beacon_notif_v3 {
+struct iwl_stored_beacon_notif {
struct iwl_stored_beacon_notif_common common;
u8 sta_id;
u8 reserved[3];
u8 data[MAX_STORED_BEACON_SIZE];
-} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3, _VER_4 */
#endif /* __iwl_fw_api_offload_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index df0680eae30c..37ec26596ee7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -266,7 +266,7 @@ struct iwl_reduce_tx_power_cmd {
} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
enum iwl_dev_tx_power_cmd_mode {
- IWL_TX_POWER_MODE_SET_MAC = 0,
+ IWL_TX_POWER_MODE_SET_LINK = 0,
IWL_TX_POWER_MODE_SET_DEVICE = 1,
IWL_TX_POWER_MODE_SET_CHAINS = 2,
IWL_TX_POWER_MODE_SET_ACK = 3,
@@ -283,12 +283,14 @@ enum iwl_dev_tx_power_cmd_mode {
/**
* struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
- * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @link_id: id of the link ctx for which we are reducing TX power.
+ * For version 9 / 10, this is the link id. For earlier versions, it is
+ * the mac id.
* @pwr_restriction: TX power restriction in 1/8 dBms.
*/
struct iwl_dev_tx_power_common {
__le32 set_mode;
- __le32 mac_context_id;
+ __le32 link_id;
__le16 pwr_restriction;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index 893438aadab0..cfa6532a3cdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -10,7 +10,7 @@
#include "fw/api/tx.h"
#include "fw/api/phy-ctxt.h"
-#define IWL_MVM_TDLS_STA_COUNT 4
+#define IWL_TDLS_STA_COUNT 4
/* Type of TDLS request */
enum iwl_tdls_channel_switch_type {
@@ -128,7 +128,7 @@ struct iwl_tdls_config_cmd {
u8 tdls_peer_count;
u8 tx_to_ap_tid;
__le16 tx_to_ap_ssn;
- struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
+ struct iwl_tdls_sta_info sta_info[IWL_TDLS_STA_COUNT];
__le32 pti_req_data_offset;
struct iwl_tx_cmd pti_req_tx_cmd;
@@ -155,7 +155,7 @@ struct iwl_tdls_config_sta_info_res {
*/
struct iwl_tdls_config_res {
__le32 tx_to_ap_last_seq;
- struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
+ struct iwl_tdls_config_sta_info_res sta_info[IWL_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
#endif /* __iwl_fw_api_tdls_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index f4b827b58bd3..18d030334a6a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -395,7 +395,7 @@ struct iwl_roc_notif {
} __packed; /* ROC_NOTIF_API_S_VER_1 */
/**
- * enum iwl_mvm_session_prot_conf_id - session protection's configurations
+ * enum iwl_session_prot_conf_id - session protection's configurations
* @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
* The firmware will allocate two events.
* Valid for BSS_STA and P2P_STA.
@@ -424,7 +424,7 @@ struct iwl_roc_notif {
* be taken into account.
* @SESSION_PROTECT_CONF_MAX_ID: not used
*/
-enum iwl_mvm_session_prot_conf_id {
+enum iwl_session_prot_conf_id {
SESSION_PROTECT_CONF_ASSOC,
SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
@@ -433,12 +433,12 @@ enum iwl_mvm_session_prot_conf_id {
}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
/**
- * struct iwl_mvm_session_prot_cmd - configure a session protection
+ * struct iwl_session_prot_cmd - configure a session protection
* @id_and_color: the id and color of the link (or mac, for command version 1)
* for which this session protection is sent
* @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE,
* see &enum iwl_ctxt_action
- * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @conf_id: see &enum iwl_session_prot_conf_id
* @duration_tu: the duration of the whole protection in TUs.
* @repetition_count: not used
* @interval: not used
@@ -448,7 +448,7 @@ enum iwl_mvm_session_prot_conf_id {
* The firmware supports only one concurrent session protection per vif.
* Adding a new session protection will remove any currently running session.
*/
-struct iwl_mvm_session_prot_cmd {
+struct iwl_session_prot_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
__le32 id_and_color;
__le32 action;
@@ -462,17 +462,17 @@ struct iwl_mvm_session_prot_cmd {
*/
/**
- * struct iwl_mvm_session_prot_notif - session protection started / ended
+ * struct iwl_session_prot_notif - session protection started / ended
* @mac_link_id: the mac id (or link id, for notif ver > 2) for which the
* session protection started / ended
* @status: 1 means success, 0 means failure
* @start: 1 means the session protection started, 0 means it ended
- * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @conf_id: see &enum iwl_session_prot_conf_id
*
* Note that any session protection will always get two notifications: start
 * and end, even if the firmware could not schedule it.
*/
-struct iwl_mvm_session_prot_notif {
+struct iwl_session_prot_notif {
__le32 mac_link_id;
__le32 status;
__le32 start;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f3bf2e087a40..0a39e4b6eb62 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -191,7 +191,7 @@ enum iwl_tx_offload_assist_flags_pos {
* cleared. Combination of RATE_MCS_*
* @sta_id: index of destination station in FW station table
* @sec_ctl: security control, TX_CMD_SEC_*
- * @initial_rate_index: index into the the rate table for initial TX attempt.
+ * @initial_rate_index: index into the rate table for initial TX attempt.
* Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
* @reserved2: reserved
* @key: security key
@@ -298,8 +298,7 @@ struct iwl_tx_cmd_gen3 {
__le32 rate_n_flags;
u8 reserved[8];
struct ieee80211_hdr hdr[];
-} __packed; /* TX_CMD_API_S_VER_8,
- TX_CMD_API_S_VER_10 */
+} __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */
/*
* TX response related data
@@ -482,8 +481,8 @@ struct agg_tx_status {
#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
TX_RES_RATE_TABLE_COLOR_POS)
-#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
-#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+#define IWL_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWL_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
/**
* struct iwl_tx_resp_v3 - notifies that fw is TXing a packet
@@ -601,7 +600,8 @@ struct iwl_tx_resp {
__le16 reserved2;
struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_6,
- TX_RSP_API_S_VER_7 */
+ TX_RSP_API_S_VER_7,
+ TX_RSP_API_S_VER_8 */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -638,14 +638,14 @@ struct iwl_mvm_ba_notif {
} __packed;
/**
- * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * struct iwl_compressed_ba_tfd - progress of a TFD queue
* @q_num: TFD queue number
* @tfd_index: Index of first un-acked frame in the TFD queue
* @scd_queue: For debug only - the physical queue the TFD queue is bound to
* @tid: TID of the queue (0-7)
* @reserved: reserved for alignment
*/
-struct iwl_mvm_compressed_ba_tfd {
+struct iwl_compressed_ba_tfd {
__le16 q_num;
__le16 tfd_index;
u8 scd_queue;
@@ -654,12 +654,12 @@ struct iwl_mvm_compressed_ba_tfd {
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
/**
- * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * struct iwl_compressed_ba_ratid - progress of a RA TID queue
* @q_num: RA TID queue number
* @tid: TID of the queue
* @ssn: BA window current SSN
*/
-struct iwl_mvm_compressed_ba_ratid {
+struct iwl_compressed_ba_ratid {
u8 q_num;
u8 tid;
__le16 ssn;
@@ -685,7 +685,7 @@ enum iwl_mvm_ba_resp_flags {
};
/**
- * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * struct iwl_compressed_ba_notif - notifies about reception of BA
* ( BA_NOTIF = 0xc5 )
* @flags: status flag, see the &iwl_mvm_ba_resp_flags
* @sta_id: Index of recipient (BA-sending) station in fw's station table
@@ -704,12 +704,12 @@ enum iwl_mvm_ba_resp_flags {
* @tx_rate: the rate the aggregation was sent at
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
- * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ * @tfd: array of TFD queue status updates. See &iwl_compressed_ba_tfd
* for details. Length in @tfd_cnt.
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
- * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
+ * &iwl_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
*/
-struct iwl_mvm_compressed_ba_notif {
+struct iwl_compressed_ba_notif {
__le32 flags;
u8 sta_id;
u8 reduced_txp;
@@ -726,8 +726,8 @@ struct iwl_mvm_compressed_ba_notif {
__le16 tfd_cnt;
__le16 ra_tid_cnt;
union {
- DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid);
- DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd);
+ DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_ratid, ra_tid);
+ DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_tfd, tfd);
};
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4,
COMPRESSED_BA_RES_API_S_VER_5 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index f4803b55adb9..87998374f459 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -287,7 +287,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
trans->dbg.umac_error_event_table = umac_error_event_table;
}
-static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
+static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
{
enum iwl_fw_ini_time_point tp_id;
@@ -303,7 +303,7 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
}
- _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
+ iwl_dbg_tlv_time_point_sync(fwrt, tp_id, NULL);
}
static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 893b21fcaf87..f0c813d675f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -123,6 +123,24 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count)
+{
+ if (count == 0)
+ return 0;
+
+ if (!iwl_trans_fw_running(fwrt->trans))
+ return count;
+
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL);
+
+ iwl_fw_dbg_collect(fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL);
+
+ return count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 16);
+
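
The new fw_dbg_collect debugfs hook lets userspace force a dump at the USER_TRIGGER time point while the firmware is running. A hypothetical userspace trigger (the debugfs path is an assumption; it depends on where the driver registers its fwrt directory):

	#include <fcntl.h>
	#include <unistd.h>

	static int trigger_fw_dbg_collect(const char *path)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		/* all but the last written byte become the trigger description */
		if (write(fd, "manual\n", 7) != 7) {
			close(fd);
			return -1;
		}
		return close(fd);
	}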
static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
char *buf, size_t count)
{
@@ -282,6 +300,26 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
+static ssize_t iwl_dbgfs_fw_ver_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ char *pos = buf;
+ char *endpos = buf + size;
+
+ pos += scnprintf(pos, endpos - pos, "FW id: %s\n",
+ fwrt->fw->fw_version);
+ pos += scnprintf(pos, endpos - pos, "FW: %s\n",
+ fwrt->fw->human_readable);
+ pos += scnprintf(pos, endpos - pos, "Device: %s\n",
+ fwrt->trans->name);
+ pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
+ fwrt->dev->bus->name);
+
+ return pos - buf;
+}
+
+FWRT_DEBUGFS_READ_FILE_OPS(fw_ver, 1024);
+
struct iwl_dbgfs_fw_info_priv {
struct iwl_fw_runtime *fwrt;
};
@@ -403,5 +441,7 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_collect, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
+ FWRT_DEBUGFS_ADD_FILE(fw_ver, dbgfs_dir, 0400);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index e63b08b7d336..3af275133da0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -169,7 +169,7 @@ struct iwl_fw_error_dump_info {
* @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
* @fw_mon_base_ptr: base pointer of the data
* @fw_mon_cycle_cnt: number of wraparounds
- * @fw_mon_base_high_ptr: used in AX210 devices, the base adderss is 64 bit
+ * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit
* so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr hold
* MSB 32 bits
* @reserved: for future use
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index ae05227b6153..9860903ecd3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -104,6 +104,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_CURRENT_PC = 68,
IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
+ IWL_UCODE_TLV_FW_NUM_LINKS = IWL_UCODE_TLV_CONST_BASE + 1,
IWL_UCODE_TLV_FW_NUM_BEACONS = IWL_UCODE_TLV_CONST_BASE + 2,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
@@ -384,7 +385,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* to report the CSI information with (certain) RX frames
* @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both
* initiator and responder
- * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ * @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA: supports (de)activating UNII-4
+ * for US/CA/WW from BIOS
* @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames
* @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in
* reset flow
@@ -397,6 +399,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
* @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise
* passive channels
+ * @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA: supports (de)activating 5G9
+ * for CA from BIOS.
+ * @IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT: supports %TAS_UHB_ALLOWED_CANADA
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -474,7 +479,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)93,
/* set 3 */
- IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA = (__force iwl_ucode_tlv_capa_t)96,
/*
* @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels
@@ -497,6 +502,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA = (__force iwl_ucode_tlv_capa_t)123,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT = (__force iwl_ucode_tlv_capa_t)124,
NUM_IWL_UCODE_TLV_CAPA
/*
 * This construction makes both sparse (which cannot increment the previous
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index b7deca05a953..c2f4fc83a22c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2019 - 2021 Intel Corporation
+ * Copyright(c) 2024 Intel Corporation
*/
#include <fw/api/commands.h>
#include "img.h"
@@ -75,6 +76,7 @@ static const struct {
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "NMI_INTERRUPT_PREG", 0x88 },
{ "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING },
{ "ADVANCED_SYSASSERT", 0 },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 96bda80632f3..f9de139561a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -51,6 +51,7 @@ struct iwl_ucode_capabilities {
u32 error_log_addr;
u32 error_log_size;
u32 num_stations;
+ u32 num_links;
u32 num_beacons;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 945bc4160cc9..a7b7cae874a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -249,7 +249,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
};
int blk_idx;
- /* loop for for all paging blocks + CSS block */
+ /* loop for all paging blocks + CSS block */
for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
__le32 phy_addr;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 4d9a1f83ef8c..ea435ee94312 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -39,6 +39,7 @@ IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
+IWL_BIOS_TABLE_LOADER_DATA(dsbr, u32);
static const struct dmi_system_id dmi_ppag_approved_list[] = {
@@ -100,6 +101,11 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
},
},
+ { .ident = "WIKO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WIKO"),
+ },
+ },
{}
};
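
The allow-list above is consumed with dmi_check_system(), which returns the number of entries whose DMI strings match the running platform, so any non-zero result approves the vendor. A minimal sketch of that gate (the helper name is illustrative; the real check lives in iwl_is_ppag_approved()):

#include <linux/dmi.h>
#include <linux/types.h>

static bool example_ppag_vendor_approved(void)
{
	/* non-zero return: at least one allow-list entry matched */
	return dmi_check_system(dmi_ppag_approved_list) > 0;
}
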
@@ -424,25 +430,31 @@ bool iwl_is_tas_approved(void)
}
IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
-int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_data *tas_data,
- const u32 tas_selection)
+struct iwl_tas_selection_data
+iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev)
{
- u8 override_iec = u32_get_bits(tas_selection,
+ struct iwl_tas_selection_data tas_selection_out = {};
+ u8 override_iec = u32_get_bits(tas_selection_in,
IWL_WTAS_OVERRIDE_IEC_MSK);
- u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
- u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
- int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
-
- IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
- tas_selection);
+ u8 canada_tas_uhb = u32_get_bits(tas_selection_in,
+ IWL_WTAS_CANADA_UHB_MSK);
+ u8 enabled_iec = u32_get_bits(tas_selection_in,
+ IWL_WTAS_ENABLE_IEC_MSK);
+ u8 usa_tas_uhb = u32_get_bits(tas_selection_in,
+ IWL_WTAS_USA_UHB_MSK);
+
+ if (tbl_rev > 0) {
+ tas_selection_out.usa_tas_uhb_allowed = usa_tas_uhb;
+ tas_selection_out.override_tas_iec = override_iec;
+ tas_selection_out.enable_tas_iec = enabled_iec;
+ }
- tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
- tas_data->override_tas_iec = override_iec;
- tas_data->enable_tas_iec = enabled_iec;
+ if (tbl_rev > 1)
+ tas_selection_out.canada_tas_uhb_allowed = canada_tas_uhb;
- return enabled;
+ return tas_selection_out;
}
+IWL_EXPORT_SYMBOL(iwl_parse_tas_selection);
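
With the regulatory.h masks (bits 0-2, 15 and 16 of the BIOS dword), the parse above is plain bitfield extraction. A hedged worked example, assuming an illustrative BIOS value of 0x18007 read at table revision 2:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

static void tas_selection_example(void)
{
	const u32 sel = 0x18007;	/* bits 0..2, 15 and 16 set */
	bool enabled = sel & BIT(0);			/* IWL_WTAS_ENABLED_MSK: 1 */
	u8 override_iec = u32_get_bits(sel, BIT(1));	/* 1 */
	u8 enable_iec = u32_get_bits(sel, BIT(2));	/* 1 */
	u8 canada_uhb = u32_get_bits(sel, BIT(15));	/* 1, used when tbl_rev > 1 */
	u8 usa_uhb = u32_get_bits(sel, BIT(16));	/* 1, used when tbl_rev > 0 */

	(void)enabled; (void)override_iec; (void)enable_iec;
	(void)canada_uhb; (void)usa_uhb;
}

Note that at revision 0 the parser fills in none of the fields, and below revision 2 it skips the Canada UHB bit; the enable bit (bit 0) is always tested separately by the callers.
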
static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
@@ -552,10 +564,16 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
if (!ret) {
- if (cmd_ver < 9)
- value &= DSM_UNII4_ALLOW_BITMAP_CMD_V8;
- else
- value &= DSM_UNII4_ALLOW_BITMAP;
+ value &= DSM_UNII4_ALLOW_BITMAP;
+
+ /* Since version 9, bits 4 and 5 are supported
+ * regardless of this capability.
+ */
+ if (cmd_ver < 9 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA))
+ value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |
+ DSM_VALUE_UNII4_CANADA_EN_MSK);
cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
}
@@ -564,7 +582,13 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
- if (cmd_ver < 12)
+
+ /* Since version 12, bits 5 and 6 are supported
+ * regardless of this capability.
+ */
+ if (cmd_ver < 12 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA))
value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;
cmd->chan_state_active_bitmap = cpu_to_le32(value);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 81787501d4a4..b355d7bef14c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -40,11 +40,19 @@
#define IWL_PPAG_ETSI_CHINA_MASK 3
#define IWL_PPAG_REV3_MASK 0x7FF
-#define IWL_WTAS_ENABLED_MSK 0x1
-#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
-#define IWL_WTAS_ENABLE_IEC_MSK 0x4
+#define IWL_WTAS_ENABLED_MSK BIT(0)
+#define IWL_WTAS_OVERRIDE_IEC_MSK BIT(1)
+#define IWL_WTAS_ENABLE_IEC_MSK BIT(2)
+#define IWL_WTAS_CANADA_UHB_MSK BIT(15)
#define IWL_WTAS_USA_UHB_MSK BIT(16)
+struct iwl_tas_selection_data {
+ u8 override_tas_iec:1,
+ enable_tas_iec:1,
+ usa_tas_uhb_allowed:1,
+ canada_tas_uhb_allowed:1;
+};
+
#define BIOS_MCC_CHINA 0x434e
/*
@@ -97,11 +105,11 @@ struct iwl_ppag_chain {
};
struct iwl_tas_data {
- __le32 block_list_size;
- __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
- u8 override_tas_iec;
- u8 enable_tas_iec;
- u8 usa_tas_uhb_allowed;
+ u8 block_list_size;
+ u16 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 table_source;
+ u8 table_revision;
+ u32 tas_selection;
};
/* For DSM revision 0 and 4 */
@@ -144,12 +152,11 @@ enum iwl_dsm_unii4_bitmap {
DSM_VALUE_UNII4_CANADA_EN_MSK = BIT(5),
};
-#define DSM_UNII4_ALLOW_BITMAP_CMD_V8 (DSM_VALUE_UNII4_US_OVERRIDE_MSK | \
- DSM_VALUE_UNII4_US_EN_MSK | \
- DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK | \
- DSM_VALUE_UNII4_ETSI_EN_MSK)
-#define DSM_UNII4_ALLOW_BITMAP (DSM_UNII4_ALLOW_BITMAP_CMD_V8 | \
- DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | \
+#define DSM_UNII4_ALLOW_BITMAP (DSM_VALUE_UNII4_US_OVERRIDE_MSK |\
+ DSM_VALUE_UNII4_US_EN_MSK |\
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK |\
+ DSM_VALUE_UNII4_ETSI_EN_MSK |\
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |\
DSM_VALUE_UNII4_CANADA_EN_MSK)
enum iwl_dsm_values_rfi {
@@ -184,9 +191,8 @@ bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
bool iwl_is_tas_approved(void);
-int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_data *tas_data,
- const u32 tas_selection);
+struct iwl_tas_selection_data
+iwl_parse_tas_selection(const u32 tas_selection, const u8 tbl_rev);
int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt);
@@ -221,4 +227,27 @@ static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
}
bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
+
+#define IWL_DSBR_FW_MODIFIED_URM_MASK BIT(8)
+#define IWL_DSBR_PERMANENT_URM_MASK BIT(9)
+
+int iwl_bios_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+
+static inline void iwl_bios_setup_step(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
+{
+ u32 dsbr;
+
+ if (!trans->trans_cfg->integrated)
+ return;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ return;
+
+ if (iwl_bios_get_dsbr(fwrt, &dsbr))
+ dsbr = 0;
+
+ trans->dsbr_urm_fw_dependent = !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK);
+ trans->dsbr_urm_permanent = !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK);
+}
#endif /* __fw_regulatory_h__ */
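
For context, the two URM flags that iwl_bios_setup_step() derives from the DSBR word are later reflected into the extended PRPH scratch control flags defined in iwl-context-info-gen3.h. A hedged sketch of that mapping (the function is illustrative; the real hookup happens when the PCIe context info is built, outside this header):

static u32 dsbr_to_ext_ctrl_flags(const struct iwl_trans *trans)
{
	u32 ext = 0;

	if (trans->dsbr_urm_fw_dependent)
		ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
	if (trans->dsbr_urm_permanent)
		ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;

	return ext;
}
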
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 091fb6fd7c78..434eed4130b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -13,9 +13,12 @@
#include <linux/efi.h>
#include "fw/runtime.h"
-#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
- 0xb2, 0xec, 0xf5, 0xa3, \
- 0x59, 0x4f, 0x4a, 0xea)
+#define IWL_EFI_WIFI_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
+ 0xb2, 0xec, 0xf5, 0xa3, \
+ 0x59, 0x4f, 0x4a, 0xea)
+#define IWL_EFI_WIFI_BT_GUID EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, \
+ 0x8d, 0x03, 0x77, 0x2e, \
+ 0xcc, 0x3d, 0xa5, 0x31)
struct iwl_uefi_pnvm_mem_desc {
__le32 addr;
@@ -61,7 +64,7 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
*len = 0;
- data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID,
+ data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_WIFI_GUID,
&package_size);
if (IS_ERR(data)) {
IWL_DEBUG_FW(trans,
@@ -76,18 +79,18 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
return data;
}
-static
-void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
- efi_char16_t *uefi_var_name,
- char *var_name,
- unsigned int expected_size,
- unsigned long *size)
+static void *
+iwl_uefi_get_verified_variable_guid(struct iwl_trans *trans,
+ efi_guid_t *guid,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
{
void *var;
unsigned long var_size;
- var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
- &var_size);
+ var = iwl_uefi_get_variable(uefi_var_name, guid, &var_size);
if (IS_ERR(var)) {
IWL_DEBUG_RADIO(trans,
@@ -112,6 +115,18 @@ void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
return var;
}
+static void *
+iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
+{
+ return iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_GUID,
+ uefi_var_name, var_name,
+ expected_size, size);
+}
+
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
{
@@ -311,8 +326,9 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
- data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
- "STEP", sizeof(*data), NULL);
+ data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID,
+ IWL_UEFI_STEP_NAME,
+ "STEP", sizeof(*data), NULL);
if (IS_ERR(data))
return;
@@ -554,27 +570,31 @@ int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
struct iwl_tas_data *tas_data)
{
struct uefi_cnv_var_wtas *uefi_tas;
- int ret = 0, enabled, i;
+ int ret, enabled;
uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME,
"WTAS", sizeof(*uefi_tas), NULL);
if (IS_ERR(uefi_tas))
return -EINVAL;
- if (uefi_tas->revision != IWL_UEFI_WTAS_REVISION) {
+ if (uefi_tas->revision < IWL_UEFI_MIN_WTAS_REVISION ||
+ uefi_tas->revision > IWL_UEFI_MAX_WTAS_REVISION) {
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n",
uefi_tas->revision);
goto out;
}
- enabled = iwl_parse_tas_selection(fwrt, tas_data,
- uefi_tas->tas_selection);
- if (!enabled) {
- IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
- ret = 0;
- goto out;
- }
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ uefi_tas->tas_selection);
+
+ enabled = uefi_tas->tas_selection & IWL_WTAS_ENABLED_MSK;
+ tas_data->table_source = BIOS_SOURCE_UEFI;
+ tas_data->table_revision = uefi_tas->revision;
+ tas_data->tas_selection = uefi_tas->tas_selection;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS %s enabled\n",
+ enabled ? "is" : "not");
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n",
uefi_tas->revision);
@@ -584,15 +604,16 @@ int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
ret = -EINVAL;
goto out;
}
- tas_data->block_list_size = cpu_to_le32(uefi_tas->black_list_size);
+
+ tas_data->block_list_size = uefi_tas->black_list_size;
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size);
- for (i = 0; i < uefi_tas->black_list_size; i++) {
- tas_data->block_list_array[i] =
- cpu_to_le32(uefi_tas->black_list[i]);
+ for (u8 i = 0; i < uefi_tas->black_list_size; i++) {
+ tas_data->block_list_array[i] = uefi_tas->black_list[i];
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n",
uefi_tas->black_list[i]);
}
+ ret = enabled;
out:
kfree(uefi_tas);
return ret;
@@ -758,3 +779,29 @@ int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
return puncturing;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_puncturing);
+
+int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ struct uefi_cnv_wlan_dsbr_data *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable_guid(fwrt->trans,
+ &IWL_EFI_WIFI_BT_GUID,
+ IWL_UEFI_DSBR_NAME, "DSBR",
+ sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_DSBR_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSBR revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *value = data->config;
+ IWL_DEBUG_RADIO(fwrt, "Loaded DSBR config from UEFI value: 0x%x\n",
+ *value);
+out:
+ kfree(data);
+ return ret;
+}
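
Callers never reach iwl_uefi_get_dsbr() directly: they go through the iwl_bios_get_dsbr() dispatcher generated by IWL_BIOS_TABLE_LOADER_DATA(dsbr, u32) in regulatory.c, which dispatches to the UEFI (and, where implemented, ACPI) backend. A hedged usage sketch mirroring iwl_bios_setup_step() (helper name illustrative):

static u32 example_read_dsbr(struct iwl_fw_runtime *fwrt)
{
	u32 dsbr;

	/* absent variable or bad revision: treat as "no overrides" */
	if (iwl_bios_get_dsbr(fwrt, &dsbr))
		return 0;

	return dsbr;
}
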
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index e525d449e656..0c8943a8bd01 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -23,6 +23,7 @@
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
+#define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR"
#define IWL_SGOM_MAP_SIZE 339
@@ -33,13 +34,15 @@
#define IWL_UEFI_WGDS_REVISION 3
#define IWL_UEFI_MIN_PPAG_REV 1
#define IWL_UEFI_MAX_PPAG_REV 3
-#define IWL_UEFI_WTAS_REVISION 1
+#define IWL_UEFI_MIN_WTAS_REVISION 1
+#define IWL_UEFI_MAX_WTAS_REVISION 2
#define IWL_UEFI_SPLC_REVISION 0
#define IWL_UEFI_WRDD_REVISION 0
#define IWL_UEFI_ECKV_REVISION 0
#define IWL_UEFI_WBEM_REVISION 0
#define IWL_UEFI_DSM_REVISION 4
#define IWL_UEFI_PUNCTURING_REVISION 0
+#define IWL_UEFI_DSBR_REVISION 1
struct pnvm_sku_package {
u8 rev;
@@ -213,6 +216,20 @@ struct uefi_cnv_var_puncturing_data {
u32 puncturing;
} __packed;
+/**
+ * struct uefi_cnv_wlan_dsbr_data - BIOS STEP configuration information
+ * @revision: the revision of the table
+ * @config: STEP configuration flags:
+ * bit 8, switch to URM depending on FW setting
+ * bit 9, switch to URM
+ *
+ * Platform information for STEP configuration/workarounds.
+ */
+struct uefi_cnv_wlan_dsbr_data {
+ u8 revision;
+ u32 config;
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -244,6 +261,7 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwr
int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -346,5 +364,11 @@ int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
{
return 0;
}
+
+static inline
+int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 17721bb47e25..2b6a80142aba 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -38,6 +38,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_AX210,
IWL_DEVICE_FAMILY_BZ,
IWL_DEVICE_FAMILY_SC,
+ IWL_DEVICE_FAMILY_DR,
};
/*
@@ -102,6 +103,10 @@ enum iwl_nvm_type {
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
+#define IWL_FW_AND_PNVM(pfx, api) \
+ MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode"); \
+ MODULE_FIRMWARE(pfx ".pnvm")
+
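IWL_FW_AND_PNVM() just stamps out the MODULE_FIRMWARE() lines for a device's .ucode and .pnvm blobs in one go. A sketch of its expansion, with an illustrative prefix and API version:

/* IWL_FW_AND_PNVM("iwlwifi-ma-b0-gf-a0", 92) expands to: */
MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0" "-" "92" ".ucode");
MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0" ".pnvm");
/* i.e. "iwlwifi-ma-b0-gf-a0-92.ucode" and "iwlwifi-ma-b0-gf-a0.pnvm" */
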
static inline u8 num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
@@ -424,6 +429,8 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_SC2 0x49
#define IWL_CFG_MAC_TYPE_SC2F 0x4A
#define IWL_CFG_MAC_TYPE_BZ_W 0x4B
+#define IWL_CFG_MAC_TYPE_BR 0x4C
+#define IWL_CFG_MAC_TYPE_DR 0x4D
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -434,6 +441,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_TYPE_WH 0x113
+#define IWL_CFG_RF_TYPE_PE 0x114
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
@@ -506,6 +514,8 @@ extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_br_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
extern const char iwl9260_1_name[];
@@ -551,6 +561,8 @@ extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
extern const char iwl_sc2_name[];
extern const char iwl_sc2f_name[];
+extern const char iwl_dr_name[];
+extern const char iwl_br_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -649,7 +661,6 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwl_cfg_ma;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
-extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz;
@@ -658,6 +669,8 @@ extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
extern const struct iwl_cfg iwl_cfg_sc2;
extern const struct iwl_cfg iwl_cfg_sc2f;
+extern const struct iwl_cfg iwl_cfg_dr;
+extern const struct iwl_cfg iwl_cfg_br;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 5b62933134cf..cd25a1b9f2ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -76,7 +76,17 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
};
-/*
+/**
+ * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
+ * @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
+ * @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
+ */
+enum iwl_prph_scratch_ext_flags {
+ IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
+ IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
+};
+
+/**
* struct iwl_prph_scratch_version - version structure
* @mac_id: SKU and revision id
* @version: prph scratch information version id
@@ -90,17 +100,18 @@ struct iwl_prph_scratch_version {
__le16 reserved;
} __packed; /* PERIPH_SCRATCH_VERSION_S */
-/*
+/**
* struct iwl_prph_scratch_control - control structure
* @control_flags: context information flags see &enum iwl_prph_scratch_flags
- * @reserved: reserved
+ * @control_flags_ext: context information for extended flags,
+ * see &enum iwl_prph_scratch_ext_flags
*/
struct iwl_prph_scratch_control {
__le32 control_flags;
- __le32 reserved;
+ __le32 control_flags_ext;
} __packed; /* PERIPH_SCRATCH_CONTROL_S */
-/*
+/**
* struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
* @pnvm_base_addr: PNVM start address
* @pnvm_size: the size of the PNVM image in bytes
@@ -120,7 +131,8 @@ struct iwl_prph_scratch_pnvm_cfg {
struct iwl_prph_scrath_mem_desc_addr_array {
__le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX];
} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */
-/*
+
+/**
* struct iwl_prph_scratch_hwm_cfg - hwm config
* @hwm_base_addr: hwm start address
* @hwm_size: hwm size in DWs
@@ -132,7 +144,7 @@ struct iwl_prph_scratch_hwm_cfg {
__le32 debug_token_config;
} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
-/*
+/**
* struct iwl_prph_scratch_rbd_cfg - RBDs configuration
* @free_rbd_addr: default queue free RB CB base address
* @reserved: reserved
@@ -142,10 +154,11 @@ struct iwl_prph_scratch_rbd_cfg {
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
-/*
+/**
* struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
* @base_addr: reduce power table address
* @size: the size of the entire power table image
+ * @reserved: (reserved)
*/
struct iwl_prph_scratch_uefi_cfg {
__le64 base_addr;
@@ -153,7 +166,7 @@ struct iwl_prph_scratch_uefi_cfg {
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
-/*
+/**
* struct iwl_prph_scratch_step_cfg - prph scratch step configuration
* @mbx_addr_0: [0:7] revision,
* [8:15] cnvi_to_cnvr length,
@@ -167,13 +180,14 @@ struct iwl_prph_scratch_step_cfg {
__le32 mbx_addr_1;
} __packed;
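
The @mbx_addr_0 word packs three byte-wide fields. A hedged sketch of assembling such a word with FIELD_PREP() (the mask names are hypothetical; the driver only reads this word, it does not build it):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define STEP_MBX0_REVISION	GENMASK(7, 0)
#define STEP_MBX0_CNVI_TO_CNVR	GENMASK(15, 8)
#define STEP_MBX0_CNVR_TO_CNVI	GENMASK(23, 16)

static u32 step_pack_mbx0(u8 rev, u8 cnvi_to_cnvr, u8 cnvr_to_cnvi)
{
	return FIELD_PREP(STEP_MBX0_REVISION, rev) |
	       FIELD_PREP(STEP_MBX0_CNVI_TO_CNVR, cnvi_to_cnvr) |
	       FIELD_PREP(STEP_MBX0_CNVR_TO_CNVI, cnvr_to_cnvi);
}
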
-/*
+/**
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
* @pnvm_cfg: PNVM (platform NVM data) configuration
* @hwm_cfg: hwm configuration
* @rbd_cfg: default RX queue configuration
+ * @reduce_power_cfg: UEFI power reduction table
* @step_cfg: step configuration
*/
struct iwl_prph_scratch_ctrl_cfg {
@@ -186,7 +200,7 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_step_cfg step_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
-/*
+/**
* struct iwl_prph_scratch - peripheral scratch mapping
* @ctrl_cfg: control and configuration of prph scratch
* @dram: firmware images addresses in DRAM
@@ -202,7 +216,7 @@ struct iwl_prph_scratch {
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
-/*
+/**
* struct iwl_prph_info - peripheral information
* @boot_stage_mirror: reflects the value in the Boot Stage CSR register
* @ipc_status_mirror: reflects the value in the IPC Status CSR register
@@ -216,7 +230,7 @@ struct iwl_prph_info {
__le32 reserved;
} __packed; /* PERIPH_INFO_S */
-/*
+/**
* struct iwl_context_info_gen3 - device INIT configuration
* @version: version of the context information
* @size: size of context information in DWs
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index 1a1321db137c..dfd44fabf237 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022 Intel Corporation
+ * Copyright (C) 2018-2020, 2022, 2024 Intel Corporation
*/
#ifndef __iwl_context_info_file_h__
#define __iwl_context_info_file_h__
-/* maximmum number of DRAM map entries supported by FW */
+/* maximum number of DRAM map entries supported by FW */
#define IWL_MAX_DRAM_ENTRY 64
#define CSR_CTXT_INFO_BA 0x40
@@ -53,11 +53,12 @@ enum iwl_context_info_flags {
IWL_CTXT_INFO_RB_SIZE_32K = 0xe,
};
-/*
+/**
* struct iwl_context_info_version - version structure
* @mac_id: SKU and revision id
* @version: context information version id
* @size: the size of the context information in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_version {
__le16 mac_id;
@@ -66,16 +67,17 @@ struct iwl_context_info_version {
__le16 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_control - control structure
* @control_flags: context information flags see &enum iwl_context_info_flags
+ * @reserved: (reserved)
*/
struct iwl_context_info_control {
__le32 control_flags;
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_dram - images DRAM map
* each entry in the map represents a DRAM chunk of up to 32 KB
* @umac_img: UMAC image DRAM map
@@ -88,7 +90,7 @@ struct iwl_context_info_dram {
__le64 virtual_img[IWL_MAX_DRAM_ENTRY];
} __packed;
-/*
+/**
* struct iwl_context_info_rbd_cfg - RBDs configuration
* @free_rbd_addr: default queue free RB CB base address
* @used_rbd_addr: default queue used RB CB base address
@@ -100,10 +102,11 @@ struct iwl_context_info_rbd_cfg {
__le64 status_wr_ptr;
} __packed;
-/*
+/**
* struct iwl_context_info_hcmd_cfg - command queue configuration
* @cmd_queue_addr: address of command queue
* @cmd_queue_size: number of entries
+ * @reserved: (reserved)
*/
struct iwl_context_info_hcmd_cfg {
__le64 cmd_queue_addr;
@@ -111,10 +114,11 @@ struct iwl_context_info_hcmd_cfg {
u8 reserved[7];
} __packed;
-/*
+/**
* struct iwl_context_info_dump_cfg - Core Dump configuration
* @core_dump_addr: core dump (debug DRAM address) start address
* @core_dump_size: size, in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_dump_cfg {
__le64 core_dump_addr;
@@ -122,10 +126,11 @@ struct iwl_context_info_dump_cfg {
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_pnvm_cfg - platform NVM data configuration
* @platform_nvm_addr: Platform NVM data start address
* @platform_nvm_size: size in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_pnvm_cfg {
__le64 platform_nvm_addr;
@@ -133,11 +138,12 @@ struct iwl_context_info_pnvm_cfg {
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_early_dbg_cfg - early debug configuration for
* dumping DRAM addresses
* @early_debug_addr: early debug start address
* @early_debug_size: size in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_early_dbg_cfg {
__le64 early_debug_addr;
@@ -145,16 +151,20 @@ struct iwl_context_info_early_dbg_cfg {
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info - device INIT configuration
* @version: version information of context info and HW
* @control: control flags of FH configurations
+ * @reserved0: (reserved)
* @rbd_cfg: default RX queue configuration
* @hcmd_cfg: command queue configuration
+ * @reserved1: (reserved)
* @dump_cfg: core dump data
* @edbg_cfg: early debug configuration
* @pnvm_cfg: platform nvm configuration
+ * @reserved2: (reserved)
* @dram: firmware image addresses in DRAM
+ * @reserved3: (reserved)
*/
struct iwl_context_info {
struct iwl_context_info_version version;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 98563757ce2c..be9e464c9b7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -167,13 +167,15 @@
#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
-#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
-#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
-#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
-#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
+#define CSR_HW_IF_CONFIG_REG_HAP_WAKE 0x00080000
+/* NOTE: EEPROM_OWN_SEM is no longer defined for new HW */
+#define CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM 0x00200000
+#define CSR_HW_IF_CONFIG_REG_PCI_OWN_SET 0x00400000
+#define CSR_HW_IF_CONFIG_REG_IAMT_UP 0x01000000
+#define CSR_HW_IF_CONFIG_REG_ME_OWN 0x02000000
+#define CSR_HW_IF_CONFIG_REG_WAKE_ME 0x08000000
+#define CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN 0x10000000
+#define CSR_HW_IF_CONFIG_REG_PERSISTENCE 0x40000000
#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
@@ -351,7 +353,6 @@ enum {
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
-#define CSR_HW_RF_ID_TYPE_MS (0x00111000)
#define CSR_HW_RF_ID_TYPE_FM (0x00112000)
#define CSR_HW_RF_ID_TYPE_WP (0x00113000)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 1b9f16a31b54..bf52c2edaad1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2021 Intel Corporation
+ * Copyright(c) 2018 - 2021, 2024 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*****************************************************************************/
@@ -209,6 +209,7 @@ do { \
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
+#define IWL_DEBUG_DEV_POWER(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_WOWLAN(p, f, a...) IWL_DEBUG(p, IWL_DL_WOWLAN, f, ## a)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c620911a1193..d3a65f33097c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -22,6 +22,7 @@
#include "iwl-modparams.h"
#include "fw/api/alive.h"
#include "fw/api/mac.h"
+#include "fw/api/mac-cfg.h"
/******************************************************************************
*
@@ -137,8 +138,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
memset(&drv->fw, 0, sizeof(drv->fw));
}
-static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
- struct fw_sec *sec)
+static int iwl_alloc_fw_desc(struct fw_desc *desc, struct fw_sec *sec)
{
void *data;
@@ -318,17 +318,6 @@ struct iwl_firmware_pieces {
size_t n_mem_tlv;
};
-/*
- * These functions are just to extract uCode section data from the pieces
- * structure.
- */
-static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type,
- int sec)
-{
- return &pieces->img[type].sec[sec];
-}
-
static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
enum iwl_ucode_type type,
int sec)
@@ -389,22 +378,18 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
/*
* Gets uCode section from tlv.
*/
-static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
- const void *data, enum iwl_ucode_type type,
- int size)
+static int iwl_store_ucode_sec(struct fw_img_parsing *img,
+ const void *data, int size)
{
- struct fw_img_parsing *img;
struct fw_sec *sec;
const struct fw_sec_parsing *sec_parse;
size_t alloc_size;
- if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
- return -1;
+ if (WARN_ON(!img || !data))
+ return -EINVAL;
sec_parse = (const struct fw_sec_parsing *)data;
- img = &pieces->img[type];
-
alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
if (!sec)
@@ -900,18 +885,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_SEC_RT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_INIT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_INIT],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_WOWLAN:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_WOWLAN],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_DEF_CALIB:
@@ -932,18 +917,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
FW_PHY_CFG_RX_CHAIN_POS;
break;
case IWL_UCODE_TLV_SECURE_SEC_RT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_INIT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_INIT],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_WOWLAN],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_NUM_OF_CPU:
@@ -1110,9 +1095,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
*usniffer_images = true;
- iwl_store_ucode_sec(pieces, tlv_data,
- IWL_UCODE_REGULAR_USNIFFER,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR_USNIFFER],
+ tlv_data, tlv_len);
break;
case IWL_UCODE_TLV_PAGING:
if (tlv_len != sizeof(u32))
@@ -1214,6 +1198,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->num_stations =
le32_to_cpup((const __le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_FW_NUM_LINKS:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ if (le32_to_cpup((const __le32 *)tlv_data) >
+ IWL_FW_MAX_LINK_ID + 1) {
+ IWL_ERR(drv,
+ "%d is an invalid number of links\n",
+ le32_to_cpup((const __le32 *)tlv_data));
+ goto tlv_error;
+ }
+ capa->num_links =
+ le32_to_cpup((const __le32 *)tlv_data);
+ break;
case IWL_UCODE_TLV_FW_NUM_BEACONS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
@@ -1337,26 +1334,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
-static int iwl_alloc_ucode(struct iwl_drv *drv,
- struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type)
+static int iwl_alloc_ucode_mem(struct fw_img *out, struct fw_img_parsing *img)
{
- int i;
struct fw_desc *sec;
- sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
+ sec = kcalloc(img->sec_counter, sizeof(*sec), GFP_KERNEL);
if (!sec)
return -ENOMEM;
- drv->fw.img[type].sec = sec;
- drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
- for (i = 0; i < pieces->img[type].sec_counter; i++)
- if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
+ out->sec = sec;
+ out->num_sec = img->sec_counter;
+
+ for (int i = 0; i < out->num_sec; i++)
+ if (iwl_alloc_fw_desc(&sec[i], &img->sec[i]))
return -ENOMEM;
return 0;
}
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type)
+{
+ return iwl_alloc_ucode_mem(&drv->fw.img[type], &pieces->img[type]);
+}
+
static int validate_sec_sizes(struct iwl_drv *drv,
struct iwl_firmware_pieces *pieces,
const struct iwl_cfg *cfg)
@@ -1429,18 +1431,21 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
op_mode = ops->start(drv->trans, drv->trans->cfg,
&drv->fw, dbgfs_dir);
- if (op_mode)
+ if (!IS_ERR(op_mode))
return op_mode;
if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
break;
- IWL_ERR(drv, "retry init count %d\n", retry);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
#endif
+
+ if (PTR_ERR(op_mode) != -ETIMEDOUT)
+ break;
+
+ IWL_ERR(drv, "retry init count %d\n", retry);
}
return NULL;
@@ -1944,6 +1949,7 @@ module_init(iwl_drv_init);
static void __exit iwl_drv_exit(void)
{
iwl_pci_unregister_driver();
+ iwl_trans_free_restart_list();
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(iwl_dbgfs_root);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 060becfd64f3..0653ca8b974a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -526,5 +526,5 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
if (interrupts_enabled)
iwl_trans_interrupts(trans, true);
- iwl_trans_fw_error(trans, false);
+ iwl_trans_fw_error(trans, IWL_ERR_TYPE_NMI_FORCED);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index d902121da009..9f7e013252fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -141,8 +141,10 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
/**
* enum iwl_nvm_channel_flags - channel flags in NVM
* @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
- * @NVM_CHANNEL_IBSS: usable as an IBSS channel
- * @NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
+ * when %IWL_NVM_SBANDS_FLAGS_LAR is enabled.
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
+ * when %IWL_NVM_SBANDS_FLAGS_LAR is enabled.
* @NVM_CHANNEL_RADAR: radar detection required
* @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
* @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 8ef5ed2db051..34eca1a568ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -45,6 +45,55 @@ struct iwl_cfg;
*/
/**
+ * enum iwl_fw_error_type - FW error types/sources
+ * @IWL_ERR_TYPE_IRQ: "normal" FW error through an IRQ
+ * @IWL_ERR_TYPE_NMI_FORCED: NMI was forced by driver
+ * @IWL_ERR_TYPE_RESET_HS_TIMEOUT: reset handshake timed out,
+ * any debug collection must happen synchronously as
+ * the device will be shut down
+ * @IWL_ERR_TYPE_CMD_QUEUE_FULL: command queue was full
+ */
+enum iwl_fw_error_type {
+ IWL_ERR_TYPE_IRQ,
+ IWL_ERR_TYPE_NMI_FORCED,
+ IWL_ERR_TYPE_RESET_HS_TIMEOUT,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL,
+};
+
+/**
+ * enum iwl_fw_error_context - error dump context
+ * @IWL_ERR_CONTEXT_WORKER: regular from worker context,
+ * opmode must acquire locks and must also check
+ * for @IWL_ERR_CONTEXT_ABORT after acquiring locks
+ * @IWL_ERR_CONTEXT_FROM_OPMODE: context is in a call
+ * originating from the opmode, e.g. while resetting
+ * or stopping the device, so opmode must not acquire
+ * any locks
+ * @IWL_ERR_CONTEXT_ABORT: after lock acquisition, indicates
+ * that the dump already happened via another callback
+ * (currently only while stopping the device) via the
+ * @IWL_ERR_CONTEXT_FROM_OPMODE context, and this call
+ * must be aborted
+ */
+enum iwl_fw_error_context {
+ IWL_ERR_CONTEXT_WORKER,
+ IWL_ERR_CONTEXT_FROM_OPMODE,
+ IWL_ERR_CONTEXT_ABORT,
+};
+
+/**
+ * struct iwl_fw_error_dump_mode - error dump mode for callback
+ * @type: The reason for the dump, per &enum iwl_fw_error_type.
+ * @context: The context for the dump, may also indicate this
+ * call needs to be skipped. This MUST be checked before
+ * and after acquiring any locks in the op-mode!
+ */
+struct iwl_fw_error_dump_mode {
+ enum iwl_fw_error_type type;
+ enum iwl_fw_error_context context;
+};
+
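To make the @context contract concrete, here is a hedged sketch of an op-mode dump_error() implementation (the opmode type, its lock and the container helper are hypothetical). The key point is re-checking for IWL_ERR_CONTEXT_ABORT after taking the lock, since the stop path may flip the shared mode struct while the worker waits:

static void example_dump_error(struct iwl_op_mode *op_mode,
			       struct iwl_fw_error_dump_mode *mode)
{
	struct example_opmode *priv = example_opmode_from(op_mode);
	bool locked = false;

	if (mode->context == IWL_ERR_CONTEXT_WORKER) {
		mutex_lock(&priv->mutex);
		locked = true;
	}

	/* the stop path may have dumped already and flipped the context */
	if (mode->context != IWL_ERR_CONTEXT_ABORT) {
		/* ... collect the firmware dump here ... */
	}

	if (locked)
		mutex_unlock(&priv->mutex);
}
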
+/**
* struct iwl_op_mode_ops - op_mode specific operations
*
* The op_mode exports its ops so that external components can start it and
@@ -77,10 +126,11 @@ struct iwl_cfg;
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
* Must be atomic
- * @nic_error: error notification. Must be atomic and must be called with BH
- * disabled, unless the sync parameter is true.
- * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
- * called with BH disabled.
+ * @nic_error: error notification. Must be atomic, the op mode should handle
+ * the error (e.g. abort notification waiters) and print the error if
+ * applicable
+ * @dump_error: NIC error dump collection (can sleep, synchronous)
+ * @sw_reset: (maybe) initiate a software reset, return %true if started
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
@@ -104,8 +154,12 @@ struct iwl_op_mode_ops {
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
- void (*nic_error)(struct iwl_op_mode *op_mode, bool sync);
- void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
+ void (*nic_error)(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type);
+ void (*dump_error)(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode);
+ bool (*sw_reset)(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
void (*time_point)(struct iwl_op_mode *op_mode,
@@ -177,14 +231,19 @@ static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
op_mode->ops->free_skb(op_mode, skb);
}
-static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync)
+static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
- op_mode->ops->nic_error(op_mode, sync);
+ op_mode->ops->nic_error(op_mode, type);
}
-static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
+static inline void iwl_op_mode_dump_error(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode)
{
- op_mode->ops->cmd_queue_full(op_mode);
+ might_sleep();
+
+ if (op_mode->ops->dump_error)
+ op_mode->ops->dump_error(op_mode, mode);
}
static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index dc171c29eb7b..23b2009fbb28 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -381,6 +381,10 @@ enum {
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
+#define CNVI_SCU_REG_FOR_ECO_1 0xA26EF8
+#define CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN BIT(4)
+#define CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT BIT(5)
+
#define CNVI_PMU_STEP_FLOW 0xA2D588
#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2)
@@ -458,6 +462,7 @@ enum {
#define REG_CRF_ID_TYPE_GF 0x410
#define REG_CRF_ID_TYPE_FM 0x910
#define REG_CRF_ID_TYPE_WHP 0xA10
+#define REG_CRF_ID_TYPE_PE 0xA30
#define HPM_DEBUG 0xA03440
#define PERSISTENCE_BIT BIT(12)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 3c9d91496c82..49c8507d1a6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -6,6 +6,7 @@
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
+#include <linux/list.h>
#include "fw/api/tx.h"
#include "iwl-trans.h"
@@ -16,13 +17,200 @@
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"
+struct iwl_trans_dev_restart_data {
+ struct list_head list;
+ unsigned int restart_count;
+ time64_t last_error;
+ char name[];
+};
+
+static LIST_HEAD(restart_data_list);
+static DEFINE_SPINLOCK(restart_data_lock);
+
+static struct iwl_trans_dev_restart_data *
+iwl_trans_get_restart_data(struct device *dev)
+{
+ struct iwl_trans_dev_restart_data *tmp, *data = NULL;
+ const char *name = dev_name(dev);
+
+ spin_lock(&restart_data_lock);
+ list_for_each_entry(tmp, &restart_data_list, list) {
+ if (strcmp(tmp->name, name))
+ continue;
+ data = tmp;
+ break;
+ }
+ spin_unlock(&restart_data_lock);
+
+ if (data)
+ return data;
+
+ data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
+ if (!data)
+ return NULL;
+
+ strcpy(data->name, name);
+ spin_lock(&restart_data_lock);
+ list_add_tail(&data->list, &restart_data_list);
+ spin_unlock(&restart_data_lock);
+
+ return data;
+}
+
+static void iwl_trans_inc_restart_count(struct device *dev)
+{
+ struct iwl_trans_dev_restart_data *data;
+
+ data = iwl_trans_get_restart_data(dev);
+ if (data) {
+ data->last_error = ktime_get_boottime_seconds();
+ data->restart_count++;
+ }
+}
+
+void iwl_trans_free_restart_list(void)
+{
+ struct iwl_trans_dev_restart_data *tmp;
+
+ while ((tmp = list_first_entry_or_null(&restart_data_list,
+ typeof(*tmp), list))) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+struct iwl_trans_reprobe {
+ struct device *dev;
+ struct work_struct work;
+};
+
+static void iwl_trans_reprobe_wk(struct work_struct *wk)
+{
+ struct iwl_trans_reprobe *reprobe;
+
+ reprobe = container_of(wk, typeof(*reprobe), work);
+
+ if (device_reprobe(reprobe->dev))
+ dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
+ kfree(reprobe);
+ module_put(THIS_MODULE);
+}
+
+#define IWL_TRANS_RESET_OK_TIME 180 /* seconds */
+
+static enum iwl_reset_mode
+iwl_trans_determine_restart_mode(struct iwl_trans *trans)
+{
+ struct iwl_trans_dev_restart_data *data;
+ enum iwl_reset_mode at_least = 0;
+ unsigned int index;
+ static const enum iwl_reset_mode escalation_list[] = {
+ IWL_RESET_MODE_SW_RESET,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_FUNC_RESET,
+ /* FIXME: add TOP reset */
+ IWL_RESET_MODE_PROD_RESET,
+ /* FIXME: add TOP reset */
+ IWL_RESET_MODE_PROD_RESET,
+ /* FIXME: add TOP reset */
+ IWL_RESET_MODE_PROD_RESET,
+ };
+
+ if (trans->restart.during_reset)
+ at_least = IWL_RESET_MODE_REPROBE;
+
+ data = iwl_trans_get_restart_data(trans->dev);
+ if (!data)
+ return at_least;
+
+ if (ktime_get_boottime_seconds() - data->last_error >=
+ IWL_TRANS_RESET_OK_TIME)
+ data->restart_count = 0;
+
+ index = data->restart_count;
+ if (index >= ARRAY_SIZE(escalation_list))
+ index = ARRAY_SIZE(escalation_list) - 1;
+
+ return max(at_least, escalation_list[index]);
+}
+
+#define IWL_TRANS_RESET_DELAY (HZ * 60)
+
+static void iwl_trans_restart_wk(struct work_struct *wk)
+{
+ struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
+ struct iwl_trans_reprobe *reprobe;
+ enum iwl_reset_mode mode;
+
+ if (!trans->op_mode)
+ return;
+
+ /* might have been scheduled before marked as dead, re-check */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return;
+
+ iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode);
+
+ /*
+ * If the opmode stopped the device while we were trying to dump and
+ * reset, then we'll have done the dump already (synchronized by the
+ * opmode lock that it will acquire in iwl_op_mode_dump_error()) and
+ * managed that via trans->restart.mode.
+ * Additionally, make sure that in such a case we won't attempt to do
+ * any resets now, since it's no longer requested.
+ */
+ if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status))
+ return;
+
+ if (!iwlwifi_mod_params.fw_restart)
+ return;
+
+ mode = iwl_trans_determine_restart_mode(trans);
+
+ iwl_trans_inc_restart_count(trans->dev);
+
+ switch (mode) {
+ case IWL_RESET_MODE_SW_RESET:
+ IWL_ERR(trans, "Device error - SW reset\n");
+ iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
+ break;
+ case IWL_RESET_MODE_REPROBE:
+ IWL_ERR(trans, "Device error - reprobe!\n");
+
+ /*
+ * take a module reference to avoid doing this while the module
+ * is being unloaded, and to avoid scheduling a work item whose
+ * code would then be gone.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(trans, "Module is being unloaded - abort\n");
+ return;
+ }
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
+ if (!reprobe) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ reprobe->dev = get_device(trans->dev);
+ INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk);
+ schedule_work(&reprobe->work);
+ break;
+ default:
+ iwl_trans_pcie_reset(trans, mode);
+ break;
+ }
+}
+
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
- static struct lock_class_key __key;
+ static struct lock_class_key __sync_cmd_key;
#endif
trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
@@ -33,12 +221,14 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
- &__key, 0);
+ &__sync_cmd_key, 0);
#endif
trans->dev = dev;
trans->num_rx_queues = 1;
+ INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);
+
return trans;
}
@@ -81,6 +271,7 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
+ cancel_work_sync(&trans->restart.wk);
kmem_cache_destroy(trans->dev_cmd_pool);
}
@@ -391,6 +582,34 @@ void iwl_trans_stop_device(struct iwl_trans *trans)
{
might_sleep();
+ /*
+ * See also the comment in iwl_trans_restart_wk().
+ *
+ * When the opmode stops the device while a reset is pending, the
+ * worker (iwl_trans_restart_wk) might not have run yet or, more
+ * likely, will be blocked on the opmode lock. Due to the locking,
+ * we can't just flush the worker.
+ *
+ * If this is the case, then the test_and_clear_bit() ensures that
+ * the worker won't attempt to do anything after the stop.
+ *
+ * The trans->restart.mode is a handshake with the opmode, we set
+ * the context there to ABORT so that when the worker can finally
+ * acquire the lock in the opmode, the code there won't attempt to
+ * do any dumps. Since we'd really like to have the dump though,
+ * also do it inline here (with the opmode locks already held),
+ * but use a separate mode struct to avoid races.
+ */
+ if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) {
+ struct iwl_fw_error_dump_mode mode;
+
+ mode = trans->restart.mode;
+ mode.context = IWL_ERR_CONTEXT_FROM_OPMODE;
+ trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT;
+
+ iwl_op_mode_dump_error(trans->op_mode, &mode);
+ }
+
if (trans->trans_cfg->gen2)
iwl_trans_pcie_gen2_stop_device(trans);
else
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index c70da7281551..f6234065dbdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -300,6 +300,10 @@ enum iwl_d3_status {
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
* e.g. for testing
+ * @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode
+ * via iwl_trans_finish_sw_reset()
+ * @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
+ * the firmware state yet
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -311,6 +315,8 @@ enum iwl_trans_status {
STATUS_FW_ERROR,
STATUS_TRANS_DEAD,
STATUS_SUPPRESS_CMD_ERROR_ONCE,
+ STATUS_IN_SW_RESET,
+ STATUS_RESET_PENDING,
};
static inline int
@@ -322,7 +328,6 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
- return get_order(8 * 1024);
case IWL_AMSDU_12K:
return get_order(16 * 1024);
default:
@@ -628,8 +633,6 @@ struct iwl_pc_data {
* @n_dest_reg: num of reg_ops in %dbg_dest_tlv
* @rec_on: true iff there is a fw debug recording currently active
* @dest_tlv: points to the destination TLV for debug
- * @conf_tlv: array of pointers to configuration TLVs for debug
- * @trigger_tlv: array of pointers to triggers TLVs for debug
* @lmac_error_event_table: addrs of lmacs error tables
* @umac_error_event_table: addr of umac error table
* @tcm_error_event_table: address(es) of TCM error table(s)
@@ -664,8 +667,6 @@ struct iwl_trans_debug {
bool rec_on;
const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
- const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
@@ -877,7 +878,16 @@ struct iwl_txq {
* @reduced_cap_sku: reduced capability supported SKU
* @no_160: device not supporting 160 MHz
* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
+ * @restart: restart worker data
+ * @restart.wk: restart worker
+ * @restart.mode: reset/restart error mode information
+ * @restart.during_reset: error occurred during previous software reset
+ * @me_recheck_wk: worker to recheck WiAMT/CSME presence
+ * @me_present: WiAMT/CSME is detected as present (1), not present (0)
+ * or unknown (-1, so can still use it as a boolean safely)
* @trans_specific: data for the specific transport this is allocated for/with
+ * @dsbr_urm_fw_dependent: switch to URM based on fw settings
+ * @dsbr_urm_permanent: switch to URM permanently
*/
struct iwl_trans {
bool csme_own;
@@ -902,6 +912,9 @@ struct iwl_trans {
bool reduced_cap_sku;
u8 no_160:1, step_urm:1;
+ u8 dsbr_urm_fw_dependent:1,
+ dsbr_urm_permanent:1;
+
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
bool pm_support;
@@ -944,6 +957,15 @@ struct iwl_trans {
struct iwl_dma_ptr invalid_tx_cmd;
+ struct {
+ struct work_struct wk;
+ struct iwl_fw_error_dump_mode mode;
+ bool during_reset;
+ } restart;
+
+ struct delayed_work me_recheck_wk;
+ s8 me_present;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[] __aligned(sizeof(void *));
@@ -1120,7 +1142,28 @@ bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans);
-static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
+static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
+ enum iwl_fw_error_type type)
+{
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return;
+
+ trans->restart.mode.type = type;
+ trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER;
+
+ set_bit(STATUS_RESET_PENDING, &trans->status);
+
+ /*
+ * keep track of whether or not this happened while resetting,
+ * by the time the worker runs it might have finished
+ */
+ trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
+ &trans->status);
+ queue_work(system_unbound_wq, &trans->restart.wk);
+}
+
+static inline void iwl_trans_fw_error(struct iwl_trans *trans,
+ enum iwl_fw_error_type type)
{
if (WARN_ON_ONCE(!trans->op_mode))
return;
@@ -1128,10 +1171,24 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
trans->state = IWL_TRANS_NO_FW;
- iwl_op_mode_nic_error(trans->op_mode, sync);
+ iwl_op_mode_nic_error(trans->op_mode, type);
+ iwl_trans_schedule_reset(trans, type);
}
}
+static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans,
+ enum iwl_fw_error_type type)
+{
+ if (WARN_ON_ONCE(!trans->op_mode))
+ return;
+
+ set_bit(STATUS_IN_SW_RESET, &trans->status);
+
+ if (!trans->op_mode->ops->sw_reset ||
+ !trans->op_mode->ops->sw_reset(trans->op_mode, type))
+ clear_bit(STATUS_IN_SW_RESET, &trans->status);
+}
+
static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
return trans->state == IWL_TRANS_FW_ALIVE;
@@ -1164,6 +1221,11 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);
+static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
+{
+ clear_bit(STATUS_IN_SW_RESET, &trans->status);
+}
+
/*****************************************************
* transport helper functions
*****************************************************/
@@ -1178,12 +1240,27 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
}
+void iwl_trans_free_restart_list(void);
+
/*****************************************************
* PCIe handling
*****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
-void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
+
+/* Note: order matters */
+enum iwl_reset_mode {
+ /* upper level modes: */
+ IWL_RESET_MODE_SW_RESET,
+ IWL_RESET_MODE_REPROBE,
+ /* PCIE level modes: */
+ IWL_RESET_MODE_REMOVE_ONLY,
+ IWL_RESET_MODE_RESCAN,
+ IWL_RESET_MODE_FUNC_RESET,
+ IWL_RESET_MODE_PROD_RESET,
+};
+
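The "order matters" note above is load-bearing: iwl_trans_determine_restart_mode() in iwl-trans.c clamps an escalation index into a table of these values and then takes max(at_least, escalation_list[index]), so a numerically larger mode must always be the more drastic reset. A small illustration of that reliance on enum ordering:

/* with the ordering above, this picks the harsher of the two modes */
enum iwl_reset_mode mode = max(IWL_RESET_MODE_REPROBE,
			       IWL_RESET_MODE_SW_RESET);
/* mode == IWL_RESET_MODE_REPROBE */
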
+void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
new file mode 100644
index 000000000000..b14ec98e28b6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <net/gso.h>
+#include <linux/ieee80211.h>
+#include <net/ip.h>
+
+#include "iwl-drv.h"
+#include "iwl-utils.h"
+
+#ifdef CONFIG_INET
+int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ netdev_features_t netdev_flags,
+ struct sk_buff_head *mpdus_skbs)
+{
+ struct sk_buff *tmp, *next;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ char cb[sizeof(skb->cb)];
+ u16 i = 0;
+ unsigned int tcp_payload_len;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+ bool qos = ieee80211_is_data_qos(hdr->frame_control);
+ u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
+ skb_shinfo(skb)->gso_size = num_subframes * mss;
+ memcpy(cb, skb->cb, sizeof(cb));
+
+ next = skb_gso_segment(skb, netdev_flags);
+ skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+
+ if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
+ return -ENOMEM;
+
+ if (WARN_ONCE(IS_ERR(next),
+ "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
+ return PTR_ERR(next);
+
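+ /* segmentation succeeded, the segment list replaces the original skb */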
+ if (next)
+ consume_skb(skb);
+
+ skb_list_walk_safe(next, tmp, next) {
+ memcpy(tmp->cb, cb, sizeof(tmp->cb));
+ /*
+ * Compute the length of all the data added for the A-MSDU.
+ * This will be used to compute the length to write in the TX
+ * command. We have: SNAP + IP + TCP for n -1 subframes and
+ * ETH header for n subframes.
+ */
+ tcp_payload_len = skb_tail_pointer(tmp) -
+ skb_transport_header(tmp) -
+ tcp_hdrlen(tmp) + tmp->data_len;
+
+ if (ipv4)
+ ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+ if (tcp_payload_len > mss) {
+ skb_shinfo(tmp)->gso_size = mss;
+ skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
+ } else {
+ if (qos) {
+ u8 *qc;
+
+ if (ipv4)
+ ip_send_check(ip_hdr(tmp));
+
+ qc = ieee80211_get_qos_ctl((void *)tmp->data);
+ *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+ skb_shinfo(tmp)->gso_size = 0;
+ }
+
+ skb_mark_not_on_list(tmp);
+ __skb_queue_tail(mpdus_skbs, tmp);
+ i++;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_tx_tso_segment);
+#endif /* CONFIG_INET */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.h b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h
new file mode 100644
index 000000000000..8f1f11d06fbe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_utils_h__
+#define __iwl_utils_h__
+
+#include <net/cfg80211.h>
+
+#ifdef CONFIG_INET
+/**
+ * iwl_tx_tso_segment - Segments a TSO packet into subframes for A-MSDU.
+ * @skb: buffer to segment.
+ * @num_subframes: number of subframes to create.
+ * @netdev_flags: netdev feature flags.
+ * @mpdus_skbs: list to hold the segmented subframes.
+ *
+ * This function segments a large TCP packet into A-MSDU subframes.
+ * The resulting subframes are added to the @mpdus_skbs list.
+ *
+ * Returns: 0 on success and a negative error value on failure.
+ */
+int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ netdev_features_t netdev_flags,
+ struct sk_buff_head *mpdus_skbs);
+#else
+static inline
+int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ netdev_features_t netdev_flags,
+ struct sk_buff_head *mpdus_skbs)
+{
+ WARN_ON(1);
+
+ return -1;
+}
+#endif /* CONFIG_INET */
+
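+/**
+ * iwl_find_ie_offset - find an information element's offset in a beacon
+ * @beacon: the beacon frame data
+ * @eid: the element ID to look for
+ * @frame_size: the size of the beacon frame
+ *
+ * Returns: the offset of the IE from the start of the beacon, or 0 if
+ * the IE wasn't found or the frame is too short.
+ */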
+static inline
+u32 iwl_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+{
+ struct ieee80211_mgmt *mgmt = (void *)beacon;
+ const u8 *ie;
+
+ if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
+ return 0;
+
+ frame_size -= mgmt->u.beacon.variable - beacon;
+
+ ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
+ if (!ie)
+ return 0;
+
+ return ie - beacon;
+}
+
+#endif /* __iwl_utils_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
index 458b97930059..58e9a940024d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2012-2014, 2020 Intel Corporation
* Copyright (C) 2016 Intel Deutschland GmbH
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022, 2024 Intel Corporation
*/
#include <net/mac80211.h>
#include "fw-api.h"
@@ -158,9 +158,8 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
ret = iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt,
false);
- if (!ret)
- if (iwl_mvm_sf_update(mvm, vif, true))
- IWL_ERR(mvm, "Failed to update SF state\n");
+ if (!ret && iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 36726ea4b822..21641d41a958 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -530,18 +530,15 @@ static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = _data;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
lockdep_assert_held(&mvm->mutex);
if (vif->type != NL80211_IFTYPE_STATION)
return;
- for (int link_id = 0;
- link_id < IEEE80211_MLD_MAX_NUM_LINKS;
- link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference_check(vif->link_conf[link_id],
- lockdep_is_held(&mvm->mutex));
+ for_each_vif_active_link(vif, link_conf, link_id) {
struct ieee80211_chanctx_conf *chanctx_conf =
rcu_dereference_check(link_conf->chanctx_conf,
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 7d973546c9fb..129b6bdf9ef9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1395,13 +1395,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret < 0) {
iwl_mvm_free_nd(mvm);
- if (!unified_image) {
- if (mvm->fw_restart > 0) {
- mvm->fw_restart--;
- ieee80211_restart_hw(mvm->hw);
- }
- }
-
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
}
out_noreset:
@@ -2498,12 +2491,6 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
u32 expected_len = sizeof(*data) +
data->num_mlo_link_keys * sizeof(status->mlo_keys[0]);
- if (!data) {
- IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
- status = NULL;
- return;
- }
-
if (len < expected_len) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
@@ -2555,12 +2542,6 @@ iwl_mvm_parse_wowlan_info_notif_v4(struct iwl_mvm *mvm,
u32 i;
u32 expected_len = sizeof(*data);
- if (!data) {
- IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
- status = NULL;
- return;
- }
-
if (has_mlo_keys)
expected_len += (data->num_mlo_link_keys *
sizeof(status->mlo_keys[0]));
@@ -2609,12 +2590,6 @@ iwl_mvm_parse_wowlan_info_notif_v2(struct iwl_mvm *mvm,
{
u32 i;
- if (!data) {
- IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
- status = NULL;
- return;
- }
-
if (len < sizeof(*data)) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
@@ -3541,6 +3516,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
if (iwl_mvm_check_rt_status(mvm, vif)) {
+ IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt,
@@ -3713,8 +3689,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
if (iwl_mvm_check_rt_status(mvm, NULL)) {
- IWL_ERR(mvm,
- "iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 91ca830a7b60..83e3c1160362 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -16,6 +16,7 @@
#include "debugfs.h"
#include "iwl-modparams.h"
#include "iwl-drv.h"
+#include "iwl-utils.h"
#include "fw/error-dump.h"
#include "fw/api/phy-ctxt.h"
@@ -462,7 +463,6 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_link_sta *link_sta,
if (amsdu_len) {
mvm_link_sta->orig_amsdu_len = link_sta->agg.max_amsdu_len;
link_sta->agg.max_amsdu_len = amsdu_len;
- link_sta->agg.max_amsdu_len = amsdu_len;
for (i = 0; i < ARRAY_SIZE(link_sta->agg.max_tid_amsdu_len); i++)
link_sta->agg.max_tid_amsdu_len[i] = amsdu_len;
} else {
@@ -537,43 +537,12 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
-static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- char *buff, *pos, *endpos;
- static const size_t bufsz = 1024;
- int ret;
-
- buff = kmalloc(bufsz, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- pos = buff;
- endpos = pos + bufsz;
-
- pos += scnprintf(pos, endpos - pos, "FW id: %s\n",
- mvm->fwrt.fw->fw_version);
- pos += scnprintf(pos, endpos - pos, "FW: %s\n",
- mvm->fwrt.fw->human_readable);
- pos += scnprintf(pos, endpos - pos, "Device: %s\n",
- mvm->fwrt.trans->name);
- pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
- mvm->fwrt.dev->bus->name);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
- kfree(buff);
-
- return ret;
-}
-
static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- struct iwl_mvm_tas_status_resp tas_rsp;
- struct iwl_mvm_tas_status_resp *rsp = &tas_rsp;
+ struct iwl_mvm_tas_status_resp *rsp = NULL;
static const size_t bufsz = 1024;
char *buff, *pos, *endpos;
const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = {
@@ -609,6 +578,10 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
if (!iwl_mvm_firmware_running(mvm))
return -ENODEV;
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DEBUG_GROUP, GET_TAS_STATUS,
+ 0) != 3)
+ return -EOPNOTSUPP;
+
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
mutex_unlock(&mvm->mutex);
@@ -659,6 +632,14 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
rsp->tas_fw_version);
pos += scnprintf(pos, endpos - pos, "Is UHB enabled for USA?: %s\n",
rsp->is_uhb_for_usa_enable ? "True" : "False");
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT))
+ pos += scnprintf(pos, endpos - pos,
+ "Is UHB enabled for CANADA?: %s\n",
+ rsp->uhb_allowed_flags &
+ TAS_UHB_ALLOWED_CANADA ? "True" : "False");
+
pos += scnprintf(pos, endpos - pos, "Current MCC: 0x%x\n",
le16_to_cpu(rsp->curr_mcc));
@@ -1159,10 +1140,6 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
- /* allow one more restart that we're provoking here */
- if (mvm->fw_restart >= 0)
- mvm->fw_restart++;
-
if (count == 6 && !strcmp(buf, "nolog\n")) {
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status);
@@ -1409,9 +1386,9 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
if (iwl_fw_lookup_cmd_ver(mvm->fw,
BEACON_TEMPLATE_CMD, 0) >= 14) {
- u32 offset = iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_S1G_TWT,
- beacon->len);
+ u32 offset = iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
beacon_cmd.btwt_offset = cpu_to_le32(offset);
}
@@ -1495,22 +1472,6 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
return ret ?: count;
}
-static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- if (count == 0)
- return 0;
-
- iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER,
- NULL);
-
- iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
- (count - 1), NULL);
-
- return count;
-}
-
static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
@@ -1964,14 +1925,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(fw_system_stats);
-MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_READ_FILE_OPS(tas_get_status);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
-MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_clear, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
@@ -2164,7 +2123,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
- MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400);
@@ -2173,7 +2131,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
- MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(fw_dbg_clear, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 5ea684802ad1..df49dd2e2026 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -642,7 +642,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
/* if we needed reset then fail here, but notify and remove */
if (mvm->fw_product_reset) {
iwl_mei_alive_notif(false);
- iwl_trans_pcie_remove(mvm->trans, true);
+ iwl_trans_pcie_reset(mvm->trans,
+ IWL_RESET_MODE_RESCAN);
}
goto error;
@@ -1093,36 +1094,40 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
return iwl_mvm_ppag_send_cmd(mvm);
}
-static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
+static bool
+iwl_mvm_add_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
{
- int i;
- u32 size = le32_to_cpu(*le_size);
-
/* Verify that there is room for another country */
- if (size >= IWL_WTAS_BLACK_LIST_MAX)
+ if (*size >= IWL_WTAS_BLACK_LIST_MAX)
return false;
- for (i = 0; i < size; i++) {
- if (list[i] == cpu_to_le32(mcc))
+ for (u8 i = 0; i < *size; i++) {
+ if (list[i] == mcc)
return true;
}
- list[size++] = cpu_to_le32(mcc);
- *le_size = cpu_to_le32(size);
+ list[*size] = mcc;
+ (*size)++;
return true;
}
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
- int ret;
+ int fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ struct iwl_tas_selection_data selection_data = {};
+ struct iwl_tas_config_cmd_v2_v4 cmd_v2_v4 = {};
+ struct iwl_tas_config_cmd cmd_v5 = {};
struct iwl_tas_data data = {};
- struct iwl_tas_config_cmd cmd = {};
- int cmd_size, fw_ver;
+ void *cmd_data = &cmd_v2_v4;
+ int cmd_size;
+ int ret;
BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
IWL_WTAS_BLACK_LIST_MAX);
- BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v2_v4.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v5.block_list_array) !=
IWL_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@@ -1138,7 +1143,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
- if (ret == 0)
+ if (ret == 0 && fw_ver < 5)
return;
if (!iwl_is_tas_approved()) {
@@ -1161,27 +1166,49 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
}
- fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
-
- /* Set v3 or v4 specific parts. will be trunctated for fw_ver < 3 */
- if (fw_ver == 4) {
- cmd.v4.override_tas_iec = data.override_tas_iec;
- cmd.v4.enable_tas_iec = data.enable_tas_iec;
- cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
+ if (fw_ver < 5) {
+ selection_data = iwl_parse_tas_selection(data.tas_selection,
+ data.table_revision);
+ cmd_v2_v4.common.block_list_size =
+ cpu_to_le32(data.block_list_size);
+ for (u8 i = 0; i < data.block_list_size; i++)
+ cmd_v2_v4.common.block_list_array[i] =
+ cpu_to_le32(data.block_list_array[i]);
+ }
+
+ if (fw_ver == 5) {
+ cmd_size = sizeof(cmd_v5);
+ cmd_data = &cmd_v5;
+ cmd_v5.block_list_size = cpu_to_le16(data.block_list_size);
+ for (u16 i = 0; i < data.block_list_size; i++)
+ cmd_v5.block_list_array[i] =
+ cpu_to_le16(data.block_list_array[i]);
+ cmd_v5.tas_config_info.table_source = data.table_source;
+ cmd_v5.tas_config_info.table_revision = data.table_revision;
+ cmd_v5.tas_config_info.value = cpu_to_le32(data.tas_selection);
+ } else if (fw_ver == 4) {
+ cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v4);
+ cmd_v2_v4.v4.override_tas_iec = selection_data.override_tas_iec;
+ cmd_v2_v4.v4.enable_tas_iec = selection_data.enable_tas_iec;
+ cmd_v2_v4.v4.usa_tas_uhb_allowed =
+ selection_data.usa_tas_uhb_allowed;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT) &&
+ selection_data.canada_tas_uhb_allowed)
+ cmd_v2_v4.v4.uhb_allowed_flags = TAS_UHB_ALLOWED_CANADA;
+ } else if (fw_ver == 3) {
+ cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v3);
+ cmd_v2_v4.v3.override_tas_iec =
+ cpu_to_le16(selection_data.override_tas_iec);
+ cmd_v2_v4.v3.enable_tas_iec =
+ cpu_to_le16(selection_data.enable_tas_iec);
+ } else if (fw_ver == 2) {
+ cmd_size = sizeof(cmd_v2_v4.common);
} else {
- cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
- cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
+ return;
}
- cmd_size = sizeof(struct iwl_tas_config_cmd_common);
- if (fw_ver >= 3)
- /* v4 is the same size as v3 */
- cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, cmd_data);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 272da41567ef..851869c0bd50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -412,9 +412,8 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE);
- if (!ret)
- if (iwl_mvm_sf_update(mvm, vif, true))
- IWL_ERR(mvm, "Failed to update SF state\n");
+ if (!ret && iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
return ret;
}
@@ -762,9 +761,8 @@ bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
return false;
- if (a->chandef->width != b->chandef->width ||
- !(a->chandef->chan->band == NL80211_BAND_6GHZ &&
- b->chandef->chan->band == NL80211_BAND_5GHZ))
+ if (a->chandef->chan->band == b->chandef->chan->band ||
+ a->chandef->width != b->chandef->width)
ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
if (ret) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 51ee62ae70fb..6b06732441c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -12,6 +12,7 @@
#include "fw-api.h"
#include "mvm.h"
#include "time-event.h"
+#include "iwl-utils.h"
const u8 iwl_mvm_ac_to_tx_fifo[] = {
IWL_MVM_TX_FIFO_VO,
@@ -868,23 +869,6 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
}
}
-u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
-{
- struct ieee80211_mgmt *mgmt = (void *)beacon;
- const u8 *ie;
-
- if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
- return 0;
-
- frame_size -= mgmt->u.beacon.variable - beacon;
-
- ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
- if (!ie)
- return 0;
-
- return ie - beacon;
-}
-
u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_vif *vif)
@@ -1078,22 +1062,23 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm,
beacon->data, beacon->len);
beacon_cmd.csa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_CHANNEL_SWITCH,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
beacon_cmd.ecsa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_EXT_CHANSWITCH_ANN,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx)
{
- if (IWL_MVM_DISABLE_AP_FILS)
+ if (vif->type != NL80211_IFTYPE_AP || IWL_MVM_DISABLE_AP_FILS)
return false;
if (cfg80211_channel_is_psc(ctx->def.chan))
@@ -1122,7 +1107,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
ctx = rcu_dereference(link_conf->chanctx_conf);
channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq);
WARN_ON(channel == 0);
- if (iwl_mvm_enable_fils(mvm, ctx)) {
+ if (iwl_mvm_enable_fils(mvm, vif, ctx)) {
flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
0) > 10 ?
IWL_MAC_BEACON_FILS :
@@ -1151,20 +1136,20 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
beacon->data, beacon->len);
beacon_cmd.csa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_CHANNEL_SWITCH,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
beacon_cmd.ecsa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_EXT_CHANSWITCH_ANN,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
if (vif->type == NL80211_IFTYPE_AP &&
iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14)
beacon_cmd.btwt_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_S1G_TWT,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
@@ -1767,7 +1752,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
data = sb_v2->data;
} else {
- struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data;
+ struct iwl_stored_beacon_notif *sb_v3 = (void *)pkt->data;
if (pkt_len < struct_size(sb_v3, data, size))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 07778d55878b..af6644b7e95f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1153,7 +1153,7 @@ static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
* Delete the stale data to avoid issues later on.
*/
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
- link_id, false);
+ link_id);
}
}
}
@@ -1300,23 +1300,16 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
- iwlwifi_mod_params.fw_restart) {
+ iwlwifi_mod_params.fw_restart)
max_retry = IWL_MAX_INIT_RETRY;
- /*
- * This will prevent mac80211 recovery flows to trigger during
- * init failures
- */
- set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
- }
for (retry = 0; retry <= max_retry; retry++) {
ret = __iwl_mvm_mac_start(mvm);
- if (!ret)
+ if (ret != -ETIMEDOUT)
break;
IWL_ERR(mvm, "mac start retry %d\n", retry);
}
- clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1347,6 +1340,11 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
iwl_mvm_teardown_tdls_peers(mvm);
IWL_INFO(mvm, "restart completed\n");
+ iwl_trans_finish_sw_reset(mvm->trans);
+
+ /* no need to lock, adding in parallel would schedule too */
+ if (!list_empty(&mvm->add_stream_txqs))
+ schedule_work(&mvm->add_stream_wk);
}
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
@@ -1485,11 +1483,12 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm,
s16 tx_power)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
- u32 mac_id = iwl_mvm_vif_from_mac80211(link_conf->vif)->id;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(link_conf->vif);
+ u32 mac_id = mvmvif->id;
int len;
struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
- .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .common.mac_context_id = cpu_to_le32(mac_id),
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
+ .common.link_id = cpu_to_le32(mac_id),
};
struct iwl_dev_tx_power_cmd cmd_v9_v10;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
@@ -1500,9 +1499,16 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm,
cmd.common.pwr_restriction = cpu_to_le16(u_tx_power);
if (cmd_ver > 8) {
+ u32 link_id;
+
+ if (WARN_ON(!mvmvif->link[link_conf->link_id]))
+ return -ENODEV;
+
+ link_id = mvmvif->link[link_conf->link_id]->fw_link_id;
+
/* Those fields sit on the same place for v9 and v10 */
- cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC);
- cmd_v9_v10.common.mac_context_id = cpu_to_le32(mac_id);
+ cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK);
+ cmd_v9_v10.common.link_id = cpu_to_le32(link_id);
cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power);
cmd_data = &cmd_v9_v10;
}
@@ -1802,6 +1808,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->deflink.active = 0;
mvmvif->link[0] = &mvmvif->deflink;
+ vif->driver_flags = IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+
ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf);
if (ret)
goto out;
@@ -2967,33 +2975,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
&mvm->status),
"Failed to update SF upon disassociation\n");
- /*
- * If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status)) {
- /* first remove remaining keys */
- iwl_mvm_sec_key_remove_ap(mvm, vif,
- &mvmvif->deflink, 0);
-
- /*
- * Remove AP station now that
- * the MAC is unassoc
- */
- ret = iwl_mvm_rm_sta_id(mvm, vif,
- mvmvif->deflink.ap_sta_id);
- if (ret)
- IWL_ERR(mvm,
- "failed to remove AP station\n");
-
- mvmvif->deflink.ap_sta_id = IWL_INVALID_STA;
- }
-
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
@@ -3913,7 +3894,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (sta->tdls &&
(vif->p2p ||
- iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT ||
+ iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_TDLS_STA_COUNT ||
iwl_mvm_phy_ctx_count(mvm) > 1)) {
IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
return -EBUSY;
@@ -4113,10 +4094,6 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
wiphy_delayed_work_cancel(mvm->hw->wiphy,
&mvmvif->unblock_esr_tmp_non_bss_wk);
-
- /* No need for the periodic statistics anymore */
- if (ieee80211_vif_is_mld(vif) && mvmvif->esr_active)
- iwl_mvm_request_periodic_system_statistics(mvm, false);
}
return 0;
@@ -5004,34 +4981,46 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
return 0;
}
-struct iwl_mvm_ftm_responder_iter_data {
- bool responder;
+struct iwl_mvm_chanctx_usage_data {
+ struct iwl_mvm *mvm;
struct ieee80211_chanctx_conf *ctx;
+ bool use_def;
};
-static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_chanctx_usage_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct iwl_mvm_ftm_responder_iter_data *data = _data;
+ struct iwl_mvm_chanctx_usage_data *data = _data;
+ struct ieee80211_bss_conf *link_conf;
+ int link_id;
- if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx &&
- vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
- data->responder = true;
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx)
+ continue;
+
+ if (iwl_mvm_enable_fils(data->mvm, vif, data->ctx))
+ data->use_def = true;
+
+ if (vif->type == NL80211_IFTYPE_AP && link_conf->ftmr_params)
+ data->use_def = true;
+ }
}
-bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx)
+struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
{
- struct iwl_mvm_ftm_responder_iter_data data = {
- .responder = false,
+ struct iwl_mvm_chanctx_usage_data data = {
+ .mvm = mvm,
.ctx = ctx,
+ .use_def = false,
};
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_ftm_responder_chanctx_iter,
- &data);
- return data.responder;
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_chanctx_usage_iter,
+ &data);
+
+ return data.use_def ? &ctx->def : &ctx->min_def;
}
static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
@@ -5415,7 +5404,7 @@ out_reassign:
out_restart:
/* things keep failing, better restart the hw */
- iwl_mvm_nic_restart(mvm, false);
+ iwl_force_nmi(mvm->trans);
return ret;
}
@@ -5451,7 +5440,7 @@ out_reassign:
out_restart:
/* things keep failing, better restart the hw */
- iwl_mvm_nic_restart(mvm, false);
+ iwl_force_nmi(mvm->trans);
return ret;
}
@@ -6278,7 +6267,7 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
guard(mvm)(mvm);
- if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id)
+ if (sta != mvmvif->ap_sta)
return;
if (iwl_mvm_request_statistics(mvm, false))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index b807046144c0..341a2a7a49ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -18,6 +18,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->mvm = mvm;
+ vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+
/* Not much to do here. The stack will not allow interface
* types or combinations that we didn't advertise, so we
* don't really have to check the types.
@@ -208,32 +210,6 @@ static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
return n_active;
}
-static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif)
-{
- struct ieee80211_sta *ap_sta = mvmvif->ap_sta;
- struct iwl_mvm_sta *mvmsta;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!ap_sta)
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
- if (!mvmsta->mpdu_counters)
- return;
-
- for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
- spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
- memset(mvmsta->mpdu_counters[q].per_link, 0,
- sizeof(mvmsta->mpdu_counters[q].per_link));
- mvmsta->mpdu_counters[q].window_start = jiffies;
- spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
- }
-
- IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
-}
-
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -267,16 +243,6 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
else
mvmvif->primary_link = __ffs(vif->active_links);
- /* Needed for tracking RSSI */
- iwl_mvm_request_periodic_system_statistics(mvm, true);
-
- /*
- * Restart the MPDU counters and the counting window, so when the
- * statistics arrive (which is where we look at the counters) we
- * will be at the end of the window.
- */
- iwl_mvm_restart_mpdu_count(mvm, mvmvif);
-
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
NULL);
@@ -323,7 +289,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
ret = iwl_mvm_esr_mode_active(mvm, vif);
if (ret) {
IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
- iwl_mvm_request_periodic_system_statistics(mvm, false);
goto out;
}
}
@@ -449,11 +414,6 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
break;
}
- iwl_mvm_request_periodic_system_statistics(mvm, false);
-
- /* Start a new counting window */
- iwl_mvm_restart_mpdu_count(mvm, mvmvif);
-
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN,
NULL);
@@ -831,30 +791,6 @@ static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif)
return false;
}
-static void iwl_mvm_mld_vif_delete_all_stas(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int i, ret;
-
- if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- return;
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- struct iwl_mvm_vif_link_info *link = mvmvif->link[i];
-
- if (!link)
- continue;
-
- iwl_mvm_sec_key_remove_ap(mvm, vif, link, i);
- ret = iwl_mvm_mld_rm_sta_id(mvm, link->ap_sta_id);
- if (ret)
- IWL_ERR(mvm, "failed to remove AP station\n");
-
- link->ap_sta_id = IWL_INVALID_STA;
- }
-}
-
static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u64 changes)
@@ -881,8 +817,13 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (vif->cfg.assoc) {
mvmvif->session_prot_connection_loss = false;
- /* clear statistics to get clean beacon counter */
+ /*
+ * Clear statistics to get clean beacon counter, and ask for
+ * periodic statistics, as they are needed for link
+ * selection and RX OMI decisions.
+ */
iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_request_periodic_system_statistics(mvm, true);
iwl_mvm_sf_update(mvm, vif, false);
iwl_mvm_power_vif_assoc(mvm, vif);
@@ -930,6 +871,8 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
} else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
iwl_mvm_mei_host_disassociated(mvm);
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
+
/* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
*/
@@ -938,15 +881,6 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status),
"Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 019839604011..2f159024eeb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -518,14 +518,12 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw)
+ unsigned int link_id)
{
lockdep_assert_wiphy(mvm->hw->wiphy);
lockdep_assert_held(&mvm->mutex);
- RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
- is_in_fw ? ERR_PTR(-EINVAL) : NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], NULL);
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL);
RCU_INIT_POINTER(mvm_sta->link[link_id], NULL);
@@ -546,7 +544,7 @@ static void iwl_mvm_mld_sta_rm_all_sta_links(struct iwl_mvm *mvm,
if (!link)
continue;
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id, false);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id);
}
}
@@ -844,18 +842,11 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_link_sta *mvm_link_sta =
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
- bool stay_in_fw;
+ iwl_mvm_sta_del(mvm, vif, sta, link_sta);
- stay_in_fw = iwl_mvm_sta_del(mvm, vif, sta, link_sta, &ret);
- if (ret)
- break;
-
- if (!stay_in_fw)
- ret = iwl_mvm_mld_rm_sta_from_fw(mvm,
- mvm_link_sta->sta_id);
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id);
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
- link_id, stay_in_fw);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id);
}
kfree(mvm_sta->mpdu_counters);
mvm_sta->mpdu_counters = NULL;
@@ -1122,8 +1113,7 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION)
mvm_vif_link->ap_sta_id = IWL_INVALID_STA;
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id,
- false);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
}
for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
@@ -1227,8 +1217,7 @@ err:
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id,
- false);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
}
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2ad615293c75..ee769da72e68 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -103,6 +103,7 @@ struct iwl_mvm_phy_ctxt {
u32 center_freq1;
bool rlc_disabled;
u32 channel_load_by_us;
+ u32 channel_load_not_by_us;
};
struct iwl_mvm_time_event_data {
@@ -1174,8 +1175,6 @@ struct iwl_mvm {
struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_FW_MAX_LINK_ID + 1];
- /* -1 for always, 0 for never, >0 for that many times */
- s8 fw_restart;
u8 *error_recovery_buf;
#ifdef CONFIG_IWLWIFI_LEDS
@@ -1401,8 +1400,6 @@ DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mu
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
- * @IWL_MVM_STATUS_STARTING: starting mac,
- * used to disable restart flow while in STARTING state
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1414,7 +1411,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
- IWL_MVM_STATUS_STARTING,
};
struct iwl_mvm_csme_conn_info {
@@ -1736,12 +1732,19 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans)
{
- if ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) &&
- !CSR_HW_RFID_IS_CDB(trans->hw_rf_id))
+ if (CSR_HW_RFID_IS_CDB(trans->hw_rf_id))
+ return false;
+
+ switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_FM:
/* Step A doesn't support eSR */
return CSR_HW_RFID_STEP(trans->hw_rf_id);
-
- return false;
+ case IWL_CFG_RF_TYPE_WH:
+ case IWL_CFG_RF_TYPE_PE:
+ return true;
+ default:
+ return false;
+ }
}
static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
@@ -1824,7 +1827,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
-u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -2591,7 +2593,6 @@ void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
-void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -2996,18 +2997,11 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
struct cfg80211_set_hw_timestamp *hwts);
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx);
-bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx);
-
-static inline struct cfg80211_chan_def *
-iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
-{
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- return use_def ? &ctx->def : &ctx->min_def;
-}
+struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx);
void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
u32 duration_ms,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 30fcc733395e..984f407f7027 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -208,7 +208,8 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
return;
- vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
+ /* FIXME: should fetch the link and not the vif */
+ vif = iwl_mvm_get_vif_by_macid(mvm, notif->link_id);
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
@@ -408,7 +409,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_SYNC, struct iwl_time_event_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
- struct iwl_mvm_session_prot_notif),
+ struct iwl_session_prot_notif),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
@@ -1285,6 +1286,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
+ int err;
/*
* We use IWL_STATION_COUNT_MAX to check the validity of the station
@@ -1302,7 +1304,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_has_mld_api(fw) ? &iwl_mvm_mld_hw_ops :
&iwl_mvm_hw_ops);
if (!hw)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
max_agg = 512;
@@ -1331,6 +1333,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
+ iwl_bios_setup_step(trans, &mvm->fwrt);
mvm->init_status = 0;
@@ -1346,11 +1349,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->rx_mpdu_cmd_hdr_size =
sizeof(struct iwl_rx_mpdu_res_start);
- if (WARN_ON(trans->num_rx_queues > 1))
+ if (WARN_ON(trans->num_rx_queues > 1)) {
+ err = -EINVAL;
goto out_free;
+ }
}
- mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt);
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -1424,8 +1428,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_RESPONSE_NOTIF, 5);
/* we only support up to version 9 */
- if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
+ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) {
+ err = -EINVAL;
goto out_free;
+ }
/*
* Populate the state variables that the transport layer needs
@@ -1474,9 +1480,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
- memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
- sizeof(trans->dbg.conf_tlv));
- trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -1488,6 +1491,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->phy_db = iwl_phy_db_init(trans);
if (!mvm->phy_db) {
IWL_ERR(mvm, "Cannot init phy_db\n");
+ err = -ENOMEM;
goto out_free;
}
@@ -1500,8 +1504,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
- if (!mvm->scan_cmd)
+ if (!mvm->scan_cmd) {
+ err = -ENOMEM;
goto out_free;
+ }
mvm->scan_cmd_size = scan_size;
/* invalidate ids to prevent accidental removal of sta_id 0 */
@@ -1530,7 +1536,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter);
- if (iwl_mvm_start_get_nvm(mvm)) {
+ err = iwl_mvm_start_get_nvm(mvm);
+ if (err) {
/*
* Getting NVM failed while CSME is the owner, but we are
* registered to MEI, we'll get the NVM later when it'll be
@@ -1543,7 +1550,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
- if (iwl_mvm_start_post_nvm(mvm))
+ err = iwl_mvm_start_post_nvm(mvm);
+ if (err)
goto out_thermal_exit;
return op_mode;
@@ -1563,7 +1571,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_trans_op_mode_leave(trans);
ieee80211_free_hw(mvm->hw);
- return NULL;
+ return ERR_PTR(err);
}
void iwl_mvm_stop_device(struct iwl_mvm *mvm)
@@ -1998,27 +2006,62 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(mvm->hw, skb);
}
-struct iwl_mvm_reprobe {
- struct device *dev;
- struct work_struct work;
-};
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+ iwl_dbg_tlv_del_timers(mvm->trans);
-static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
+ IWL_ERR(mvm, "Command queue full!\n");
+ else if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
+ !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
+ &mvm->status))
+ iwl_mvm_dump_nic_error_log(mvm);
+
+ /*
+ * This should be first thing before trying to collect any
+ * data to avoid endless loops if any HW error happens while
+ * collecting debug data.
+ * It might not actually be true that we'll restart, but the
+ * setting of the bit doesn't matter if we're going to be
+ * unbound either.
+ */
+ if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+}
+
+static void iwl_mvm_dump_error(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode)
{
- struct iwl_mvm_reprobe *reprobe;
-
- reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
- if (device_reprobe(reprobe->dev))
- dev_err(reprobe->dev, "reprobe failed!\n");
- put_device(reprobe->dev);
- kfree(reprobe);
- module_put(THIS_MODULE);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /* if we come in from opmode we have the mutex held */
+ if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
+ lockdep_assert_held(&mvm->mutex);
+ iwl_fw_error_collect(&mvm->fwrt);
+ } else {
+ mutex_lock(&mvm->mutex);
+ if (mode->context != IWL_ERR_CONTEXT_ABORT)
+ iwl_fw_error_collect(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
+ }
}
-void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
+static bool iwl_mvm_sw_reset(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
- iwl_abort_notification_waits(&mvm->notif_wait);
- iwl_dbg_tlv_del_timers(mvm->trans);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /*
+ * If the firmware crashes while we're already considering it
+ * to be dead then don't ask for a restart, that cannot do
+ * anything useful anyway.
+ */
+ if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
+ return false;
/*
* This is a bit racy, but worst case we tell mac80211 about
@@ -2033,52 +2076,11 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
iwl_mvm_report_scan_aborted(mvm);
/*
- * If we're restarting already, don't cycle restarts.
* If INIT fw asserted, it will likely fail again.
* If WoWLAN fw asserted, don't restart either, mac80211
* can't recover this since we're already half suspended.
*/
- if (!mvm->fw_restart && fw_error) {
- iwl_fw_error_collect(&mvm->fwrt, false);
- } else if (test_bit(IWL_MVM_STATUS_STARTING,
- &mvm->status)) {
- IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
- } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- struct iwl_mvm_reprobe *reprobe;
-
- IWL_ERR(mvm,
- "Firmware error during reconfiguration - reprobe!\n");
-
- /*
- * get a module reference to avoid doing this while unloading
- * anyway and to avoid scheduling a work with code that's
- * being removed.
- */
- if (!try_module_get(THIS_MODULE)) {
- IWL_ERR(mvm, "Module is being unloaded - abort\n");
- return;
- }
-
- reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
- if (!reprobe) {
- module_put(THIS_MODULE);
- return;
- }
- reprobe->dev = get_device(mvm->trans->dev);
- INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
- schedule_work(&reprobe->work);
- } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status)) {
- IWL_ERR(mvm, "HW restart already requested, but not started\n");
- } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
- mvm->hw_registered &&
- !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
- /* This should be first thing before trying to collect any
- * data to avoid endless loops if any HW error happens while
- * collecting debug data.
- */
- set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
-
+ if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered) {
if (mvm->fw->ucode_capa.error_log_size) {
u32 src_size = mvm->fw->ucode_capa.error_log_size;
u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
@@ -2093,57 +2095,18 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
}
}
- iwl_fw_error_collect(&mvm->fwrt, false);
-
- if (fw_error && mvm->fw_restart > 0) {
- mvm->fw_restart--;
- ieee80211_restart_hw(mvm->hw);
- } else if (mvm->fwrt.trans->dbg.restart_required) {
+ if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
+ return true;
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
+ return true;
}
}
-}
-
-static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
- if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
- !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
- &mvm->status))
- iwl_mvm_dump_nic_error_log(mvm);
-
- if (sync) {
- iwl_fw_error_collect(&mvm->fwrt, true);
- /*
- * Currently, the only case for sync=true is during
- * shutdown, so just stop in this case. If/when that
- * changes, we need to be a bit smarter here.
- */
- return;
- }
-
- /*
- * If the firmware crashes while we're already considering it
- * to be dead then don't ask for a restart, that cannot do
- * anything useful anyway.
- */
- if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
- return;
-
- iwl_mvm_nic_restart(mvm, false);
-}
-
-static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- WARN_ON(1);
- iwl_mvm_nic_restart(mvm, true);
+ return false;
}
static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
@@ -2179,7 +2142,8 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
.hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
.free_skb = iwl_mvm_free_skb, \
.nic_error = iwl_mvm_nic_error, \
- .cmd_queue_full = iwl_mvm_cmd_queue_full, \
+ .dump_error = iwl_mvm_dump_error, \
+ .sw_reset = iwl_mvm_sw_reset, \
.nic_config = iwl_mvm_nic_config, \
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 7cab5373c8ae..5e7e2926be0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -31,7 +31,7 @@ u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
/*
* Maps the driver specific control channel position (relative to the center
- * freq) definitions to the the fw values
+ * freq) definitions to the fw values
*/
u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index bc363e8427e4..a386b315e52f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -567,7 +567,7 @@ struct iwl_power_vifs {
bool monitor_active;
};
-static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac,
+static void iwl_mvm_power_disable_pm_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -575,7 +575,7 @@ static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac,
mvmvif->pm_enabled = false;
}
-static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
+static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 9e72db9bab40..2dbef7b46355 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -789,6 +789,8 @@ static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
continue;
mvm->phy_ctxts[i].channel_load_by_us =
le32_to_cpu(per_phy[i].channel_load_by_us);
+ mvm->phy_ctxts[i].channel_load_not_by_us =
+ le32_to_cpu(per_phy[i].channel_load_not_by_us);
}
}
@@ -962,6 +964,9 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
#define SEC_LINK_MIN_TX 3000
#define SEC_LINK_MIN_RX 400
+/* Accept a ~20% short window to avoid issues due to jitter */
+#define IWL_MVM_TPT_MIN_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5)
+
static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
{
struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
@@ -971,6 +976,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
unsigned long sec_link_tx = 0, sec_link_rx = 0;
u8 sec_link_tx_perc, sec_link_rx_perc;
u8 sec_link;
+ bool skip = false;
lockdep_assert_held(&mvm->mutex);
@@ -1010,13 +1016,25 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
/*
* In EMLSR we have statistics every 5 seconds, so we can reset
* the counters upon every statistics notification.
+ * The FW sends the notification regularly, but it will be
+ * misaligned at the start. Skipping the measurement if it is
+ * short will synchronize us.
*/
+ if (jiffies - mvmsta->mpdu_counters[q].window_start <
+ IWL_MVM_TPT_MIN_COUNT_WINDOW)
+ skip = true;
+ mvmsta->mpdu_counters[q].window_start = jiffies;
memset(mvmsta->mpdu_counters[q].per_link, 0,
sizeof(mvmsta->mpdu_counters[q].per_link));
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
+ if (skip) {
+ IWL_DEBUG_INFO(mvm, "MPDU statistics window was short\n");
+ return;
+ }
+
IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
total_tx, total_rx);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a2f16bfaec44..09fd8752046e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -636,15 +636,21 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
baid, nssn);
- if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
- baid >= ARRAY_SIZE(mvm->baid_map)))
+ if (IWL_FW_CHECK(mvm,
+ baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mvm->baid_map),
+ "invalid BAID from FW: %d\n", baid))
return;
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN(!ba_data, "BAID %d not found in map\n", baid))
+ if (!ba_data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID %d but not allocated, invalid frame release!\n",
+ baid);
goto out;
+ }
/* pick any STA ID to find the pointer */
sta_id = ffs(ba_data->sta_mask) - 1;
@@ -2506,19 +2512,24 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bar_frame_release *release = (void *)pkt->data;
- unsigned int baid = le32_get_bits(release->ba_info,
- IWL_BAR_FRAME_RELEASE_BAID_MASK);
- unsigned int nssn = le32_get_bits(release->ba_info,
- IWL_BAR_FRAME_RELEASE_NSSN_MASK);
- unsigned int sta_id = le32_get_bits(release->sta_tid,
- IWL_BAR_FRAME_RELEASE_STA_MASK);
- unsigned int tid = le32_get_bits(release->sta_tid,
- IWL_BAR_FRAME_RELEASE_TID_MASK);
struct iwl_mvm_baid_data *baid_data;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ unsigned int baid, nssn, sta_id, tid;
- if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
+ if (IWL_FW_CHECK(mvm, pkt_len < sizeof(*release),
+ "Unexpected frame release notif size %d (expected %zu)\n",
+ pkt_len, sizeof(*release)))
return;
+ baid = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_BAID_MASK);
+ nssn = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_NSSN_MASK);
+ sta_id = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_STA_MASK);
+ tid = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_TID_MASK);
+
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
baid >= ARRAY_SIZE(mvm->baid_map)))
return;
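
Both rxmq.c hunks above follow the same validate-before-parse discipline: the payload length is checked before any field of the notification is extracted, and firmware-supplied indices are rejected without tripping a host WARN. A minimal standalone sketch of the pattern; the struct layout, mask, and helper are illustrative assumptions, not the iwlwifi wire format:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct release_notif {
	uint32_t ba_info;
	uint32_t sta_tid;
};

static int handle_notif(const void *data, size_t len)
{
	const struct release_notif *release = data;
	uint32_t baid;

	/* length check first: never dereference fields of a short packet */
	if (len < sizeof(*release)) {
		fprintf(stderr, "short notif: %zu < %zu\n",
			len, sizeof(*release));
		return -1;
	}

	baid = release->ba_info & 0x3f; /* illustrative mask */
	printf("baid %u\n", baid);
	return 0;
}

int main(void)
{
	struct release_notif n = { .ba_info = 5, .sta_tid = 0 };

	handle_notif(&n, sizeof(n)); /* prints "baid 5" */
	handle_notif(&n, 4);         /* rejected: too short */
	return 0;
}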
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 376b9b12fa62..60bd9c7e5f03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -462,7 +462,7 @@ static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
if (!ssid_list[i].len)
break;
if (ssid_list[i].len == ssid_len &&
- !memcmp(ssid_list->ssid, ssid, ssid_len))
+ !memcmp(ssid_list[i].ssid, ssid, ssid_len))
return i;
}
return -1;
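
The one-character scan.c fix above matters because ssid_list->ssid is exactly ssid_list[0].ssid, so every loop iteration compared the candidate SSID against entry 0. A standalone sketch reproducing the fixed logic, with simplified types:

#include <stdio.h>
#include <string.h>

struct ssid_ie { unsigned char len; char ssid[32]; };

static int ssid_exist(const char *ssid, unsigned char len,
		      const struct ssid_ie *list, int n)
{
	for (int i = 0; i < n; i++) {
		if (!list[i].len)
			break;
		/* buggy form, !memcmp(list->ssid, ...), checked list[0] only */
		if (list[i].len == len && !memcmp(list[i].ssid, ssid, len))
			return i;
	}
	return -1;
}

int main(void)
{
	struct ssid_ie list[] = { {3, "foo"}, {3, "bar"} };

	printf("%d\n", ssid_exist("bar", 3, list, 2)); /* 1 with the fix */
	return 0;
}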
@@ -3477,7 +3477,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
* restart_hw, so do not report if FW is about to be
* restarted.
*/
- if (!mvm->fw_restart)
+ if (!iwlwifi_mod_params.fw_restart)
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
@@ -3528,7 +3528,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
* restarted.
*/
if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
- !mvm->fw_restart) {
+ !iwlwifi_mod_params.fw_restart) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index cd74c181c260..7a4844ec3c10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1520,7 +1520,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
add_stream_wk);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
+
+ /* will reschedule to run after restart */
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ||
+ test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);
@@ -1564,8 +1569,6 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
local_bh_enable();
}
-
- mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
@@ -2045,9 +2048,9 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
* Returns if we're done with removing the station, either
* with error or success
*/
-bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta, int *ret)
+ struct ieee80211_link_sta *link_sta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif_link_info *mvm_link =
@@ -2063,38 +2066,12 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_is_held(&mvm->mutex));
sta_id = mvm_link_sta->sta_id;
- /* If there is a TXQ still marked as reserved - free it */
- if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
- u8 reserved_txq = mvm_sta->reserved_queue;
- enum iwl_mvm_queue_status *status;
-
- /*
- * If no traffic has gone through the reserved TXQ - it
- * is still marked as IWL_MVM_QUEUE_RESERVED, and
- * should be manually marked as free again
- */
- status = &mvm->queue_info[reserved_txq].status;
- if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
- (*status != IWL_MVM_QUEUE_FREE),
- "sta_id %d reserved txq %d status %d",
- sta_id, reserved_txq, *status)) {
- *ret = -EINVAL;
- return true;
- }
-
- *status = IWL_MVM_QUEUE_FREE;
- }
-
if (vif->type == NL80211_IFTYPE_STATION &&
mvm_link->ap_sta_id == sta_id) {
- /* if associated - we can't remove the AP STA now */
- if (vif->cfg.assoc)
- return true;
-
/* first remove remaining keys */
- iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);
+ iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link,
+ link_sta->link_id);
- /* unassoc - go ahead - remove the AP STA now */
mvm_link->ap_sta_id = IWL_INVALID_STA;
}
@@ -2106,8 +2083,6 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
-
- return false;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
@@ -2143,8 +2118,27 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
iwl_mvm_disable_sta_queues(mvm, vif, sta);
- if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret))
- return ret;
+ /* If there is a TXQ still marked as reserved - free it */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ mvm_sta->deflink.sta_id, reserved_txq, *status))
+ return -EINVAL;
+
+ *status = IWL_MVM_QUEUE_FREE;
+ }
+
+ iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);
@@ -2912,7 +2906,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/*
* The division below will be OK if either the cache line size
* can be divided by the entry size (ALIGN will round up) or if
- * if the entry size can be divided by the cache line size, in
+ * the entry size can be divided by the cache line size, in
* which case the ALIGN() will do nothing.
*/
BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
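
The BUILD_BUG_ON above encodes the comment's divisibility argument: reorder-buffer entries may either pack evenly into a cache line or span a whole number of cache lines, but nothing in between. A worked standalone check, with ALIGN() re-derived and the sizes chosen as examples:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned cache = 64;

	/* entry size divides the cache line: ALIGN rounds up cleanly */
	printf("%u\n", ALIGN_UP(16, cache));  /* 64 */
	/* cache line divides the entry size: ALIGN is a no-op */
	printf("%u\n", ALIGN_UP(128, cache)); /* 128 */
	/* neither divides: 96 -> 128, wasting space; the BUILD_BUG_ON
	 * rejects this combination at compile time */
	printf("%u\n", ALIGN_UP(96, cache));  /* 128 */
	return 0;
}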
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4a3799ae7c18..6856f7440ef3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -133,7 +133,7 @@ struct iwl_mvm_vif;
* and no TID data as this is also not needed.
* One thing to note is that these stations have an ID in the fw, but not
* in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id
- * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of
+ * we fill ERR_PTR(-EINVAL) in this mapping and all other dereferencing of
* pointers from this mapping need to check that the value is not error
* or NULL.
*
@@ -507,9 +507,9 @@ void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta);
-bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta, int *ret);
+ struct ieee80211_link_sta *link_sta);
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -665,8 +665,7 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw);
+ unsigned int link_id);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 65927ebbabb7..36379b738de1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -24,7 +24,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
- if (!sta || IS_ERR(sta) || !sta->tdls)
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
continue;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -47,7 +47,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
- if (!sta || IS_ERR(sta) || !sta->tdls)
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
continue;
if (vif) {
@@ -472,7 +472,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
- if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
+ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
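
The tdls.c conversions above are behavior-preserving: IS_ERR_OR_NULL(p) is defined as !p || IS_ERR(p). A standalone sketch of the equivalence, using a simplified ERR_PTR encoding (the kernel reserves the top MAX_ERRNO values of the address space for error pointers):

#include <stdio.h>

#define MAX_ERRNO 4095UL

static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static int is_err_or_null(const void *p)
{
	return !p || is_err(p);
}

int main(void)
{
	void *null = NULL, *err = (void *)(unsigned long)-22, *ok = &null;

	/* both forms agree on NULL, error pointers, and valid pointers */
	printf("%d %d\n", !null || is_err(null), is_err_or_null(null)); /* 1 1 */
	printf("%d %d\n", !err || is_err(err), is_err_or_null(err));    /* 1 1 */
	printf("%d %d\n", !ok || is_err(ok), is_err_or_null(ok));       /* 0 0 */
	return 0;
}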
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
index 1dc57e022191..d692f1813d44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
@@ -262,7 +262,7 @@ static const struct valid_link_pair_case {
.desc = "LB + HB, no BT.",
.chan_a = &chan_2ghz,
.chan_b = &chan_5ghz,
- .valid = false,
+ .valid = true,
},
{
.desc = "LB + HB, with BT.",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 72fa7ac86516..9216c43a35c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -751,7 +751,7 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
u32 id, s8 link_id)
{
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
- struct iwl_mvm_session_prot_cmd cmd = {
+ struct iwl_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.conf_id = cpu_to_le32(id),
@@ -955,7 +955,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
+ struct iwl_session_prot_notif *notif = (void *)pkt->data;
unsigned int ver =
iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
SESSION_PROTECTION_NOTIF, 2);
@@ -1148,7 +1148,7 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
enum ieee80211_roc_type type)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_session_prot_cmd cmd = {
+ struct iwl_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
@@ -1417,7 +1417,7 @@ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
{
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
- struct iwl_mvm_session_prot_notif *resp;
+ struct iwl_session_prot_notif *resp;
int resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
@@ -1449,7 +1449,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
- struct iwl_mvm_session_prot_cmd cmd = {
+ struct iwl_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index c9867d26361b..f67afb66ef2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -13,6 +13,7 @@
#include "iwl-trans.h"
#include "iwl-nvm-utils.h"
+#include "iwl-utils.h"
#include "mvm.h"
#include "sta.h"
#include "time-sync.h"
@@ -938,78 +939,6 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
#ifdef CONFIG_INET
-static int
-iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
- netdev_features_t netdev_flags,
- struct sk_buff_head *mpdus_skb)
-{
- struct sk_buff *tmp, *next;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- char cb[sizeof(skb->cb)];
- u16 i = 0;
- unsigned int tcp_payload_len;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- bool ipv4 = (skb->protocol == htons(ETH_P_IP));
- bool qos = ieee80211_is_data_qos(hdr->frame_control);
- u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
-
- skb_shinfo(skb)->gso_size = num_subframes * mss;
- memcpy(cb, skb->cb, sizeof(cb));
-
- next = skb_gso_segment(skb, netdev_flags);
- skb_shinfo(skb)->gso_size = mss;
- skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
-
- if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
- return -ENOMEM;
-
- if (WARN_ONCE(IS_ERR(next),
- "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
- return PTR_ERR(next);
-
- if (next)
- consume_skb(skb);
-
- skb_list_walk_safe(next, tmp, next) {
- memcpy(tmp->cb, cb, sizeof(tmp->cb));
- /*
- * Compute the length of all the data added for the A-MSDU.
- * This will be used to compute the length to write in the TX
- * command. We have: SNAP + IP + TCP for n - 1 subframes and
- * ETH header for n subframes.
- */
- tcp_payload_len = skb_tail_pointer(tmp) -
- skb_transport_header(tmp) -
- tcp_hdrlen(tmp) + tmp->data_len;
-
- if (ipv4)
- ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
-
- if (tcp_payload_len > mss) {
- skb_shinfo(tmp)->gso_size = mss;
- skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
- SKB_GSO_TCPV6;
- } else {
- if (qos) {
- u8 *qc;
-
- if (ipv4)
- ip_send_check(ip_hdr(tmp));
-
- qc = ieee80211_get_qos_ctl((void *)tmp->data);
- *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- }
- skb_shinfo(tmp)->gso_size = 0;
- }
-
- skb_mark_not_on_list(tmp);
- __skb_queue_tail(mpdus_skb, tmp);
- i++;
- }
-
- return 0;
-}
-
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@@ -1028,7 +957,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
!mvmsta->amsdu_enabled)
- return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
/*
* Do not build AMSDU for IPv6 with extension headers.
@@ -1038,7 +967,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
IPPROTO_TCP) {
netdev_flags &= ~NETIF_F_CSUM_MASK;
- return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
}
tid = ieee80211_get_tid(hdr);
@@ -1052,7 +981,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
!mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
!(mvmsta->amsdu_enabled & BIT(tid)))
- return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
/*
* Take the min of ieee80211 station and mvm station
@@ -1110,8 +1039,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* Trick the segmentation function to make it
* create SKBs that can fit into one A-MSDU.
*/
- return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
- mpdus_skb);
+ return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb);
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -1698,8 +1626,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
int txq_id = SEQ_TO_QUEUE(sequence);
/* struct iwl_tx_resp_v3 is almost the same */
struct iwl_tx_resp *tx_resp = (void *)pkt->data;
- int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
- int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
struct agg_tx_status *agg_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
u32 status = le16_to_cpu(agg_status->status);
@@ -1880,7 +1808,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0);
+ if (tid < IWL_MAX_TID_COUNT)
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
+ true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -1989,8 +1919,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_tx_resp *tx_resp = (void *)pkt->data;
- int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
- int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
struct iwl_mvm_sta *mvmsta;
int queue = SEQ_TO_QUEUE(sequence);
@@ -2193,7 +2123,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.flags = IEEE80211_TX_STAT_AMPDU;
if (iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_mvm_compressed_ba_notif *ba_res =
+ struct iwl_compressed_ba_notif *ba_res =
(void *)pkt->data;
u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
u16 tfd_cnt;
@@ -2241,8 +2171,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
/* Free per TID */
for (i = 0; i < tfd_cnt; i++) {
- struct iwl_mvm_compressed_ba_tfd *ba_tfd =
- &ba_res->tfd[i];
+ struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
tid = ba_tfd->tid;
if (tid == IWL_MGMT_TID)
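
The deleted iwl_mvm_tx_tso_segment() above (now shared as iwl_tx_tso_segment() via the new iwl-utils.h include) relies on temporarily inflating gso_size so the generic GSO engine cuts the TCP stream into A-MSDU-sized pieces of num_subframes * mss bytes each. A standalone sketch of the size arithmetic only; the values are examples, not driver state:

#include <stdio.h>

int main(void)
{
	unsigned mss = 1460;        /* TCP payload per subframe */
	unsigned num_subframes = 3; /* subframes per A-MSDU */
	unsigned payload = 10000;   /* total TCP payload to send */

	/* inflating gso_size makes the GSO engine cut the stream into
	 * A-MSDU-sized pieces instead of single-MSS pieces */
	unsigned gso = num_subframes * mss;
	unsigned full = payload / gso;
	unsigned rem = payload % gso;

	printf("%u A-MSDUs of %u bytes + tail of %u bytes\n",
	       full, gso, rem);
	/* the tail piece stays segmented only if it still exceeds one
	 * MSS, otherwise it goes out as a plain MPDU */
	printf("tail is %s\n", rem > mss ? "still segmented" : "a single MPDU");
	return 0;
}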
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index 080a1587caa5..0f7fa6032c66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -104,9 +104,9 @@ static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
};
enum iwl_mvm_vendor_events_idx {
- /* 0x0 - 0x3 are deprecated */
- IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
- NUM_IWL_MVM_VENDOR_EVENT_IDX
+ /* 0x0 - 0x3 are deprecated */
+ IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
+ NUM_IWL_MVM_VENDOR_EVENT_IDX
};
static const struct nl80211_vendor_cmd_info
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index ae93a72542b2..838c426db7f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -106,6 +106,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
u32 control_flags = 0;
+ u32 control_flags_ext = 0;
int ret;
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
@@ -130,6 +131,12 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
break;
}
+ if (trans->dsbr_urm_fw_dependent)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
+
+ if (trans->dsbr_urm_permanent)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
+
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
&trans_pcie->prph_scratch_dma_addr,
@@ -165,6 +172,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
&control_flags);
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);
/* initialize the Step equalizer data */
prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8fb2aa282242..e0b657b2f74b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -540,6 +540,9 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
{IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
{IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
+
+/* Dr devices */
+ {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -1182,6 +1185,19 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2f, iwl_sc2f_name),
+/* Dr */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_dr, iwl_dr_name),
+
+/* Br */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_br, iwl_br_name),
#endif /* CONFIG_IWLMVM */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
@@ -1286,6 +1302,9 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
case REG_CRF_ID_TYPE_WHP:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
break;
+ case REG_CRF_ID_TYPE_PE:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
+ break;
default:
ret = -EIO;
IWL_ERR(iwl_trans,
@@ -1391,6 +1410,47 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
+static void iwl_pcie_recheck_me_status(struct work_struct *wk)
+{
+ struct iwl_trans *trans = container_of(wk, typeof(*trans),
+ me_recheck_wk.work);
+ u32 val;
+
+ val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
+ trans->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
+}
+
+static void iwl_pcie_check_me_status(struct iwl_trans *trans)
+{
+ u32 val;
+
+ trans->me_present = -1;
+
+ INIT_DELAYED_WORK(&trans->me_recheck_wk,
+ iwl_pcie_recheck_me_status);
+
+ /* we don't have a good way of determining this until BZ */
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ return;
+
+ val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1);
+ if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) {
+ trans->me_present =
+ !!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT);
+ return;
+ }
+
+ val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
+ if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN |
+ CSR_HW_IF_CONFIG_REG_IAMT_UP)) {
+ trans->me_present = 1;
+ return;
+ }
+
+	/* recheck later, ME might still be initializing */
+ schedule_delayed_work(&trans->me_recheck_wk, HZ);
+}
+
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg_trans_params *trans;
@@ -1420,6 +1480,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+ iwl_trans_pcie_check_product_reset_status(pdev);
+ iwl_trans_pcie_check_product_reset_mode(pdev);
+
/*
* Let's try to grab NIC access early here. Sometimes, NICs may
* fail to initialize, and if that happens it's better if we see
@@ -1566,6 +1629,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, iwl_trans);
+ iwl_pcie_check_me_status(iwl_trans);
+
/* try to get ownership so that we'll know if we don't own it */
iwl_pcie_prepare_card_hw(iwl_trans);
@@ -1593,6 +1658,8 @@ static void iwl_pci_remove(struct pci_dev *pdev)
if (!trans)
return;
+ cancel_delayed_work_sync(&trans->me_recheck_wk);
+
iwl_drv_stop(trans->drv);
iwl_trans_pcie_free(trans);
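
The new drv.c code above leaves me_present as a tri-state: -1 while unknown (with a delayed recheck scheduled), 0 when ME is known absent, 1 when present. The value gates the reset policy in iwl_trans_pcie_reset() further down in this series. A standalone sketch of that decision, with the names and enum values as stand-ins:

#include <stdio.h>

enum reset_mode { MODE_FUNC_RESET, MODE_PROD_RESET };

static enum reset_mode pick_mode(int me_present, enum reset_mode req)
{
	/* downgrade product reset unless ME is known absent */
	if (me_present != 0 && req == MODE_PROD_RESET)
		return MODE_FUNC_RESET;
	return req;
}

int main(void)
{
	printf("%d\n", pick_mode(-1, MODE_PROD_RESET)); /* 0: func reset */
	printf("%d\n", pick_mode(1, MODE_PROD_RESET));  /* 0: func reset */
	printf("%d\n", pick_mode(0, MODE_PROD_RESET));  /* 1: prod reset */
	return 0;
}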
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 27a7e0b5b3d5..856b7e9f717d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -563,6 +563,9 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
__cond_lock(nic_access_nobh, \
likely(__iwl_trans_pcie_grab_nic_access(trans)))
+void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
+void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
+
/*****************************************************
* RX
******************************************************/
@@ -1134,9 +1137,6 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index afb88eab8174..4a442d03d8d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1702,7 +1702,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
/* The STATUS_FW_ERROR bit is set in this function. This must happen
* before we wake up the command caller, to ensure a proper cleanup. */
- iwl_trans_fw_error(trans, false);
+ iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans->wait_command_queue);
@@ -2297,7 +2297,9 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
inta_hw);
- /* TODO: PLDR flow required here for >= Bz */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_trans_pcie_reset(trans,
+ IWL_RESET_MODE_PROD_RESET);
}
/* Error detected by uCode */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 8903a5692dfb..793514a1852a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -43,7 +43,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
* wake device's PCI Express link L1a -> L0s
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+ CSR_HW_IF_CONFIG_REG_HAP_WAKE);
iwl_pcie_apm_config(trans);
@@ -68,8 +68,8 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE |
- CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ CSR_HW_IF_CONFIG_REG_WAKE_ME |
+ CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
mdelay(1);
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -123,14 +123,21 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
"timeout waiting for FW reset ACK (inta_hw=0x%x)\n",
inta_hw);
- if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE))
- iwl_trans_fw_error(trans, true);
+ if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) {
+ struct iwl_fw_error_dump_mode mode = {
+ .type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
+ .context = IWL_ERR_CONTEXT_FROM_OPMODE,
+ };
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_RESET_HS_TIMEOUT);
+ iwl_op_mode_dump_error(trans->op_mode, &mode);
+ }
}
trans_pcie->fw_reset_state = FW_RESET_IDLE;
}
-void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -139,9 +146,9 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (trans_pcie->is_down)
return;
- if (trans->state >= IWL_TRANS_FW_STARTED)
- if (trans_pcie->fw_reset_handshake)
- iwl_trans_pcie_fw_reset_handshake(trans);
+ if (trans->state >= IWL_TRANS_FW_STARTED &&
+ trans_pcie->fw_reset_handshake)
+ iwl_trans_pcie_fw_reset_handshake(trans);
trans_pcie->is_down = true;
@@ -287,9 +294,6 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
pos = scnprintf(buf, buflen, "HRCDB");
break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS):
- pos = scnprintf(buf, buflen, "MS");
- break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):
pos = scnprintf(buf, buflen, "FM");
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 86f1d87a909c..c917ed4c19bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -24,6 +24,7 @@
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
+#include "fw/acpi.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -311,7 +312,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* wake device's PCI Express link L1a -> L0s
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+ CSR_HW_IF_CONFIG_REG_HAP_WAKE);
iwl_pcie_apm_config(trans);
@@ -439,7 +440,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* SHRD_HW_RST is applied in S3.
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+ CSR_HW_IF_CONFIG_REG_PERSISTENCE);
/*
* Clear "initialization complete" bit to move adapter from
@@ -508,8 +509,8 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE |
- CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ CSR_HW_IF_CONFIG_REG_WAKE_ME |
+ CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
mdelay(1);
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -581,12 +582,12 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
int ret;
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET);
/* See if we got it */
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
HW_READY_TIMEOUT);
if (ret >= 0)
@@ -620,7 +621,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ CSR_HW_IF_CONFIG_REG_WAKE_ME);
do {
ret = iwl_pcie_set_hw_ready(trans);
@@ -1488,8 +1489,8 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
_iwl_trans_pcie_stop_device(trans, from_irq);
}
-void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset)
+static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
+ bool test, bool reset)
{
iwl_disable_interrupts(trans);
@@ -1566,7 +1567,7 @@ int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
if (!reset)
/* Enable persistence mode to avoid reset */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+ CSR_HW_IF_CONFIG_REG_PERSISTENCE);
ret = iwl_pcie_d3_handshake(trans, true);
if (ret)
@@ -2105,10 +2106,157 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_trans_free(trans);
}
+static union acpi_object *
+iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value)
+{
+#ifdef CONFIG_ACPI
+ struct iwl_dsm_internal_product_reset_cmd pldr_arg = {
+ .cmd = cmd,
+ .value = value,
+ };
+ union acpi_object arg = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(pldr_arg),
+ .buffer.pointer = (void *)&pldr_arg,
+ };
+ static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
+ 0x81, 0x4F, 0x75, 0xE4,
+ 0xDD, 0x26, 0xB5, 0xFD);
+
+ if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV,
+ DSM_INTERNAL_FUNC_PRODUCT_RESET))
+ return ERR_PTR(-ENODEV);
+
+ return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV,
+ DSM_INTERNAL_FUNC_PRODUCT_RESET,
+ &arg, &dsm_guid);
+#else
+ return ERR_PTR(-EOPNOTSUPP);
+#endif
+}
+
+void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev)
+{
+ union acpi_object *res;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_GET_MODE,
+ 0);
+ if (IS_ERR(res))
+ return;
+
+ if (res->type != ACPI_TYPE_INTEGER)
+ IWL_ERR_DEV(&pdev->dev,
+ "unexpected return type from product reset DSM\n");
+ else
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "product reset mode is 0x%llx\n",
+ res->integer.value);
+
+ ACPI_FREE(res);
+}
+
+static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable,
+ bool integrated)
+{
+ union acpi_object *res;
+ u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0;
+
+ if (!integrated)
+ mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR |
+ DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_SET_MODE,
+ mode);
+ if (IS_ERR(res)) {
+ if (enable)
+ IWL_ERR_DEV(&pdev->dev,
+ "ACPI _DSM not available (%d), cannot do product reset\n",
+ (int)PTR_ERR(res));
+ return;
+ }
+
+ ACPI_FREE(res);
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n",
+ enable ? "En" : "Dis");
+ iwl_trans_pcie_check_product_reset_mode(pdev);
+}
+
+void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev)
+{
+ union acpi_object *res;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_GET_STATUS,
+ 0);
+ if (IS_ERR(res))
+ return;
+
+ if (res->type != ACPI_TYPE_INTEGER)
+ IWL_ERR_DEV(&pdev->dev,
+ "unexpected return type from product reset DSM\n");
+ else
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "product reset status is 0x%llx\n",
+ res->integer.value);
+
+ ACPI_FREE(res);
+}
+
+static void iwl_trans_pcie_call_reset(struct pci_dev *pdev)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p, *ref;
+ acpi_status status;
+ int ret = -EINVAL;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev),
+ "_PRR", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n");
+ goto out;
+ }
+ p = buffer.pointer;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) {
+ pci_err(pdev, "Bad _PRR return type\n");
+ goto out;
+ }
+
+ ref = &p->package.elements[0];
+ if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {
+ pci_err(pdev, "_PRR wasn't a reference\n");
+ goto out;
+ }
+
+ status = acpi_evaluate_object(ref->reference.handle,
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ pci_err(pdev,
+ "Failed to call _RST on object returned by _PRR (%d)\n",
+ status);
+ goto out;
+ }
+ ret = 0;
+out:
+ kfree(buffer.pointer);
+ if (!ret) {
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n");
+ return;
+ }
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "No BIOS support, using pci_reset_function()\n");
+#endif
+ pci_reset_function(pdev);
+}
+
struct iwl_trans_pcie_removal {
struct pci_dev *pdev;
struct work_struct work;
- bool rescan;
+ enum iwl_reset_mode mode;
+ bool integrated;
};
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
@@ -2126,14 +2274,66 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
if (!bus)
goto out;
- dev_err(&pdev->dev, "Device gone - attempting removal\n");
-
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
+ if (removal->mode == IWL_RESET_MODE_PROD_RESET) {
+ struct pci_dev *bt = NULL;
+
+ if (!removal->integrated) {
+ /* discrete devices have WiFi/BT at function 0/1 */
+ int slot = PCI_SLOT(pdev->devfn);
+ int func = PCI_FUNC(pdev->devfn);
+
+ if (func == 0)
+ bt = pci_get_slot(bus, PCI_DEVFN(slot, 1));
+ else
+ pci_info(pdev, "Unexpected function %d\n",
+ func);
+ } else {
+ /* on integrated we have to look up by ID (same bus) */
+ static const struct pci_device_id bt_device_ids[] = {
+#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) }
+ BT_DEV(0xA876), /* LNL */
+ BT_DEV(0xE476), /* PTL-P */
+ BT_DEV(0xE376), /* PTL-H */
+ BT_DEV(0xD346), /* NVL-H */
+ BT_DEV(0x6E74), /* NVL-S */
+ BT_DEV(0x4D76), /* WCL */
+ BT_DEV(0xD246), /* RZL-H */
+ BT_DEV(0x6C46), /* RZL-M */
+ {}
+ };
+ struct pci_dev *tmp = NULL;
+
+ for_each_pci_dev(tmp) {
+ if (tmp->bus != bus)
+ continue;
+
+ if (pci_match_id(bt_device_ids, tmp)) {
+ bt = tmp;
+ break;
+ }
+ }
+ }
+
+ if (bt) {
+ pci_info(bt, "Removal by WiFi due to product reset\n");
+ pci_stop_and_remove_bus_device(bt);
+ pci_dev_put(bt);
+ }
+ }
+
+ iwl_trans_pcie_set_product_reset(pdev,
+ removal->mode ==
+ IWL_RESET_MODE_PROD_RESET,
+ removal->integrated);
+ if (removal->mode >= IWL_RESET_MODE_FUNC_RESET)
+ iwl_trans_pcie_call_reset(pdev);
+
pci_stop_and_remove_bus_device(pdev);
pci_dev_put(pdev);
- if (removal->rescan) {
+ if (removal->mode >= IWL_RESET_MODE_RESCAN) {
if (bus->parent)
bus = bus->parent;
pci_rescan_bus(bus);
@@ -2146,14 +2346,27 @@ out:
module_put(THIS_MODULE);
}
-void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
+void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
{
struct iwl_trans_pcie_removal *removal;
+ char _msg = 0, *msg = &_msg;
+
+ if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY))
+ return;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
- IWL_ERR(trans, "Device gone - scheduling removal!\n");
+ if (trans->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
+ mode = IWL_RESET_MODE_FUNC_RESET;
+ if (trans->me_present < 0)
+ msg = " instead of product reset as ME may be present";
+ else
+ msg = " instead of product reset as ME is present";
+ }
+
+ IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg);
+
iwl_pcie_dump_csr(trans);
/*
@@ -2180,12 +2393,13 @@ void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
set_bit(STATUS_TRANS_DEAD, &trans->status);
removal->pdev = to_pci_dev(trans->dev);
- removal->rescan = rescan;
+ removal->mode = mode;
+ removal->integrated = trans->trans_cfg->integrated;
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
}
-EXPORT_SYMBOL(iwl_trans_pcie_remove);
+EXPORT_SYMBOL(iwl_trans_pcie_reset);
/*
* This version doesn't disable BHs but rather assumes they're
@@ -2250,7 +2464,8 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
iwl_trans_pcie_dump_regs(trans);
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
- iwl_trans_pcie_remove(trans, false);
+ iwl_trans_pcie_reset(trans,
+ IWL_RESET_MODE_REMOVE_ONLY);
else
iwl_write32(trans, CSR_RESET,
CSR_RESET_REG_FLAG_FORCE_NMI);
@@ -3037,12 +3252,47 @@ static ssize_t iwl_dbgfs_rf_read(struct file *file,
strlen(trans_pcie->rf_name));
}
+static ssize_t iwl_dbgfs_reset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ static const char * const modes[] = {
+ [IWL_RESET_MODE_SW_RESET] = "n/a",
+ [IWL_RESET_MODE_REPROBE] = "n/a",
+ [IWL_RESET_MODE_REMOVE_ONLY] = "remove",
+ [IWL_RESET_MODE_RESCAN] = "rescan",
+ [IWL_RESET_MODE_FUNC_RESET] = "function",
+ [IWL_RESET_MODE_PROD_RESET] = "product",
+ };
+ char buf[10] = {};
+ int mode;
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ mode = sysfs_match_string(modes, buf);
+ if (mode < 0)
+ return mode;
+
+ if (mode < IWL_RESET_MODE_REMOVE_ONLY)
+ return -EINVAL;
+
+ iwl_trans_pcie_reset(trans, mode);
+
+ return count;
+}
+
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
DEBUGFS_READ_FILE_OPS(rf);
+DEBUGFS_WRITE_FILE_OPS(reset);
static const struct file_operations iwl_dbgfs_tx_queue_ops = {
.owner = THIS_MODULE,
@@ -3071,6 +3321,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
DEBUGFS_ADD_FILE(rf, dir, 0400);
+ DEBUGFS_ADD_FILE(reset, dir, 0200);
}
void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
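
The new debugfs "reset" writer above parses the user string with sysfs_match_string(), which matches ignoring a trailing newline and returns the array index or -EINVAL; indices below IWL_RESET_MODE_REMOVE_ONLY are then rejected by the caller. A userspace sketch of the same parsing, with the errno value hard-coded:

#include <stdio.h>
#include <string.h>

static const char * const modes[] = {
	"n/a", "n/a", "remove", "rescan", "function", "product",
};

static int match_mode(const char *buf)
{
	size_t len = strcspn(buf, "\n"); /* ignore a trailing newline */

	for (size_t i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (strlen(modes[i]) == len && !strncmp(modes[i], buf, len))
			return (int)i;
	return -22; /* -EINVAL */
}

int main(void)
{
	printf("%d\n", match_mode("rescan\n")); /* 3 */
	printf("%d\n", match_mode("product"));  /* 5 */
	printf("%d\n", match_mode("bogus\n"));  /* -22 */
	/* indexes below 2 (remove-only) are rejected by the caller,
	 * so writing "n/a" can never trigger a software reset */
	return 0;
}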
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index b1846abb99b7..1f483f15c238 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1298,7 +1298,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
- iwl_op_mode_cmd_queue_full(trans->op_mode);
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
idx = -ENOSPC;
goto free_dup_buf;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 1ef14340953c..334ebd4c12fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1449,7 +1449,9 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
- iwl_op_mode_cmd_queue_full(trans->op_mode);
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
idx = -ENOSPC;
goto free_dup_buf;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index fca3eea7ee84..a099fdaafa45 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -410,7 +410,7 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
static int
mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index e06a0622973e..f79589cafe57 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -545,7 +545,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
adapter->hs_activate_wait_q_woken,
- (10 * HZ)) <= 0) {
+ (5 * HZ)) <= 0) {
mwifiex_dbg(adapter, ERROR,
"hs_activate_wait_q terminated\n");
return false;
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index f7f2d9a8ab0f..87512d101a91 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MT792x_USB) += mt792x-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o wed.o
+ tx.o agg-rx.o mcu.o wed.o scan.o channel.o
mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
new file mode 100644
index 000000000000..6a35c6ebd823
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2024 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+static struct mt76_vif_link *
+mt76_alloc_mlink(struct mt76_dev *dev, struct mt76_vif_data *mvif)
+{
+ struct mt76_vif_link *mlink;
+
+ mlink = kzalloc(dev->drv->link_data_size, GFP_KERNEL);
+ if (!mlink)
+ return NULL;
+
+ mlink->mvif = mvif;
+
+ return mlink;
+}
+
+static int
+mt76_phy_update_channel(struct mt76_phy *phy,
+ struct ieee80211_chanctx_conf *conf)
+{
+ phy->radar_enabled = conf->radar_enabled;
+ phy->main_chandef = conf->def;
+ phy->chanctx = (struct mt76_chanctx *)conf->drv_priv;
+
+ return __mt76_set_channel(phy, &phy->main_chandef, false);
+}
+
+int mt76_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ int ret = -EINVAL;
+
+ phy = ctx->phy = dev->band_phys[conf->def.chan->band];
+ if (WARN_ON_ONCE(!phy))
+ return ret;
+
+ if (dev->scan.phy == phy)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+ if (!phy->chanctx)
+ ret = mt76_phy_update_channel(phy, conf);
+ else
+ ret = 0;
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_add_chanctx);
+
+void mt76_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+
+ phy = ctx->phy;
+ if (WARN_ON_ONCE(!phy))
+ return;
+
+ if (dev->scan.phy == phy)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+ if (phy->chanctx == ctx)
+ phy->chanctx = NULL;
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_remove_chanctx);
+
+void mt76_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_phy *phy = ctx->phy;
+ struct mt76_dev *dev = phy->dev;
+
+ if (!(changed & (IEEE80211_CHANCTX_CHANGE_WIDTH |
+ IEEE80211_CHANCTX_CHANGE_RADAR)))
+ return;
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ mt76_phy_update_channel(phy, conf);
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_change_chanctx);
+
+int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ int link_id = link_conf->link_id;
+ struct mt76_phy *phy = ctx->phy;
+ struct mt76_dev *dev = phy->dev;
+ bool mlink_alloc = false;
+ int ret = 0;
+
+ if (dev->scan.vif == vif)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ goto out;
+
+ mlink = mt76_vif_conf_link(dev, vif, link_conf);
+ if (!mlink) {
+ mlink = mt76_alloc_mlink(dev, mvif);
+ if (!mlink) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ mlink_alloc = true;
+ }
+
+ mlink->ctx = conf;
+ ret = dev->drv->vif_link_add(phy, vif, link_conf, mlink);
+ if (ret) {
+ if (mlink_alloc)
+ kfree(mlink);
+ goto out;
+ }
+
+ if (link_conf != &vif->bss_conf)
+ rcu_assign_pointer(mvif->link[link_id], mlink);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_assign_vif_chanctx);
+
+void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ int link_id = link_conf->link_id;
+ struct mt76_phy *phy = ctx->phy;
+ struct mt76_dev *dev = phy->dev;
+
+ if (dev->scan.vif == vif)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ goto out;
+
+ mlink = mt76_vif_conf_link(dev, vif, link_conf);
+ if (!mlink)
+ goto out;
+
+ if (link_conf != &vif->bss_conf)
+ rcu_assign_pointer(mvif->link[link_id], NULL);
+
+ dev->drv->vif_link_remove(phy, vif, link_conf, mlink);
+ mlink->ctx = NULL;
+
+ if (link_conf != &vif->bss_conf)
+ kfree_rcu(mlink, rcu_head);
+
+out:
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_unassign_vif_chanctx);
+
+int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct mt76_chanctx *old_ctx = (struct mt76_chanctx *)vifs->old_ctx->drv_priv;
+ struct mt76_chanctx *new_ctx = (struct mt76_chanctx *)vifs->new_ctx->drv_priv;
+ struct ieee80211_chanctx_conf *conf = vifs->new_ctx;
+ struct mt76_phy *old_phy = old_ctx->phy;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink;
+ bool update_chan;
+ int i, ret = 0;
+
+ if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS)
+ phy = new_ctx->phy = dev->band_phys[conf->def.chan->band];
+ else
+ phy = new_ctx->phy;
+ if (!phy)
+ return -EINVAL;
+
+ update_chan = phy->chanctx != new_ctx;
+ if (update_chan) {
+ if (dev->scan.phy == phy)
+ mt76_abort_scan(dev);
+
+ cancel_delayed_work_sync(&phy->mac_work);
+ }
+
+ mutex_lock(&dev->mutex);
+
+ if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
+ phy != old_phy && old_phy->chanctx == old_ctx)
+ old_phy->chanctx = NULL;
+
+ if (update_chan)
+ ret = mt76_phy_update_channel(phy, vifs->new_ctx);
+
+ if (ret)
+ goto out;
+
+ if (old_phy == phy)
+ goto skip_link_replace;
+
+ for (i = 0; i < n_vifs; i++) {
+ mlink = mt76_vif_conf_link(dev, vifs[i].vif, vifs[i].link_conf);
+ if (!mlink)
+ continue;
+
+ dev->drv->vif_link_remove(old_phy, vifs[i].vif,
+ vifs[i].link_conf, mlink);
+
+ ret = dev->drv->vif_link_add(phy, vifs[i].vif,
+ vifs[i].link_conf, mlink);
+ if (ret)
+ goto out;
+
+ }
+
+skip_link_replace:
+ for (i = 0; i < n_vifs; i++) {
+ mlink = mt76_vif_conf_link(dev, vifs[i].vif, vifs[i].link_conf);
+ if (!mlink)
+ continue;
+
+ mlink->ctx = vifs->new_ctx;
+ }
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_switch_vif_chanctx);
+
+struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt76_dev *dev = phy->dev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
+ mlink = mt76_dereference(mvif->link[i], dev);
+ if (!mlink)
+ continue;
+
+ if (mt76_vif_link_phy(mlink) == phy)
+ return mlink;
+ }
+
+ if (!dev->drv->vif_link_add)
+ return ERR_PTR(-EINVAL);
+
+ mlink = mt76_alloc_mlink(dev, mvif);
+ if (!mlink)
+ return ERR_PTR(-ENOMEM);
+
+ mlink->offchannel = true;
+ ret = dev->drv->vif_link_add(phy, vif, &vif->bss_conf, mlink);
+ if (ret) {
+ kfree(mlink);
+ return ERR_PTR(ret);
+ }
+
+ return mlink;
+}
+
+void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct mt76_vif_link *mlink)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel)
+ return;
+
+ dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink);
+ kfree(mlink);
+}
+
+static void mt76_roc_complete(struct mt76_phy *phy)
+{
+ struct mt76_vif_link *mlink = phy->roc_link;
+
+ if (!phy->roc_vif)
+ return;
+
+ if (mlink)
+ mlink->mvif->roc_phy = NULL;
+ if (phy->main_chandef.chan)
+ mt76_set_channel(phy, &phy->main_chandef, false);
+ mt76_put_vif_phy_link(phy, phy->roc_vif, phy->roc_link);
+ phy->roc_vif = NULL;
+ phy->roc_link = NULL;
+ ieee80211_remain_on_channel_expired(phy->hw);
+}
+
+void mt76_roc_complete_work(struct work_struct *work)
+{
+ struct mt76_phy *phy = container_of(work, struct mt76_phy, roc_work.work);
+ struct mt76_dev *dev = phy->dev;
+
+ mutex_lock(&dev->mutex);
+ mt76_roc_complete(phy);
+ mutex_unlock(&dev->mutex);
+}
+
+void mt76_abort_roc(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ cancel_delayed_work_sync(&phy->roc_work);
+
+ mutex_lock(&dev->mutex);
+ mt76_roc_complete(phy);
+ mutex_unlock(&dev->mutex);
+}
+
+int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum ieee80211_roc_type type)
+{
+ struct cfg80211_chan_def chandef = {};
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink;
+ int ret = 0;
+
+ phy = dev->band_phys[chan->band];
+ if (!phy)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+
+ if (phy->roc_vif || dev->scan.phy == phy) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mlink = mt76_get_vif_phy_link(phy, vif);
+ if (IS_ERR(mlink)) {
+ ret = PTR_ERR(mlink);
+ goto out;
+ }
+
+ mlink->mvif->roc_phy = phy;
+ phy->roc_vif = vif;
+ phy->roc_link = mlink;
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+ mt76_set_channel(phy, &chandef, true);
+ ieee80211_ready_on_channel(hw);
+ ieee80211_queue_delayed_work(phy->hw, &phy->roc_work,
+ msecs_to_jiffies(duration));
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_remain_on_channel);
+
+int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt76_phy *phy = mvif->roc_phy;
+
+ if (!phy)
+ return 0;
+
+ mt76_abort_roc(phy);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_cancel_remain_on_channel);
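
The remain-on-channel path in the new channel.c above follows a small state machine: reject a second ROC per phy, switch off-channel, and arm a delayed work whose completion restores the main channel and is safe to run twice (work expiry and explicit abort both call it). A condensed standalone sketch with stand-in types and no mt76 APIs:

#include <stdio.h>
#include <stdbool.h>

struct phy {
	bool roc_active;
	int main_chan, cur_chan;
};

static int roc_start(struct phy *phy, int chan)
{
	if (phy->roc_active)
		return -16; /* -EBUSY: one ROC at a time per phy */
	phy->roc_active = true;
	phy->cur_chan = chan;           /* off-channel switch */
	printf("ready on channel %d\n", chan);
	return 0;
}

static void roc_complete(struct phy *phy)
{
	if (!phy->roc_active)
		return;                 /* idempotent, like mt76_roc_complete */
	phy->roc_active = false;
	phy->cur_chan = phy->main_chan; /* restore operating channel */
	printf("roc done, back on %d\n", phy->cur_chan);
}

int main(void)
{
	struct phy phy = { .main_chan = 36, .cur_chan = 36 };

	roc_start(&phy, 1);
	printf("%d\n", roc_start(&phy, 6)); /* -16 while active */
	roc_complete(&phy);
	roc_complete(&phy);                 /* safe to call twice */
	return 0;
}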
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 5f46d6daeaa7..844af16ee551 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -631,7 +631,8 @@ free_skb:
return ret;
}
-int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+static int
+mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct)
{
int len = SKB_WITH_OVERHEAD(q->buf_size);
@@ -640,8 +641,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->ndesc)
return 0;
- spin_lock_bh(&q->lock);
-
while (q->queued < q->ndesc - 1) {
struct mt76_queue_buf qbuf = {};
enum dma_data_direction dir;
@@ -674,6 +673,19 @@ done:
if (frames || mt76_queue_is_wed_rx(q))
mt76_dma_kick_queue(dev, q);
+ return frames;
+}
+
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
+{
+ int frames;
+
+ if (!q->ndesc)
+ return 0;
+
+ spin_lock_bh(&q->lock);
+ frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
spin_unlock_bh(&q->lock);
return frames;
@@ -796,7 +808,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
return;
mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
+ mt76_dma_rx_fill_buf(dev, q, false);
}
static void
@@ -969,7 +981,7 @@ mt76_dma_init(struct mt76_dev *dev,
mt76_for_each_q_rx(dev, i) {
netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+ mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
napi_enable(&dev->napi[i]);
}
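
The dma.c refactor above is the usual lock-split pattern: mt76_dma_rx_fill_buf() does the work and assumes the caller either holds the queue lock or runs in a single-threaded context (reset, init), while mt76_dma_rx_fill() remains the locking entry point for everyone else. A standalone sketch of the pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int queued;

static int rx_fill_buf(int ndesc) /* caller owns the lock, or runs alone */
{
	int frames = 0;

	while (queued < ndesc - 1) {
		queued++;
		frames++;
	}
	return frames;
}

static int rx_fill(int ndesc) /* public: safe from concurrent contexts */
{
	int frames;

	pthread_mutex_lock(&q_lock);
	frames = rx_fill_buf(ndesc);
	pthread_mutex_unlock(&q_lock);
	return frames;
}

int main(void)
{
	printf("%d\n", rx_fill_buf(8)); /* init path: no contention yet */
	queued = 3;
	printf("%d\n", rx_fill(8));     /* runtime path: takes the lock */
	return 0;
}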
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 9d5561f44134..508b472408c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -411,13 +411,16 @@ mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
}
if (found) {
- phy->chandef.chan = &sband->channels[0];
+ cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
+ NL80211_CHAN_HT20);
phy->chan_state = &msband->chan[0];
+ phy->dev->band_phys[band] = phy;
return;
}
sband->n_channels = 0;
- phy->hw->wiphy->bands[band] = NULL;
+ if (phy->hw->wiphy->bands[band] == sband)
+ phy->hw->wiphy->bands[band] = NULL;
}
static int
@@ -428,6 +431,10 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
INIT_LIST_HEAD(&phy->tx_list);
spin_lock_init(&phy->tx_lock);
+ INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);
+
+ if ((void *)phy != hw->priv)
+ return 0;
SET_IEEE80211_DEV(hw, dev->dev);
SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
@@ -481,6 +488,28 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
}
struct mt76_phy *
+mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
+ u8 band_idx)
+{
+ struct ieee80211_hw *hw = dev->phy.hw;
+ unsigned int phy_size;
+ struct mt76_phy *phy;
+
+ phy_size = ALIGN(sizeof(*phy), 8);
+ phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ phy->dev = dev;
+ phy->hw = hw;
+ phy->priv = (void *)phy + phy_size;
+ phy->band_idx = band_idx;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
+
+struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
const struct ieee80211_ops *ops, u8 band_idx)
{
@@ -552,9 +581,11 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
- ret = ieee80211_register_hw(phy->hw);
- if (ret)
- return ret;
+ if ((void *)phy == phy->hw->priv) {
+ ret = ieee80211_register_hw(phy->hw);
+ if (ret)
+ return ret;
+ }
set_bit(MT76_STATE_REGISTERED, &phy->state);
phy->dev->phys[phy->band_idx] = phy;
@@ -690,6 +721,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
INIT_LIST_HEAD(&dev->txwi_cache);
INIT_LIST_HEAD(&dev->rxwi_cache);
dev->token_size = dev->drv->token_size;
+ INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
@@ -712,7 +744,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
int ret;
dev_set_drvdata(dev->dev, dev);
- mt76_wcid_init(&dev->global_wcid);
+ mt76_wcid_init(&dev->global_wcid, phy->band_idx);
ret = mt76_phy_init(phy, hw);
if (ret)
return ret;
@@ -784,6 +816,22 @@ void mt76_free_device(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_free_device);
+static struct mt76_phy *
+mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_chanctx *ctx;
+
+ if (!hw->wiphy->n_radio)
+ return hw->priv;
+
+ if (!mlink->ctx)
+ return NULL;
+
+ ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
+ return ctx->phy;
+}
+
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
struct sk_buff *skb = phy->rx_amsdu[q].head;
@@ -929,16 +977,13 @@ void mt76_update_survey(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
- bool offchannel)
+int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
{
struct mt76_dev *dev = phy->dev;
int timeout = HZ / 5;
int ret;
- cancel_delayed_work_sync(&phy->mac_work);
-
- mutex_lock(&dev->mutex);
set_bit(MT76_RESET, &phy->state);
mt76_worker_disable(&dev->tx_worker);
@@ -954,17 +999,30 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
phy->offchannel = offchannel;
if (!offchannel)
- phy->main_chan = chandef->chan;
+ phy->main_chandef = *chandef;
- if (chandef->chan != phy->main_chan)
+ if (chandef->chan != phy->main_chandef.chan)
memset(phy->chan_state, 0, sizeof(*phy->chan_state));
- mt76_worker_enable(&dev->tx_worker);
ret = dev->drv->set_channel(phy);
clear_bit(MT76_RESET, &phy->state);
+ mt76_worker_enable(&dev->tx_worker);
mt76_worker_schedule(&dev->tx_worker);
+ return ret;
+}
+
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
+{
+ struct mt76_dev *dev = phy->dev;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ ret = __mt76_set_channel(phy, chandef, offchannel);
mutex_unlock(&dev->mutex);
return ret;
@@ -976,37 +1034,59 @@ int mt76_update_channel(struct mt76_phy *phy)
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ phy->radar_enabled = hw->conf.radar_enabled;
+
return mt76_set_channel(phy, chandef, offchannel);
}
EXPORT_SYMBOL_GPL(mt76_update_channel);
+static struct mt76_sband *
+mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
+{
+ if (*idx < phy->sband_2g.sband.n_channels)
+ return &phy->sband_2g;
+
+ *idx -= phy->sband_2g.sband.n_channels;
+ if (*idx < phy->sband_5g.sband.n_channels)
+ return &phy->sband_5g;
+
+ *idx -= phy->sband_5g.sband.n_channels;
+ if (*idx < phy->sband_6g.sband.n_channels)
+ return &phy->sband_6g;
+
+ *idx -= phy->sband_6g.sband.n_channels;
+ return NULL;
+}
+
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- struct mt76_sband *sband;
+ struct mt76_sband *sband = NULL;
struct ieee80211_channel *chan;
struct mt76_channel_state *state;
+ int phy_idx = 0;
int ret = 0;
mutex_lock(&dev->mutex);
- if (idx == 0 && dev->drv->update_survey)
- mt76_update_survey(phy);
-
- if (idx >= phy->sband_2g.sband.n_channels +
- phy->sband_5g.sband.n_channels) {
- idx -= (phy->sband_2g.sband.n_channels +
- phy->sband_5g.sband.n_channels);
- sband = &phy->sband_6g;
- } else if (idx >= phy->sband_2g.sband.n_channels) {
- idx -= phy->sband_2g.sband.n_channels;
- sband = &phy->sband_5g;
- } else {
- sband = &phy->sband_2g;
+
+ for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
+ sband = NULL;
+ phy = dev->phys[phy_idx];
+ if (!phy || phy->hw != hw)
+ continue;
+
+ sband = mt76_get_survey_sband(phy, &idx);
+
+ if (idx == 0 && phy->dev->drv->update_survey)
+ mt76_update_survey(phy);
+
+ if (sband || !hw->wiphy->n_radio)
+ break;
}
- if (idx >= sband->sband.n_channels) {
+ if (!sband) {
ret = -ENOENT;
goto out;
}
@@ -1021,7 +1101,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
if (state->noise)
survey->filled |= SURVEY_INFO_NOISE_DBM;
- if (chan == phy->main_chan) {
+ if (chan == phy->main_chandef.chan) {
survey->filled |= SURVEY_INFO_IN_USE;
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
@@ -1462,21 +1542,20 @@ mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
ewma_signal_init(&wcid->rssi);
- if (phy->band_idx == MT_BAND1)
- mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
- wcid->phy_idx = phy->band_idx;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
+ phy->num_sta++;
- mt76_wcid_init(wcid);
+ mt76_wcid_init(wcid, phy->band_idx);
out:
mutex_unlock(&dev->mutex);
return ret;
}
-void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int i, idx = wcid->idx;
@@ -1489,16 +1568,18 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_wcid_cleanup(dev, wcid);
mt76_wcid_mask_clear(dev->wcid_mask, idx);
- mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
+ phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
static void
-mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt76_dev *dev = phy->dev;
+
mutex_lock(&dev->mutex);
- __mt76_sta_remove(dev, vif, sta);
+ __mt76_sta_remove(phy, vif, sta);
mutex_unlock(&dev->mutex);
}
@@ -1511,13 +1592,17 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_dev *dev = phy->dev;
enum mt76_sta_event ev;
+ phy = mt76_vif_phy(hw, vif);
+ if (!phy)
+ return -EINVAL;
+
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(phy, vif, sta);
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
- mt76_sta_remove(dev, vif, sta);
+ mt76_sta_remove(phy, vif, sta);
if (!dev->drv->sta_event)
return 0;
@@ -1553,14 +1638,19 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
-void mt76_wcid_init(struct mt76_wcid *wcid)
+void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
+ wcid->hw_key_idx = -1;
+ wcid->phy_idx = band_idx;
+
INIT_LIST_HEAD(&wcid->tx_list);
skb_queue_head_init(&wcid->tx_pending);
skb_queue_head_init(&wcid->tx_offchannel);
INIT_LIST_HEAD(&wcid->list);
idr_init(&wcid->pktid);
+
+ INIT_LIST_HEAD(&wcid->poll_list);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
@@ -1595,13 +1685,29 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
+void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
+{
+ if (test_bit(MT76_MCU_RESET, &dev->phy.state))
+ return;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&wcid->poll_list))
+ list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
- struct mt76_phy *phy = hw->priv;
- int n_chains = hweight16(phy->chainmask);
- int delta = mt76_tx_power_nss_delta(n_chains);
+ struct mt76_phy *phy = mt76_vif_phy(hw, vif);
+ int n_chains, delta;
+
+ if (!phy)
+ return -EINVAL;
+ n_chains = hweight16(phy->chainmask);
+ delta = mt76_tx_power_nss_delta(n_chains);
*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
return 0;
@@ -1776,10 +1882,14 @@ int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
+ int i;
mutex_lock(&dev->mutex);
- *tx_ant = phy->antenna_mask;
- *rx_ant = phy->antenna_mask;
+ *tx_ant = 0;
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
+ if (dev->phys[i] && dev->phys[i]->hw == hw)
+ *tx_ant |= dev->phys[i]->chainmask;
+ *rx_ant = *tx_ant;
mutex_unlock(&dev->mutex);
return 0;
@@ -1808,30 +1918,6 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
-u16 mt76_calculate_default_rate(struct mt76_phy *phy,
- struct ieee80211_vif *vif, int rateidx)
-{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = mvif->ctx ?
- &mvif->ctx->def :
- &phy->chandef;
- int offset = 0;
-
- if (chandef->chan->band != NL80211_BAND_2GHZ)
- offset = 4;
-
- /* pick the lowest rate for hidden nodes */
- if (rateidx < 0)
- rateidx = 0;
-
- rateidx += offset;
- if (rateidx >= ARRAY_SIZE(mt76_rates))
- rateidx = offset;
-
- return mt76_rates[rateidx].hw_value;
-}
-EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
-
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
struct mt76_sta_stats *stats, bool eht)
{
@@ -1892,7 +1978,7 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
test_bit(MT76_SCANNING, &phy->state))
return MT_DFS_STATE_DISABLED;
- if (!hw->conf.radar_enabled) {
+ if (!phy->radar_enabled) {
if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
(phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
return MT_DFS_STATE_ACTIVE;
@@ -1906,3 +1992,15 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
+
+void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+
+ rcu_assign_pointer(mvif->link[0], NULL);
+ mt76_abort_scan(dev);
+ if (mvif->roc_phy)
+ mt76_abort_roc(mvif->roc_phy);
+}
+EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
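
The reworked mt76_get_survey() supports several phys behind one ieee80211_hw: mt76_get_survey_sband() consumes the global index band by band, subtracting each band's channel count, so when it returns NULL the leftover in *idx carries over to the next phy in dev->phys[]. The idiom in isolation (channel counts below are made up):

	/* Standalone illustration of the in/out survey index walk. */
	#include <stdio.h>

	static const int n_channels[] = { 14, 25, 59 };	/* 2g/5g/6g */

	static int survey_band(int *idx)
	{
		int band;

		for (band = 0; band < 3; band++) {
			if (*idx < n_channels[band])
				return band;	/* *idx: in-band offset */
			*idx -= n_channels[band];
		}
		return -1;	/* not here; leftover stays in *idx */
	}

	int main(void)
	{
		int idx = 20;
		int band = survey_band(&idx);

		printf("band=%d offset=%d\n", band, idx);
		/* prints: band=1 offset=6 */
		return 0;
	}
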
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 0b75a45ad2e8..132148f7b107 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -50,6 +50,8 @@ struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;
+struct mt76_chanctx;
+struct mt76_vif_link;
struct mt76_reg_pair {
u32 reg;
@@ -497,6 +499,8 @@ struct mt76_driver_ops {
u16 token_size;
u8 mcs_rates;
+ unsigned int link_data_size;
+
void (*update_survey)(struct mt76_phy *phy);
int (*set_channel)(struct mt76_phy *phy);
@@ -528,6 +532,15 @@ struct mt76_driver_ops {
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+
+ int (*vif_link_add)(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
+
+ void (*vif_link_remove)(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
};
struct mt76_channel_state {
@@ -636,6 +649,7 @@ struct mt76_sdio {
u8 hw_ver;
wait_queue_head_t wait;
+ int pse_mcu_quota_max;
struct {
int pse_data_quota;
int ple_data_quota;
@@ -753,7 +767,7 @@ struct mt76_testmode_data {
} rx_stats;
};
-struct mt76_vif {
+struct mt76_vif_link {
u8 idx;
u8 omac_idx;
u8 band_idx;
@@ -763,7 +777,19 @@ struct mt76_vif {
u8 basic_rates_idx;
u8 mcast_rates_idx;
u8 beacon_rates_idx;
+ bool offchannel;
struct ieee80211_chanctx_conf *ctx;
+ struct mt76_wcid *wcid;
+ struct mt76_vif_data *mvif;
+ struct rcu_head rcu_head;
+};
+
+struct mt76_vif_data {
+ struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct mt76_phy *roc_phy;
+ u16 valid_links;
+ u8 deflink_id;
};
struct mt76_phy {
@@ -772,6 +798,7 @@ struct mt76_phy {
void *priv;
unsigned long state;
+ unsigned int num_sta;
u8 band_idx;
spinlock_t tx_lock;
@@ -779,8 +806,15 @@ struct mt76_phy {
struct mt76_queue *q_tx[__MT_TXQ_MAX];
struct cfg80211_chan_def chandef;
- struct ieee80211_channel *main_chan;
+ struct cfg80211_chan_def main_chandef;
bool offchannel;
+ bool radar_enabled;
+
+ struct delayed_work roc_work;
+ struct ieee80211_vif *roc_vif;
+ struct mt76_vif_link *roc_link;
+
+ struct mt76_chanctx *chanctx;
struct mt76_channel_state *chan_state;
enum mt76_dfs_state dfs_state;
@@ -825,6 +859,7 @@ struct mt76_phy {
struct mt76_dev {
struct mt76_phy phy; /* must be first */
struct mt76_phy *phys[__MT_MAX_BAND];
+ struct mt76_phy *band_phys[NUM_NL80211_BANDS];
struct ieee80211_hw *hw;
@@ -880,7 +915,6 @@ struct mt76_dev {
spinlock_t status_lock;
u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
- u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u64 vif_mask;
@@ -909,6 +943,16 @@ struct mt76_dev {
u32 rxfilter;
+ struct delayed_work scan_work;
+ struct {
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel *chan;
+ struct ieee80211_vif *vif;
+ struct mt76_vif_link *mlink;
+ struct mt76_phy *phy;
+ int chan_idx;
+ } scan;
+
#ifdef CONFIG_NL80211_TESTMODE
const struct mt76_testmode_ops *test_ops;
struct {
@@ -1036,6 +1080,10 @@ struct mt76_ethtool_worker_info {
int sta_count;
};
+struct mt76_chanctx {
+ struct mt76_phy *phy;
+};
+
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
@@ -1156,6 +1204,10 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
if ((dev)->q_rx[i].ndesc)
+
+#define mt76_dereference(p, dev) \
+ rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))
+
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops);
@@ -1165,6 +1217,8 @@ void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);
+struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
+ u8 band_idx);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
const struct ieee80211_ops *ops,
u8 band_idx);
@@ -1191,8 +1245,6 @@ int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base, void *wed, u32 flags);
-u16 mt76_calculate_default_rate(struct mt76_phy *phy,
- struct ieee80211_vif *vif, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
int n_desc, int ring_base, void *wed,
u32 flags)
@@ -1423,15 +1475,15 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
-void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm);
+ unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
@@ -1447,11 +1499,38 @@ void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck);
+int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req);
+void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
+int mt76_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void mt76_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void mt76_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed);
+int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum ieee80211_roc_type type);
+int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1497,8 +1576,18 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
+int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel);
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
bool offchannel);
+void mt76_scan_work(struct work_struct *work);
+void mt76_abort_scan(struct mt76_dev *dev);
+void mt76_roc_complete_work(struct work_struct *work);
+void mt76_abort_roc(struct mt76_phy *phy);
+struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
+ struct ieee80211_vif *vif);
+void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct mt76_vif_link *mlink);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -1734,7 +1823,54 @@ mt76_token_put(struct mt76_dev *dev, int token)
return txwi;
}
-void mt76_wcid_init(struct mt76_wcid *wcid);
+void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
+void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid);
+
+static inline void
+mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+
+ mlink->mvif = mvif;
+ rcu_assign_pointer(mvif->link[0], mlink);
+}
+
+void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
+
+static inline struct mt76_vif_link *
+mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+
+ return mt76_dereference(mvif->link[link_id], dev);
+}
+
+static inline struct mt76_vif_link *
+mt76_vif_conf_link(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+
+ if (link_conf == &vif->bss_conf)
+ return mlink;
+
+ return mt76_dereference(mvif->link[link_conf->link_id], dev);
+}
+
+static inline struct mt76_phy *
+mt76_vif_link_phy(struct mt76_vif_link *mlink)
+{
+ struct mt76_chanctx *ctx;
+
+ if (!mlink->ctx)
+ return NULL;
+
+ ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
+
+ return ctx->phy;
+}
#endif
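
The header split replaces struct mt76_vif with a per-link struct mt76_vif_link (living in vif->drv_priv and published through mvif->link[]) plus a per-interface struct mt76_vif_data. A hedged sketch of how an add_interface path uses the new helpers; allocating mt76_vif_data with kzalloc here is purely illustrative, drivers may embed or manage it differently:

	/* Sketch: per-link setup with the new mt76_vif_* helpers. */
	static int example_add_interface(struct mt76_dev *dev,
					 struct ieee80211_vif *vif)
	{
		struct mt76_vif_link *mlink =
			(struct mt76_vif_link *)vif->drv_priv;
		struct mt76_vif_data *mvif;

		mvif = kzalloc(sizeof(*mvif), GFP_KERNEL); /* illustrative */
		if (!mvif)
			return -ENOMEM;

		mt76_vif_init(vif, mvif);	/* publish mlink as link 0 */

		/* lookups are protected by dev->mutex */
		mutex_lock(&dev->mutex);
		WARN_ON(mt76_vif_link(dev, vif, 0) != mlink);
		mutex_unlock(&dev->mutex);
		return 0;
	}
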
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index dc8a77f0a1cc..a259f4dd9540 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1277,12 +1277,7 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
msta = container_of(wcid, struct mt7603_sta, wcid);
sta = wcid_to_sta(wcid);
-
- if (list_empty(&msta->wcid.poll_list)) {
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
- }
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
@@ -1793,7 +1788,7 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
mt7603_cca_stats_reset(dev);
- min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
+ min_signal = mt76_get_min_avg_rssi(&dev->mt76, 0);
if (!min_signal) {
dev->sensitivity = 0;
dev->last_cca_adj = jiffies;
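
Note that mt76_wcid_add_poll() is not a purely mechanical replacement for the removed open-coded blocks: the helper (added in mac80211.c above) also bails out while MT76_MCU_RESET is set, so every converted call site gains that guard. Expanded, each site is now equivalent to:

	/* Equivalent expansion of mt76_wcid_add_poll() at a call site. */
	if (!test_bit(MT76_MCU_RESET, &dev->phy.state)) {
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&wcid->poll_list))
			list_add_tail(&wcid->poll_list,
				      &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}
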
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 574f74ad325d..3e8b1ec76169 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -66,11 +66,9 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
dev->mt76.vif_mask |= BIT_ULL(mvif->idx);
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.vif = mvif;
- mt76_wcid_init(&mvif->sta.wcid);
+ mt76_wcid_init(&mvif->sta.wcid, 0);
eth_broadcast_addr(bc_addr);
mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 7ba789834e8d..3ca4fae7c4b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -387,11 +387,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_sta *msta;
msta = container_of(status->wcid, struct mt7615_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
}
if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
@@ -734,7 +730,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
u16 seqno = 0;
if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -1514,11 +1510,7 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
msta = container_of(wcid, struct mt7615_sta, wcid);
sta = wcid_to_sta(wcid);
-
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 376975388007..2e7b05eeef7a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -209,6 +209,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mvif->mt76.band_idx = ext_phy;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ mvif->mt76.wcid = &mvif->sta.wcid;
if (ext_phy)
mvif->mt76.wmm_idx += 2;
@@ -224,9 +225,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mt76_wcid_init(&mvif->sta.wcid);
+ mt76_wcid_init(&mvif->sta.wcid, mvif->mt76.band_idx);
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -463,7 +462,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
int err;
@@ -1249,7 +1248,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
phy->mt76);
if (!mt7615_dev_running(dev))
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
mt7615_mutex_release(dev);
@@ -1271,7 +1270,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
if (!running) {
int err;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
if (err < 0) {
mt7615_mutex_release(dev);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 96e34277fece..b8fcd4eb3fbb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -865,8 +865,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
mvif->sta_added = true;
}
conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
- conn_state, new_entry);
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, &vif->bss_conf,
+ link_sta, conn_state, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1113,7 +1113,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf,
+ return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf, &mvif->mt76,
&mvif->sta.wcid, enable);
}
@@ -1700,7 +1700,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
};
int ret;
- dev->mt76.mcu_ops = &mt7615_mcu_ops,
+ dev->mt76.mcu_ops = &mt7615_mcu_ops;
ret = mt7615_mcu_drv_pmctrl(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 530da48ce3ea..9bdd29e8d25e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -139,7 +139,7 @@ struct mt7615_sta {
};
struct mt7615_vif {
- struct mt76_vif mt76; /* must be first */
+ struct mt76_vif_link mt76; /* must be first */
struct mt7615_sta sta;
bool sta_added;
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 9f43e673518b..9a278589df4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -83,7 +83,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev);
if (hif_suspend) {
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
if (err)
return err;
}
@@ -131,7 +131,7 @@ restore:
}
napi_enable(&mdev->tx_napi);
if (hif_suspend)
- mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, true);
return err;
}
@@ -175,7 +175,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index fbb1181c58ff..a0ca3bbdfcaf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -48,7 +48,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
txp->bss_idx = mvif->idx;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index aebfc4576aa4..f56038cd4d3a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -191,7 +191,7 @@ static int mt7663s_suspend(struct device *dev)
mt7615_firmware_offload(mdev)) {
int err;
- err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true, true);
if (err < 0)
return err;
}
@@ -230,7 +230,7 @@ static int mt7663s_resume(struct device *dev)
if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
mt7615_firmware_offload(mdev))
- err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false, true);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index b0094205ba95..a7b8acb2da83 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -147,7 +147,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
if (ret)
return ret;
- dev->mt76.mcu_ops = &mt7663s_mcu_ops,
+ dev->mt76.mcu_ops = &mt7663s_mcu_ops;
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 5020af52c68c..4aa9fa1c4a23 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -225,7 +225,7 @@ static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
mt7615_firmware_offload(dev)) {
int err;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
if (err < 0)
return err;
}
@@ -253,7 +253,7 @@ static int mt7663u_resume(struct usb_interface *intf)
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index a8b1a0f8b2d7..33c01f8ce8e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -72,7 +72,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
};
int ret;
- dev->mt76.mcu_ops = &mt7663u_mcu_ops,
+ dev->mt76.mcu_ops = &mt7663u_mcu_ops;
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 445d0f0ab779..455979476d11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -405,7 +405,7 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
mutex_unlock(&dev->mutex);
}
-void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss);
+void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss, enum nl80211_band band);
int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
int ring_base, void *wed, u32 flags);
@@ -427,7 +427,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
bool beacon, bool mcast);
bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
__le32 *txs_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
index 92ad1ecf6c9d..2d300948308d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
@@ -231,7 +231,8 @@ void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
EHT_PREP(DATA0_PE_DISAMBIGUITY_OM, PE_DISAMBIG, rxv[5]) |
EHT_PREP(DATA0_LDPC_EXTRA_SYM_OM, LDPC_EXT_SYM, rxv[4]);
- eht->data[7] |= le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+ /* iwlwifi and wireshark expect radiotap to report zero-based NSS, so subtract 1. */
+ eht->data[7] |= le32_encode_bits(status->nss - 1, IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
eht->user_info[0] |=
EHT_BITS(USER_INFO_MCS_KNOWN) |
@@ -240,7 +241,7 @@ void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
EHT_BITS(USER_INFO_BEAMFORMING_KNOWN_O) |
EHT_BITS(USER_INFO_DATA_FOR_USER) |
le32_encode_bits(status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
- le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
+ le32_encode_bits(status->nss - 1, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
eht->user_info[0] |= EHT_BITS(USER_INFO_BEAMFORMING_O);
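
Radiotap's EHT NSS fields carry NSS - 1, which the hunk above now applies to both data[7] and user_info[0]. The rule in isolation, with an illustrative 4-bit field (the mask and shift below are made up, not the actual radiotap layout):

	/* Standalone check of the zero-based NSS encoding. */
	#include <assert.h>
	#include <stdint.h>

	#define NSS_MASK	0xf000u		/* made-up 4-bit field */
	#define NSS_SHIFT	12

	static uint32_t encode_nss(unsigned int nss)	/* nss is 1-based */
	{
		return ((nss - 1) << NSS_SHIFT) & NSS_MASK;
	}

	int main(void)
	{
		assert(encode_nss(1) == 0x0000);	/* 1 stream -> 0 */
		assert(encode_nss(4) == 0x3000);	/* 4 streams -> 3 */
		return 0;
	}
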
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index a3db65254e37..e9ac8a7317a1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -9,10 +9,13 @@
#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
IEEE80211_RADIOTAP_HE_##f)
-void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss)
+void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss, enum nl80211_band band)
{
static const u8 ppet16_ppet8_ru3_ru0[] = { 0x1c, 0xc7, 0x71 };
- u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
+ u8 i, ppet_bits, ppet_size, ru_bit_mask = 0xf;
+
+ if (band == NL80211_BAND_2GHZ)
+ ru_bit_mask = 0x3;
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@@ -291,27 +294,28 @@ EXPORT_SYMBOL_GPL(mt76_connac_init_tx_queues);
})
u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = mt76_vif_conf_link(mphy->dev, conf->vif, conf);
struct cfg80211_chan_def *chandef = mvif->ctx ?
&mvif->ctx->def : &mphy->chandef;
u8 nss = 0, mode = 0, band = chandef->chan->band;
int rateidx = 0, mcast_rate;
+ int offset = 0;
- if (!vif)
+ if (!conf)
goto legacy;
if (is_mt7921(mphy->dev)) {
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(conf->basic_rates) - 1;
goto legacy;
}
if (beacon) {
struct cfg80211_bitrate_mask *mask;
- mask = &vif->bss_conf.beacon_tx_rate;
+ mask = &conf->beacon_tx_rate;
__bitrate_mask_check(he_mcs, HE_SU);
__bitrate_mask_check(vht_mcs, VHT);
@@ -323,14 +327,25 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
}
}
- mcast_rate = vif->bss_conf.mcast_rate[band];
+ mcast_rate = conf->mcast_rate[band];
if (mcast && mcast_rate > 0)
rateidx = mcast_rate - 1;
else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(conf->basic_rates) - 1;
legacy:
- rateidx = mt76_calculate_default_rate(mphy, vif, rateidx);
+ if (band != NL80211_BAND_2GHZ)
+ offset = 4;
+
+ /* pick the lowest rate for hidden nodes */
+ if (rateidx < 0)
+ rateidx = 0;
+
+ rateidx += offset;
+ if (rateidx >= ARRAY_SIZE(mt76_rates))
+ rateidx = offset;
+
+ rateidx = mt76_rates[rateidx].hw_value;
mode = rateidx >> 8;
rateidx &= GENMASK(7, 0);
out:
@@ -493,7 +508,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
bool amsdu_en = wcid->amsdu;
if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -569,7 +584,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
- u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ u16 rate = mt76_connac2_mac_tx_rate_val(mphy, &vif->bss_conf, beacon,
multicast);
u32 val = MT_TXD6_FIXED_BW;
@@ -1162,11 +1177,7 @@ void mt76_connac2_txwi_free(struct mt76_dev *dev, struct mt76_txwi_cache *t,
if (wcid && wcid->sta) {
sta = container_of((void *)wcid, struct ieee80211_sta,
drv_priv);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&wcid->poll_list))
- list_add_tail(&wcid->poll_list,
- &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ mt76_wcid_add_poll(dev, wcid);
}
}
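
With mt76_calculate_default_rate() removed, its body is folded into mt76_connac2_mac_tx_rate_val() above: outside 2 GHz the index skips the CCK entries (offset 4), a negative index clamps to the lowest rate for hidden nodes, and an out-of-range index wraps back to the band's first entry. The selection logic in isolation; the 4 CCK + 8 OFDM split mirrors the mt76_rates table and is an assumption of this sketch:

	/* Isolated copy of the legacy rate fallback selection. */
	#define N_RATES	12	/* 4 CCK + 8 OFDM, as in mt76_rates */

	static int default_rateidx(int rateidx, int is_2ghz)
	{
		int offset = is_2ghz ? 0 : 4; /* skip CCK outside 2 GHz */

		if (rateidx < 0)	/* hidden node: lowest rate */
			rateidx = 0;

		rateidx += offset;
		if (rateidx >= N_RATES)	/* wrap to band's first entry */
			rateidx = offset;

		return rateidx;
	}
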
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 864246f94088..f30cf9e71610 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_mac_enable);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
u8 bss_idx;
u8 ps_state; /* 0: device awake
@@ -232,7 +232,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rts_thresh);
void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_beacon_loss_event *event = priv;
if (mvif->idx != event->bss_idx)
@@ -273,7 +273,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
struct sk_buff *
-__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct mt76_wcid *wcid, int len)
{
struct sta_req_hdr hdr = {
@@ -329,7 +329,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
u8 omac_idx = mvif->omac_idx;
struct bss_info_omac *omac;
struct tlv *tlv;
@@ -369,10 +369,11 @@ void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_link_sta *link_sta,
int conn_state, bool newly)
{
+ struct ieee80211_vif *vif = link_conf->vif;
struct sta_rec_basic *basic;
struct tlv *tlv;
int conn_type;
@@ -390,8 +391,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
- !is_zero_ether_addr(vif->bss_conf.bssid)) {
- memcpy(basic->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ !is_zero_ether_addr(link_conf->bssid)) {
+ memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN);
basic->aid = cpu_to_le16(vif->cfg.aid);
} else {
eth_broadcast_addr(basic->peer_addr);
@@ -497,7 +498,7 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
@@ -545,7 +546,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct ieee80211_sta *sta,
void *sta_wtbl, void *wtbl_tlv)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct wtbl_generic *generic;
struct wtbl_rx *rx;
struct wtbl_spe *spe;
@@ -849,7 +850,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_vif *vif,
u8 rcpi, u8 sta_state)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_chan_def *chandef = mvif->ctx ?
&mvif->ctx->def : &mphy->chandef;
enum nl80211_band band = chandef->chan->band;
@@ -1041,7 +1042,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
{
- struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)info->vif->drv_priv;
struct ieee80211_link_sta *link_sta;
struct mt76_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
@@ -1049,6 +1050,9 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct sk_buff *skb;
int conn_state;
+ if (!info->link_conf)
+ info->link_conf = &info->vif->bss_conf;
+
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1057,7 +1061,7 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
CONN_STATE_DISCONNECT;
link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
link_sta, conn_state,
info->newly);
if (info->sta && info->enable)
@@ -1137,10 +1141,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
struct ieee80211_bss_conf *bss_conf,
+ struct mt76_vif_link *mvif,
struct mt76_wcid *wcid,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -1202,6 +1206,9 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
case NL80211_IFTYPE_STATION:
basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_P2P_GO);
+ break;
case NL80211_IFTYPE_ADHOC:
basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
break;
@@ -1263,7 +1270,7 @@ int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_wed_update);
-int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx)
{
@@ -1364,7 +1371,7 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_bss_conf *conf,
enum nl80211_band band)
{
const struct ieee80211_sta_eht_cap *eht_cap;
@@ -1375,9 +1382,9 @@ u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
mode |= PHY_MODE_AX_6G;
sband = phy->hw->wiphy->bands[band];
- eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, conf->vif->type);
- if (!eht_cap || !eht_cap->has_eht || !vif->bss_conf.eht_support)
+ if (!eht_cap || !eht_cap->has_eht || !conf->eht_support)
return mode;
switch (band) {
@@ -1401,7 +1408,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_chan_def *chandef = mvif->ctx ?
&mvif->ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
@@ -1450,7 +1457,7 @@ mt76_connac_mcu_uni_bss_he_tlv(struct mt76_phy *phy, struct ieee80211_vif *vif,
he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
}
-int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_chanctx_conf *ctx)
{
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
@@ -1538,7 +1545,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
bool enable,
struct ieee80211_chanctx_conf *ctx)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_dev *mdev = phy->dev;
@@ -1664,7 +1671,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_scan_request *sreq = &scan_req->req;
int n_ssids = 0, err, i, duration;
int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
@@ -1770,7 +1777,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_hw_scan);
int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
u8 seq_num;
u8 is_ext_channel;
@@ -1796,7 +1803,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *sreq)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_sched_scan_req *req;
@@ -2208,7 +2215,7 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
+ struct mt76_vif_link *vif,
struct ieee80211_bss_conf *info)
{
struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif,
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);
int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
struct mt76_phy *phy = hw->priv;
struct {
@@ -2318,7 +2325,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *key)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_gtk_rekey_tlv *gtk_tlv;
struct mt76_phy *phy = hw->priv;
struct sk_buff *skb;
@@ -2359,7 +2366,7 @@ static int
mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif,
bool suspend)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
@@ -2385,7 +2392,7 @@ int
mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
bool suspend)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
@@ -2414,7 +2421,7 @@ mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
bool enable, u8 mdtim,
bool wow_suspend)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
@@ -2445,7 +2452,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
u8 index, bool enable,
struct cfg80211_pkt_pattern *pattern)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_wow_pattern_tlv *ptlv;
struct sk_buff *skb;
struct req_hdr {
@@ -2477,7 +2484,7 @@ int
mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
bool suspend, struct cfg80211_wowlan *wowlan)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -2527,7 +2534,7 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_wow_ctrl);
-int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
+int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp)
{
struct {
struct {
@@ -2559,7 +2566,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
req.hdr.hif_type = 0;
return mt76_mcu_send_msg(dev, MCU_UNI_CMD(HIF_CTRL), &req,
- sizeof(req), true);
+ sizeof(req), wait_resp);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);
@@ -2686,7 +2693,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct sk_buff *skb;
int ret;
@@ -2708,7 +2715,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
-void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif_link *mvif)
{
struct bss_info_ext_bss *ext;
int ext_bss_idx, tsf_offset;
@@ -2732,7 +2739,7 @@ int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
struct mt76_phy *phy, u16 wlan_idx,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
struct bss_info_basic *bss;
struct tlv *tlv;
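
mt76_connac_mcu_set_hif_suspend() gains a wait_resp flag; every caller converted in this diff still passes true, so behaviour is unchanged here. The flag appears intended for paths that must not block on the MCU ack while the bus is going down; whether a given chip needs that is an assumption of this sketch:

	/* Sketch: fire-and-forget HIF suspend on a bus that is about
	 * to be quiesced. Illustrative only.
	 */
	static int example_bus_suspend(struct mt76_dev *dev)
	{
		return mt76_connac_mcu_set_hif_suspend(dev, true, false);
	}
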
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 1b0e80dfc346..43237e518373 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -1043,12 +1043,14 @@ enum {
MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
MCU_EXT_EVENT_WA_TX_STAT = 0x74,
MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
+ MCU_EXT_EVENT_WF_RF_PIN_CTRL = 0x9a,
MCU_EXT_EVENT_MURU_CTRL = 0x9f,
};
/* unified event table */
enum {
MCU_UNI_EVENT_RESULT = 0x01,
+ MCU_UNI_EVENT_HIF_CTRL = 0x03,
MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04,
MCU_UNI_EVENT_ACCESS_REG = 0x6,
MCU_UNI_EVENT_IE_COUNTDOWN = 0x09,
@@ -1251,6 +1253,7 @@ enum {
MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab,
MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac,
MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
+ MCU_EXT_CMD_WF_RF_PIN_CTRL = 0xbd,
};
enum {
@@ -1756,6 +1759,7 @@ struct mt76_sta_cmd_info {
struct mt76_wcid *wcid;
struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link_conf;
bool offload_fw;
bool enable;
@@ -1876,10 +1880,10 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
struct sk_buff *
-__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct mt76_wcid *wcid, int len);
static inline struct sk_buff *
-mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct mt76_wcid *wcid)
{
return __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
@@ -1901,7 +1905,7 @@ mt76_connac_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_link_sta *link_sta,
int state, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
@@ -1938,13 +1942,14 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
bool enable, bool tx);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
struct ieee80211_bss_conf *bss_conf,
+ struct mt76_vif_link *mvif,
struct mt76_wcid *wcid,
bool enable);
-int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy,
- struct mt76_vif *vif,
+ struct mt76_vif_link *vif,
struct ieee80211_chanctx_conf *ctx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
@@ -1975,7 +1980,7 @@ int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
struct ieee80211_vif *vif,
bool enable);
int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
+ struct mt76_vif_link *vif,
struct ieee80211_bss_conf *info);
int mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
bool suspend);
@@ -1988,7 +1993,7 @@ int mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
struct ieee80211_vif *vif,
bool enable, u8 mdtim,
bool wow_suspend);
-int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
+int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp);
void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_sta_state_dp(struct mt76_dev *dev,
@@ -2014,7 +2019,7 @@ mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band,
struct ieee80211_link_sta *sta);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_bss_conf *conf,
enum nl80211_band band);
int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
@@ -2022,7 +2027,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd);
-void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif);
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif_link *mvif);
void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif);
int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
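
The header updates follow the MLO direction of the series: TLV builders now take a struct ieee80211_bss_conf (one link) instead of the whole vif, and mt76_connac_mcu_uni_add_dev() receives its mt76_vif_link explicitly rather than assuming vif->drv_priv. A caller sketch for a per-link path, using the link resolution helpers added to mt76.h above; the function name is hypothetical:

	/* Sketch: per-link device add; caller holds dev->mutex. */
	static int example_add_dev_link(struct mt76_phy *phy,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *link_conf)
	{
		struct mt76_vif_link *mlink;

		mlink = mt76_vif_conf_link(phy->dev, vif, link_conf);
		if (!mlink)
			return -EINVAL;

		return mt76_connac_mcu_uni_add_dev(phy, link_conf, mlink,
						   mlink->wcid, true);
	}
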
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index d543ef3de65b..ec554a059216 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -1071,7 +1071,7 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, 0);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 4a49a3036a46..7d840ad4ae65 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -423,7 +423,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
priv = msta->vif;
vif = container_of(priv, struct ieee80211_vif, drv_priv);
- __mt76_sta_remove(&dev->mt76, vif, sta);
+ __mt76_sta_remove(&dev->mphy, vif, sta);
memset(msta, 0, sizeof(*msta));
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 8020446be37b..4fb30589fa7a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -287,8 +287,7 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
- mvif->group_wcid.hw_key_idx = -1;
- mt76_wcid_init(&mvif->group_wcid);
+ mt76_wcid_init(&mvif->group_wcid, 0);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index f84517d932dc..e2b4cf30dc44 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -280,7 +280,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, 0);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index bfdbc15abaa9..928e0b07a9bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -2,9 +2,14 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/firmware.h>
+#include <linux/moduleparam.h>
#include "mt7915.h"
#include "eeprom.h"
+static bool enable_6ghz;
+module_param(enable_6ghz, bool, 0644);
+MODULE_PARM_DESC(enable_6ghz, "Enable 6 GHz instead of 5 GHz on hardware that supports both");
+
static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
@@ -170,8 +175,20 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
phy->mt76->cap.has_6ghz = true;
return;
case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
- phy->mt76->cap.has_5ghz = true;
- phy->mt76->cap.has_6ghz = true;
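+ /* note: the radio supports both 5 and 6 GHz here, but only one band
+ * can be active; rewrite the EEPROM band selection to the chosen band
+ */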
+ if (enable_6ghz) {
+ phy->mt76->cap.has_6ghz = true;
+ u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
+ MT_EE_V2_BAND_SEL_6GHZ,
+ MT_EE_WIFI_CONF0_BAND_SEL);
+ } else {
+ phy->mt76->cap.has_5ghz = true;
+ u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
+ MT_EE_V2_BAND_SEL_5GHZ,
+ MT_EE_WIFI_CONF0_BAND_SEL);
+ }
+ /* force buffer mode so the adjusted band selection takes effect */
+ dev->flash_mode = true;
+
return;
default:
phy->mt76->cap.has_2ghz = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 6bef96e3d2a3..bee4beabc4eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -53,7 +53,9 @@ static ssize_t mt7915_thermal_temp_show(struct device *dev,
switch (i) {
case 0:
+ mutex_lock(&phy->dev->mt76.mutex);
temperature = mt7915_mcu_get_temperature(phy);
+ mutex_unlock(&phy->dev->mt76.mutex);
if (temperature < 0)
return temperature;
/* display in millidegrees Celsius */
@@ -82,7 +84,7 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
return ret;
mutex_lock(&phy->dev->mt76.mutex);
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 60 * 1000, 130 * 1000), 1000);
if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
@@ -95,9 +97,8 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
}
phy->throttle_temp[i - 1] = val;
- mutex_unlock(&phy->dev->mt76.mutex);
-
ret = mt7915_mcu_set_thermal_protect(phy);
+ mutex_unlock(&phy->dev->mt76.mutex);
if (ret)
return ret;
@@ -159,7 +160,9 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
* cooling_device convention: 0 = no cooling, more = more cooling
* mcu convention: 1 = max cooling, more = less cooling
*/
+ mutex_lock(&phy->dev->mt76.mutex);
ret = mt7915_mcu_set_thermal_throttling(phy, throttling);
+ mutex_unlock(&phy->dev->mt76.mutex);
if (ret)
return ret;
@@ -512,6 +515,15 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
MT_WF_RMAC_MIB_QOS23_BACKOFF);
+ /* clear backoff time for Tx duration */
+ mt76_clear(dev, MT_WTBLOFF_TOP_ACR(band),
+ MT_WTBLOFF_TOP_ADM_BACKOFFTIME);
+
+ /* exclude estimated backoff time for Tx duration on MT7915 */
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_AGG_ATCR0(band),
+ MT_AGG_ATCR_MAC_BFF_TIME_EN);
+
/* clear backoff time and set software compensation for OBSS time */
mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
@@ -1114,7 +1126,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -1239,14 +1251,14 @@ int mt7915_register_device(struct mt7915_dev *dev)
if (ret)
goto unreg_dev;
- ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
-
if (phy2) {
ret = mt7915_register_ext_phy(dev, phy2);
if (ret)
goto unreg_thermal;
}
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
dev->recovery.hw_init_done = true;
ret = mt7915_init_debugfs(&dev->phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index cf77ce0c8759..13bdc0a7174c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -333,11 +333,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
if (status->wcid) {
msta = container_of(status->wcid, struct mt7915_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
}
status->freq = mphy->chandef.chan->center_freq;
@@ -927,11 +923,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
continue;
msta = container_of(wcid, struct mt7915_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
continue;
}
@@ -1040,10 +1032,7 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
out:
rcu_read_unlock();
@@ -1163,7 +1152,7 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
u8 band = phy->mt76->band_idx;
- int eifs_ofdm = 360, sifs = 10, offset;
+ int eifs_ofdm = 84, sifs = 10, offset;
bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
@@ -1388,6 +1377,8 @@ mt7915_mac_restart(struct mt7915_dev *dev)
if (dev_is_pci(mdev->dev)) {
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
if (dev->hif2) {
+ mt76_wr(dev, MT_PCIE_RECOG_ID,
+ dev->hif2->index | MT_PCIE_RECOG_ID_SEM);
if (is_mt7915(mdev))
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
else
@@ -1442,9 +1433,11 @@ static void
mt7915_mac_full_reset(struct mt7915_dev *dev)
{
struct mt76_phy *ext_phy;
+ struct mt7915_phy *phy2;
int i;
ext_phy = dev->mt76.phys[MT_BAND1];
+ phy2 = ext_phy ? ext_phy->priv : NULL;
dev->recovery.hw_full_reset = true;
@@ -1474,6 +1467,9 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
dev->mt76.vif_mask = 0;
+ dev->phy.omac_mask = 0;
+ if (phy2)
+ phy2->omac_mask = 0;
i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
dev->mt76.global_wcid.idx = i;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index c6f498fc81ff..3aa31c5cefa6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -233,6 +233,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
mvif->mt76.band_idx = phy->mt76->band_idx;
+ mvif->mt76.wcid = &mvif->sta.wcid;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
if (ext_phy)
@@ -246,16 +247,15 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
- if (idx < 0)
- return -ENOSPC;
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
INIT_LIST_HEAD(&mvif->sta.rc_list);
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = ext_phy;
- mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.wcid);
+ mt76_wcid_init(&mvif->sta.wcid, phy->mt76->band_idx);
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -366,8 +366,12 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int err = 0;
- if (sta && !wcid->sta)
+ if (sta && !wcid->sta) {
+ if (cmd != SET_KEY)
+ return 0;
+
return -EOPNOTSUPP;
+ }
/* The hardware does not support per-STA RX GTK, fall back
* to software mode for these.
@@ -619,8 +623,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC)
set_bss_info = vif->cfg.assoc;
if (changed & BSS_CHANGED_BEACON_ENABLED &&
+ info->enable_beacon &&
vif->type != NL80211_IFTYPE_AP)
- set_bss_info = set_sta = info->enable_beacon;
+ set_bss_info = set_sta = 1;
if (set_bss_info == 1)
mt7915_mcu_add_bss_info(phy, vif, true);
@@ -631,7 +636,11 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
if (changed & BSS_CHANGED_ERP_SLOT) {
- int slottime = info->use_short_slot ? 9 : 20;
+ int slottime = 9;
+
+ if (phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ &&
+ !info->use_short_slot)
+ slottime = 20;
if (slottime != phy->slottime) {
phy->slottime = slottime;
@@ -758,6 +767,57 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return 0;
}
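+/* iterator context: drop a station entry with the given address from
+ * other AP interfaces
+ */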
+struct drop_sta_iter {
+ struct mt7915_dev *dev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ u8 sta_addr[ETH_ALEN];
+};
+
+static void
+__mt7915_drop_sta(void *ptr, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct drop_sta_iter *data = ptr;
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+
+ if (vif == data->vif || vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ sta = ieee80211_find_sta_by_ifaddr(data->hw, data->sta_addr, mac);
+ if (!sta)
+ return;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ mt7915_mcu_add_sta(data->dev, vif, sta, CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+}
+
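+/* on authorization, remove any stale entry for the same station address
+ * from every other AP interface on both bands
+ */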
+static void
+mt7915_drop_other_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_phy *ext_phy = dev->mt76.phys[MT_BAND1];
+ struct drop_sta_iter data = {
+ .dev = dev,
+ .hw = dev->mphy.hw,
+ .vif = vif,
+ };
+
+ if (vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ memcpy(data.sta_addr, sta->addr, ETH_ALEN);
+ ieee80211_iterate_active_interfaces(data.hw, 0, __mt7915_drop_sta, &data);
+
+ if (!ext_phy)
+ return;
+
+ data.hw = ext_phy->hw;
+ ieee80211_iterate_active_interfaces(data.hw, 0, __mt7915_drop_sta, &data);
+}
+
int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
@@ -786,6 +846,7 @@ int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return 0;
case MT76_STA_EVENT_AUTHORIZE:
+ mt7915_drop_other_sta(dev, vif, sta);
return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false);
case MT76_STA_EVENT_DISASSOC:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 87d0dd040001..9d790f234e82 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -194,6 +194,25 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return ret;
}
+static void
+mt7915_mcu_set_timeout(struct mt76_dev *mdev, int cmd)
+{
+ if ((cmd & __MCU_CMD_FIELD_ID) != MCU_CMD_EXT_CID)
+ return;
+
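+ /* these commands can take longer for the firmware to complete;
+ * extend the MCU timeout to two seconds
+ */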
+ switch (FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd)) {
+ case MCU_EXT_CMD_THERMAL_CTRL:
+ case MCU_EXT_CMD_GET_MIB_INFO:
+ case MCU_EXT_CMD_PHY_STAT_INFO:
+ case MCU_EXT_CMD_STA_REC_UPDATE:
+ case MCU_EXT_CMD_BSS_INFO_UPDATE:
+ mdev->mcu.timeout = 2 * HZ;
+ return;
+ default:
+ break;
+ }
+}
+
static int
mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
@@ -208,6 +227,8 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
qid = MT_MCUQ_WM;
+ mt7915_mcu_set_timeout(mdev, cmd);
+
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
@@ -1678,7 +1699,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta,
conn_state, newly);
/* tag order is in accordance with firmware dependency. */
if (sta && conn_state != CONN_STATE_DISCONNECT) {
@@ -2388,7 +2409,7 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
- .max_retry = 3,
+ .max_retry = 1,
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message,
.mcu_skb_send_msg = mt7915_mcu_send_message,
@@ -3150,8 +3171,13 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
res = (struct mt7915_mcu_mib *)(skb->data + offs_cc);
#define __res_u64(s) le64_to_cpu(res[s].data)
- /* subtract Tx backoff time from Tx duration */
- cc_tx = is_mt7915(&dev->mt76) ? __res_u64(1) - __res_u64(4) : __res_u64(1);
+ /* subtract Tx backoff time from Tx duration for MT7915 */
+ if (is_mt7915(&dev->mt76)) {
+ u64 backoff = (__res_u64(4) & 0xffff) * 79; /* 16us + 9us * 7 */
+ cc_tx = __res_u64(1) - backoff;
+ } else {
+ cc_tx = __res_u64(1);
+ }
if (chan_switch)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 44e112b8b5b3..876f0692850a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -138,6 +138,7 @@ static const u32 mt7915_offs[] = {
[AGG_ACR0] = 0x084,
[AGG_ACR4] = 0x08c,
[AGG_MRCR] = 0x098,
+ [AGG_ATCR0] = 0x0ec,
[AGG_ATCR1] = 0x0f0,
[AGG_ATCR3] = 0x0f4,
[LPON_UTTR0] = 0x080,
@@ -212,6 +213,7 @@ static const u32 mt7916_offs[] = {
[AGG_ACR0] = 0x054,
[AGG_ACR4] = 0x05c,
[AGG_MRCR] = 0x068,
+ [AGG_ATCR0] = 0x1a4,
[AGG_ATCR1] = 0x1a8,
[AGG_ATCR3] = 0x080,
[LPON_UTTR0] = 0x360,
@@ -484,7 +486,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
continue;
ofs = addr - dev->reg.map[i].phys;
- if (ofs > dev->reg.map[i].size)
+ if (ofs >= dev->reg.map[i].size)
continue;
return dev->reg.map[i].maps + ofs;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index ac0b1f0eb27c..533939f2b7ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -166,7 +166,7 @@ struct mt7915_vif_cap {
};
struct mt7915_vif {
- struct mt76_vif mt76; /* must be first */
+ struct mt76_vif_link mt76; /* must be first */
struct mt7915_vif_cap cap;
struct mt7915_sta sta;
@@ -191,6 +191,7 @@ struct mt7915_hif {
struct device *dev;
void __iomem *regs;
int irq;
+ u32 index;
};
struct mt7915_phy {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 39132894e8ea..07b0a5766eab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -42,6 +42,7 @@ static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
continue;
get_device(hif->dev);
+ hif->index = idx;
goto out;
}
hif = NULL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 89ac8e6707b8..c5ec63a25a42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -66,6 +66,7 @@ enum offs_rev {
AGG_ACR0,
AGG_ACR4,
AGG_MRCR,
+ AGG_ATCR0,
AGG_ATCR1,
AGG_ATCR3,
LPON_UTTR0,
@@ -254,6 +255,9 @@ enum offs_rev {
#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30)
#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24)
+#define MT_WTBLOFF_TOP_ACR(_band) MT_WTBLOFF_TOP(_band, 0x010)
+#define MT_WTBLOFF_TOP_ADM_BACKOFFTIME BIT(29)
+
/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
@@ -505,6 +509,9 @@ enum offs_rev {
#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
+#define MT_AGG_ATCR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR0))
+#define MT_AGG_ATCR_MAC_BFF_TIME_EN BIT(30)
+
#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index d1d64fa7d35d..14e17dc90256 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -137,6 +137,13 @@ mt7921_regd_notifier(struct wiphy *wiphy,
dev->mt76.region = request->dfs_region;
dev->country_ie_env = request->country_ie_env;
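+ /* honor a user-set regdomain: ignore country IEs unless the user
+ * reverted to the world domain ("00")
+ */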
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER) {
+ if (dev->mt76.alpha2[0] == '0' && dev->mt76.alpha2[1] == '0')
+ wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+ else
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
if (pm->suspended)
return;
@@ -227,6 +234,7 @@ static void mt7921_init_work(struct work_struct *work)
mt76_set_stream_caps(&dev->mphy, true);
mt7921_set_stream_he_caps(&dev->phy);
+ mt792x_config_mac_addr_list(dev);
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 047106b65d2b..5dd57de59f27 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -216,11 +216,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (status->wcid) {
mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
msta = container_of(mlink, struct mt792x_sta, deflink);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
}
mt792x_get_status_freq_info(status, chfreq);
@@ -479,10 +475,7 @@ void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
out:
rcu_read_unlock();
@@ -529,11 +522,7 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
continue;
mlink = container_of(wcid, struct mt792x_link_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
continue;
}
@@ -647,6 +636,7 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
ieee80211_disconnect(vif, true);
mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->bss_conf.mt76,
&mvif->sta.deflink.wcid, true);
mt7921_mcu_set_tx(dev, vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index a7f5bfbc02ed..13e58c328aff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -147,7 +147,7 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -252,6 +252,11 @@ int __mt7921_start(struct mt792x_phy *phy)
return err;
}
+ if (phy->chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN) {
+ mt7921_mcu_wf_rf_pin_ctrl(phy, WF_RF_PIN_INIT);
+ wiphy_rfkill_start_polling(mphy->hw->wiphy);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(__mt7921_start);
@@ -308,6 +313,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->bss_conf.mt76,
&mvif->sta.deflink.wcid, true);
if (ret)
goto out;
@@ -319,10 +325,8 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvif->sta.deflink.wcid.poll_list);
mvif->sta.deflink.wcid.idx = idx;
- mvif->sta.deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
- mvif->sta.deflink.wcid.hw_key_idx = -1;
mvif->sta.deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.deflink.wcid);
+ mt76_wcid_init(&mvif->sta.deflink.wcid, mvif->bss_conf.mt76.band_idx);
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -338,6 +342,9 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
if (phy->chip_cap & MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN)
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+ INIT_WORK(&mvif->csa_work, mt7921_csa_work);
+ timer_setup(&mvif->csa_timer, mt792x_csa_timer, 0);
out:
mt792x_mutex_release(dev);
@@ -360,9 +367,9 @@ void mt7921_roc_abort_sync(struct mt792x_dev *dev)
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
- ieee80211_iterate_active_interfaces(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7921_roc_iter, (void *)phy);
+ ieee80211_iterate_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_roc_iter, (void *)phy);
}
EXPORT_SYMBOL_GPL(mt7921_roc_abort_sync);
@@ -531,7 +538,13 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
} else {
if (idx == *wcid_keyidx)
*wcid_keyidx = -1;
- goto out;
+
+ /* For security reasons we don't trigger the key deletion when
+ * reassociating. But we should trigger the deletion process
+ * to avoid using an incorrect cipher after disconnection.
+ */
+ if (vif->type != NL80211_IFTYPE_STATION || vif->cfg.assoc)
+ goto out;
}
mt76_wcid_key_setup(&dev->mt76, wcid, key);
@@ -858,6 +871,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ mt7921_roc_abort_sync(dev);
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->deflink.wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
@@ -1334,6 +1348,9 @@ static int
mt7921_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ dev->new_ctx = ctx;
return 0;
}
@@ -1341,6 +1358,10 @@ static void
mt7921_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ if (dev->new_ctx == ctx)
+ dev->new_ctx = NULL;
}
static void
@@ -1391,6 +1412,101 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
mt7921_abort_roc(mvif->phy, mvif);
}
+static int mt7921_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ return mt792x_assign_vif_chanctx(hw, vifs->vif, vifs->link_conf,
+ vifs->new_ctx);
+}
+
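+/* deferred CSA handler: program the pending channel context into the
+ * MCU and report the switch result back to mac80211
+ */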
+void mt7921_csa_work(struct work_struct *work)
+{
+ struct mt792x_vif *mvif;
+ struct mt792x_dev *dev;
+ struct ieee80211_vif *vif;
+ int ret;
+
+ mvif = (struct mt792x_vif *)container_of(work, struct mt792x_vif,
+ csa_work);
+ dev = mvif->phy->dev;
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ mt792x_mutex_acquire(dev);
+ ret = mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->bss_conf.mt76,
+ dev->new_ctx);
+ mt792x_mutex_release(dev);
+
+ ieee80211_chswitch_done(vif, !ret, 0);
+}
+
+static int mt7921_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ return -EOPNOTSUPP;
+
+ /* Avoid beacon loss due to the CAC (Channel Availability Check) time
+ * of the AP.
+ */
+ if (!cfg80211_chandef_usable(hw->wiphy, &chsw->chandef,
+ IEEE80211_CHAN_RADAR))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void mt7921_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ u16 beacon_interval = vif->bss_conf.beacon_int;
+
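+ /* schedule the switch once the CSA countdown (count beacon intervals)
+ * has elapsed
+ */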
+ mvif->csa_timer.expires = TU_TO_EXP_TIME(beacon_interval * chsw->count);
+ add_timer(&mvif->csa_timer);
+}
+
+static void mt7921_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+
+ del_timer_sync(&mvif->csa_timer);
+ cancel_work_sync(&mvif->csa_work);
+}
+
+static void mt7921_channel_switch_rx_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ u16 beacon_interval = vif->bss_conf.beacon_int;
+
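+ /* re-arm the CSA timer from the latest beacon countdown while the
+ * announced channel still matches the pending context
+ */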
+ if (cfg80211_chandef_identical(&chsw->chandef,
+ &dev->new_ctx->def) &&
+ chsw->count) {
+ mod_timer(&mvif->csa_timer,
+ TU_TO_EXP_TIME(beacon_interval * chsw->count));
+ }
+}
+
+static void mt7921_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ int ret = 0;
+
+ mt792x_mutex_acquire(phy->dev);
+ ret = mt7921_mcu_wf_rf_pin_ctrl(phy, WF_RF_PIN_POLL);
+ mt792x_mutex_release(phy->dev);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, !ret);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt792x_tx,
.start = mt7921_start,
@@ -1441,6 +1557,7 @@ const struct ieee80211_ops mt7921_ops = {
#endif /* CONFIG_PM */
.flush = mt792x_flush,
.set_sar_specs = mt7921_set_sar_specs,
+ .rfkill_poll = mt7921_rfkill_poll,
.remain_on_channel = mt7921_remain_on_channel,
.cancel_remain_on_channel = mt7921_cancel_remain_on_channel,
.add_chanctx = mt7921_add_chanctx,
@@ -1450,6 +1567,11 @@ const struct ieee80211_ops mt7921_ops = {
.unassign_vif_chanctx = mt792x_unassign_vif_chanctx,
.mgd_prepare_tx = mt7921_mgd_prepare_tx,
.mgd_complete_tx = mt7921_mgd_complete_tx,
+ .switch_vif_chanctx = mt7921_switch_vif_chanctx,
+ .pre_channel_switch = mt7921_pre_channel_switch,
+ .channel_switch = mt7921_channel_switch,
+ .abort_channel_switch = mt7921_abort_channel_switch,
+ .channel_switch_rx_beacon = mt7921_channel_switch_rx_beacon,
};
EXPORT_SYMBOL_GPL(mt7921_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 02c1de8620a7..86bd33b916a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -61,6 +61,12 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
skb_pull(skb, sizeof(*rxd));
event = (struct mt76_connac_mcu_reg_event *)skb->data;
ret = (int)le32_to_cpu(event->val);
+ } else if (cmd == MCU_EXT_CMD(WF_RF_PIN_CTRL)) {
+ struct mt7921_wf_rf_pin_ctrl_event *event;
+
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7921_wf_rf_pin_ctrl_event *)skb->data;
+ ret = (int)event->result;
} else {
skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
}
@@ -174,7 +180,7 @@ static void
mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_beacon_loss_event *event = priv;
if (mvif->idx != event->bss_idx)
@@ -507,7 +513,10 @@ static void mt7921_mcu_parse_tx_resource(struct mt76_dev *dev,
tx_res = (struct mt7921_tx_resource *)skb->data;
sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota);
- sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota);
+ sdio->pse_mcu_quota_max = le32_to_cpu(tx_res->pse_mcu_quota);
+ /* the MCU quota consumed by this command itself must be taken into account */
+ sdio->sched.pse_mcu_quota =
+ sdio->sched.pse_mcu_quota ? sdio->pse_mcu_quota_max : sdio->pse_mcu_quota_max - 1;
sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota);
sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size);
sdio->sched.deficit = tx_res->pp_padding;
@@ -1122,7 +1131,7 @@ int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr)
int mt7921_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 band_idx;
@@ -1424,6 +1433,21 @@ int mt7921_mcu_get_temperature(struct mt792x_phy *phy)
sizeof(req), true);
}
+int mt7921_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy, u8 action)
+{
+ struct mt792x_dev *dev = phy->dev;
+ struct {
+ u8 action;
+ u8 value;
+ } req = {
+ .action = action,
+ .value = 0,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WF_RF_PIN_CTRL), &req,
+ sizeof(req), !!action);
+}
+
int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index f9a259ee6b82..2834c6c53e58 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -74,6 +74,11 @@ struct mt7921_txpwr_event {
struct mt7921_txpwr txpwr;
} __packed;
+struct mt7921_wf_rf_pin_ctrl_event {
+ u8 result;
+ u8 value;
+} __packed;
+
enum {
TM_SWITCH_MODE,
TM_SET_AT_CMD,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 16c89815c0b8..c88793fcec64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -31,6 +31,9 @@
#define EXT_CMD_RADIO_ON_LED 0x2
#define EXT_CMD_RADIO_OFF_LED 0x3
+#define WF_RF_PIN_INIT 0x0
+#define WF_RF_PIN_POLL 0x1
+
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
@@ -202,6 +205,7 @@ void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb);
int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value);
+int mt7921_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy, u8 action);
static inline u32
mt7921_reg_map_l1(struct mt792x_dev *dev, u32 addr)
@@ -273,6 +277,7 @@ int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev,
bool enable);
void mt7921_scan_work(struct work_struct *work);
void mt7921_roc_work(struct work_struct *work);
+void mt7921_csa_work(struct work_struct *work);
int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif);
void mt7921_coredump_work(struct work_struct *work);
int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 67723c22aea6..ba870e1b05fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -42,6 +42,10 @@ static void mt7921e_unregister_device(struct mt792x_dev *dev)
{
int i;
struct mt76_connac_pm *pm = &dev->pm;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ if (dev->phy.chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN)
+ wiphy_rfkill_stop_polling(hw->wiphy);
cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
@@ -435,7 +439,7 @@ static int mt7921_pci_suspend(struct device *device)
if (err < 0)
goto restore_suspend;
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
if (err)
goto restore_suspend;
@@ -481,7 +485,7 @@ restore_napi:
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, true);
restore_suspend:
pm->suspended = false;
@@ -532,7 +536,7 @@ static int mt7921_pci_resume(struct device *device)
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
if (err < 0)
goto failed;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 95f526f7bb99..45b9f35aab17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -240,7 +240,7 @@ static int mt7921s_suspend(struct device *__dev)
mt76s_txqs_empty(&dev->mt76), 5 * HZ);
/* the SDIO bus is expected to be idle at this point */
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
if (err)
goto restore_worker;
@@ -258,7 +258,7 @@ static int mt7921s_suspend(struct device *__dev)
restore_txrx_worker:
mt76_worker_enable(&mdev->sdio.net_worker);
mt76_worker_enable(&mdev->sdio.txrx_worker);
- mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, true);
restore_worker:
mt76_worker_enable(&mdev->tx_worker);
@@ -302,7 +302,7 @@ static int mt7921s_resume(struct device *__dev)
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(mdev, false);
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 8aa4f0203208..fe9751851ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
/* Netgear, Inc. [A8000,AXE3000] */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* TP-Link TXE50UH */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ },
};
@@ -257,7 +260,7 @@ static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
pm->suspended = true;
flush_work(&dev->reset_work);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
if (err)
goto failed;
@@ -307,7 +310,7 @@ static int mt7921u_resume(struct usb_interface *intf)
if (err < 0)
goto failed;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 039949b344b9..f41ca4248497 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -57,6 +57,22 @@ static int mt7925_thermal_init(struct mt792x_phy *phy)
mt7925_hwmon_groups);
return PTR_ERR_OR_ZERO(hwmon);
}
+
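+/* replay a regulatory update that was deferred, e.g. while suspended:
+ * push CLC data, channel domain and SAR power to the firmware
+ */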
+void mt7925_regd_update(struct mt792x_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_hw *hw = mdev->hw;
+
+ if (!dev->regd_change)
+ return;
+
+ mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env);
+ mt7925_mcu_set_channel_domain(hw->priv);
+ mt7925_set_tx_sar_pwr(hw, NULL);
+ dev->regd_change = false;
+}
+EXPORT_SYMBOL_GPL(mt7925_regd_update);
+
static void
mt7925_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
@@ -64,6 +80,7 @@ mt7925_regd_notifier(struct wiphy *wiphy,
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_connac_pm *pm = &dev->pm;
/* allow world regdom at the first boot only */
if (!memcmp(req->alpha2, "00", 2) &&
@@ -78,12 +95,17 @@ mt7925_regd_notifier(struct wiphy *wiphy,
memcpy(mdev->alpha2, req->alpha2, 2);
mdev->region = req->dfs_region;
dev->country_ie_env = req->country_ie_env;
+ dev->regd_change = true;
+ if (pm->suspended)
+ return;
+
+ dev->regd_in_progress = true;
mt792x_mutex_acquire(dev);
- mt7925_mcu_set_clc(dev, req->alpha2, req->country_ie_env);
- mt7925_mcu_set_channel_domain(hw->priv);
- mt7925_set_tx_sar_pwr(hw, NULL);
+ mt7925_regd_update(dev);
mt792x_mutex_release(dev);
+ dev->regd_in_progress = false;
+ wake_up(&dev->wait);
}
static void mt7925_mac_init_basic_rates(struct mt792x_dev *dev)
@@ -178,6 +200,7 @@ static void mt7925_init_work(struct work_struct *work)
mt76_set_stream_caps(&dev->mphy, true);
mt7925_set_stream_he_eht_caps(&dev->phy);
+ mt792x_config_mac_addr_list(dev);
ret = mt7925_init_mlo_caps(&dev->phy);
if (ret) {
@@ -225,6 +248,7 @@ int mt7925_register_device(struct mt792x_dev *dev)
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
+ init_waitqueue_head(&dev->wait);
spin_lock_init(&dev->pm.txq_lock);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt792x_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7925_scan_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index 634c42bbf23f..c871d2f9688b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -49,7 +49,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
break;
mlink = list_first_entry(&sta_poll_list,
struct mt792x_link_sta, wcid.poll_list);
- msta = container_of(mlink, struct mt792x_sta, deflink);
+ msta = mlink->sta;
spin_lock_bh(&dev->mt76.sta_poll_lock);
list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
@@ -395,11 +395,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (status->wcid) {
mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
}
mt792x_get_status_freq_info(status, chfreq);
@@ -734,7 +730,7 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- struct mt76_vif *mvif;
+ struct mt76_vif_link *mvif;
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED));
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -743,7 +739,7 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
wcid->link_id) : NULL;
- mvif = mconf ? (struct mt76_vif *)&mconf->mt76 : NULL;
+ mvif = mconf ? (struct mt76_vif_link *)&mconf->mt76 : NULL;
if (mvif) {
omac_idx = mvif->omac_idx;
@@ -1054,10 +1050,7 @@ void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
out:
rcu_read_unlock();
@@ -1135,11 +1128,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
continue;
mlink = container_of(wcid, struct mt792x_link_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
continue;
}
@@ -1271,6 +1260,7 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
struct mt792x_dev *dev = mvif->phy->dev;
struct ieee80211_hw *hw = mt76_hw(dev);
struct ieee80211_bss_conf *bss_conf;
+ struct mt792x_bss_conf *mconf;
int i;
if (vif->type == NL80211_IFTYPE_STATION)
@@ -1278,8 +1268,9 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mconf = mt792x_vif_to_link(mvif, i);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf,
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, &mconf->mt76,
&mvif->sta.deflink.wcid, true);
mt7925_mcu_set_tx(dev, bss_conf);
}
@@ -1309,6 +1300,7 @@ void mt7925_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&dev->mphy.mac_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ dev->sar_inited = false;
for (i = 0; i < 10; i++) {
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 791c8b00e112..98daf80ac131 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -130,7 +130,7 @@ mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -310,6 +310,7 @@ void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy)
int __mt7925_start(struct mt792x_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
+ struct mt792x_dev *dev = phy->dev;
int err;
err = mt7925_mcu_set_channel_domain(mphy);
@@ -320,9 +321,12 @@ int __mt7925_start(struct mt792x_phy *phy)
if (err)
return err;
- err = mt7925_set_tx_sar_pwr(mphy->hw, NULL);
- if (err)
- return err;
+ if (!dev->sar_inited) {
+ err = mt7925_set_tx_sar_pwr(mphy->hw, NULL);
+ if (err)
+ return err;
+ dev->sar_inited = true;
+ }
mt792x_mac_reset_counters(phy);
set_bit(MT76_STATE_RUNNING, &mphy->state);
@@ -365,29 +369,22 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
0 : mconf->mt76.idx;
mconf->mt76.band_idx = 0xff;
- mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ?
+ 0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
else
mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
- ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf,
- &mlink->wcid, true);
- if (ret)
- goto out;
-
dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx);
mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx);
idx = MT792x_WTBL_RESERVED - mconf->mt76.idx;
- INIT_LIST_HEAD(&mlink->wcid.poll_list);
mlink->wcid.idx = idx;
- mlink->wcid.phy_idx = mconf->mt76.band_idx;
- mlink->wcid.hw_key_idx = -1;
mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mlink->wcid);
+ mt76_wcid_init(&mlink->wcid, 0);
mt7925_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -395,6 +392,12 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
ewma_rssi_init(&mconf->rssi);
rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid);
+
+ ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
+ &mlink->wcid, true);
+ if (ret)
+ goto out;
+
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = idx;
@@ -801,12 +804,12 @@ static u8
mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bool beacon, bool mcast)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_phy *mphy = hw->priv;
u16 rate;
u8 i, idx, ht;
- rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon, mcast);
+ rate = mt76_connac2_mac_tx_rate_val(mphy, &vif->bss_conf, beacon, mcast);
ht = FIELD_GET(MT_TX_RATE_MODE, rate) > MT_PHY_TYPE_OFDM;
if (beacon && ht) {
@@ -837,6 +840,7 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
u8 link_id = link_sta->link_id;
struct mt792x_link_sta *mlink;
struct mt792x_sta *msta;
+ struct mt76_wcid *wcid;
int ret, idx;
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
@@ -847,14 +851,22 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
return -ENOSPC;
mconf = mt792x_vif_to_link(mvif, link_id);
- INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ mt76_wcid_init(&mlink->wcid, 0);
mlink->wcid.sta = 1;
mlink->wcid.idx = idx;
- mlink->wcid.phy_idx = mconf->mt76.band_idx;
mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
mlink->last_txs = jiffies;
mlink->wcid.link_id = link_sta->link_id;
mlink->wcid.link_valid = !!link_sta->sta->valid_links;
+ mlink->sta = msta;
+
+ wcid = &mlink->wcid;
+ ewma_signal_init(&wcid->rssi);
+ rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
+ mt76_wcid_init(wcid, 0);
+ ewma_avg_signal_init(&mlink->avg_ack_signal);
+ memset(mlink->airtime_ac, 0,
+ sizeof(msta->deflink.airtime_ac));
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (ret)
@@ -866,9 +878,14 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
link_conf = mt792x_vif_to_bss_conf(vif, link_id);
/* should update bss info before STA add */
- if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
- link_conf, link_sta, false);
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
+ if (ieee80211_vif_is_mld(vif))
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, link_sta != mlink->pri_link);
+ else
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, false);
+ }
if (ieee80211_vif_is_mld(vif) &&
link_sta == mlink->pri_link) {
@@ -904,7 +921,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, unsigned long new_links)
{
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct mt76_wcid *wcid;
unsigned int link_id;
int err = 0;
@@ -921,14 +937,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
err = -ENOMEM;
break;
}
-
- wcid = &mlink->wcid;
- ewma_signal_init(&wcid->rssi);
- rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
- mt76_wcid_init(wcid);
- ewma_avg_signal_init(&mlink->avg_ack_signal);
- memset(mlink->airtime_ac, 0,
- sizeof(msta->deflink.airtime_ac));
}
msta->valid_links |= BIT(link_id);
@@ -1141,8 +1149,7 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct mt792x_bss_conf *mconf;
mconf = mt792x_link_conf_to_mconf(link_conf);
- mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
- link_sta, false);
+ mt792x_mac_link_bss_remove(dev, mconf, mlink);
}
spin_lock_bh(&mdev->sta_poll_lock);
@@ -1185,7 +1192,6 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
if (link_sta != mlink->pri_link) {
mt76_wcid_cleanup(mdev, wcid);
mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx);
- mt76_wcid_mask_clear(mdev->wcid_phy_mask, wcid->idx);
}
if (msta->deflink_id == link_id)
@@ -1200,12 +1206,45 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct {
+ struct {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 link_idx; /* hw link idx */
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } dev_req = {
+ .hdr = {
+ .omac_idx = 0,
+ .band_idx = 0,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = true,
+ },
+ };
unsigned long rem;
rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+ if (ieee80211_vif_is_mld(vif)) {
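+ /* tearing down an MLD station: disable DBDC and restore the legacy
+ * (single-link) device info in firmware
+ */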
+ mt7925_mcu_set_dbdc(&dev->mphy, false);
+
+ /* recover the OMAC address for the legacy interface */
+ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
+ &dev_req, sizeof(dev_req), true);
+ }
+
if (vif->type == NL80211_IFTYPE_STATION) {
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
@@ -1250,22 +1289,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
- mt7925_mcu_uni_rx_ba(dev, params, true);
+ mt7925_mcu_uni_rx_ba(dev, vif, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
- mt7925_mcu_uni_rx_ba(dev, params, false);
+ mt7925_mcu_uni_rx_ba(dev, vif, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7925_mcu_uni_tx_ba(dev, params, true);
+ mt7925_mcu_uni_tx_ba(dev, vif, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, params, false);
+ mt7925_mcu_uni_tx_ba(dev, vif, params, false);
break;
case IEEE80211_AMPDU_TX_START:
set_bit(tid, &msta->deflink.wcid.ampdu_state);
@@ -1274,7 +1313,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, params, false);
+ mt7925_mcu_uni_tx_ba(dev, vif, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -1895,6 +1934,13 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
mt7925_mcu_set_tx(dev, info);
+ if (changed & BSS_CHANGED_BSSID) {
+ if (ieee80211_vif_is_mld(vif) &&
+ hweight16(mvif->valid_links) == 2)
+ /* Indicate the secondary setup done */
+ mt7925_mcu_uni_bss_bcnft(dev, info, true);
+ }
+
mt792x_mutex_release(dev);
}
@@ -1946,6 +1992,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
GFP_KERNEL);
mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
GFP_KERNEL);
+ if (!mconf || !mlink)
+ return -ENOMEM;
}
mconfs[link_id] = mconf;
@@ -1974,6 +2022,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto free;
if (mconf != &mvif->bss_conf) {
+ mt7925_mcu_set_bss_pm(dev, link_conf, true);
+
err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
vif->active_links);
if (err < 0)
@@ -2071,18 +2121,16 @@ static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- struct ieee80211_bss_conf *pri_link_conf;
struct mt792x_bss_conf *mconf;
mutex_lock(&dev->mt76.mutex);
if (ieee80211_vif_is_mld(vif)) {
mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
- pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
if (vif->type == NL80211_IFTYPE_STATION &&
mconf == &mvif->bss_conf)
- mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
+ mt7925_mcu_add_bss_info(&dev->phy, NULL, link_conf,
NULL, false);
} else {
mconf = &mvif->bss_conf;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 748ea6adbc6b..15815ad84713 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -39,7 +39,6 @@ int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd,
} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
- cmd == MCU_UNI_CMD(HIF_CTRL) ||
cmd == MCU_UNI_CMD(OFFLOAD) ||
cmd == MCU_UNI_CMD(SUSPEND)) {
struct mt7925_mcu_uni_event *event;
@@ -123,10 +122,8 @@ EXPORT_SYMBOL_GPL(mt7925_mcu_regval);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct ieee80211_bss_conf *link_conf)
{
- struct ieee80211_vif *mvif = container_of((void *)link_conf->vif,
- struct ieee80211_vif,
- drv_priv);
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct ieee80211_vif *mvif = link_conf->vif;
struct sk_buff *skb;
int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
@@ -166,7 +163,7 @@ static int
mt7925_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
bool suspend, struct cfg80211_wowlan *wowlan)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -221,7 +218,7 @@ mt7925_mcu_set_wow_pattern(struct mt76_dev *dev,
u8 index, bool enable,
struct cfg80211_pkt_pattern *pattern)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7925_wow_pattern_tlv *tlv;
struct sk_buff *skb;
struct {
@@ -276,7 +273,7 @@ static void
mt7925_mcu_connection_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7925_uni_beacon_loss_event *event = priv;
if (mvif->idx != event->hdr.bss_idx)
@@ -306,7 +303,7 @@ mt7925_mcu_connection_loss_event(struct mt792x_dev *dev, struct sk_buff *skb)
static void
mt7925_mcu_roc_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7925_roc_grant_tlv *grant = priv;
if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION)
@@ -344,6 +341,51 @@ static void mt7925_mcu_roc_handle_grant(struct mt792x_dev *dev,
}
static void
+mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv)
+{
+ struct mt7925_mcu_hif_ctrl_basic_tlv *basic;
+
+ basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv;
+
+ if (basic->hifsuspend) {
+ if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
+ basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE)
+ /* success */
+ dev->hif_idle = true;
+ else
+ /* busy or invalid */
+ dev->hif_idle = false;
+ } else {
+ dev->hif_resumed = true;
+ }
+ wake_up(&dev->wait);
+}
+
+static void
+mt7925_mcu_uni_hif_ctrl_event(struct mt792x_dev *dev, struct sk_buff *skb)
+{
+ struct tlv *tlv;
+ u32 tlv_len;
+
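+ /* walk the HIF control event TLVs; only the basic status TLV is
+ * consumed here
+ */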
+ skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4);
+ tlv = (struct tlv *)skb->data;
+ tlv_len = skb->len;
+
+ while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) {
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_HIF_CTRL_BASIC:
+ mt7925_mcu_handle_hif_ctrl_basic(dev, tlv);
+ break;
+ default:
+ break;
+ }
+ tlv_len -= le16_to_cpu(tlv->len);
+ tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len));
+ }
+}
+
+static void
mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct tlv *tlv;
@@ -388,7 +430,7 @@ mt7925_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb)
struct mt7925_mcu_txs_event {
u8 ver;
u8 rsv[3];
- u8 data[0];
+ u8 data[];
} __packed * txs;
struct tlv *tlv;
u32 tlv_len;
@@ -489,6 +531,9 @@ mt7925_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev,
rxd = (struct mt7925_mcu_rxd *)skb->data;
switch (rxd->eid) {
+ case MCU_UNI_EVENT_HIF_CTRL:
+ mt7925_mcu_uni_hif_ctrl_event(dev, skb);
+ break;
case MCU_UNI_EVENT_FW_LOG_2_HOST:
mt7925_mcu_uni_debug_msg_event(dev, skb);
break;
@@ -530,11 +575,11 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
}
static int
-mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
+ struct mt76_wcid *wcid,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -562,28 +607,60 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
/** starec & wtbl **/
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = msta->vif;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct mt76_wcid *wcid;
+ u8 link_id;
+ int ret = 0;
- if (enable && !params->amsdu)
- msta->deflink.wcid.amsdu = false;
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(msta, link_id);
+ wcid = &mlink->wcid;
+
+ if (enable && !params->amsdu)
+ mlink->wcid.amsdu = false;
+
+ ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+ enable, true);
+ if (ret < 0)
+ break;
+ }
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
- enable, true);
+ return ret;
}
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = msta->vif;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct mt76_wcid *wcid;
+ u8 link_id;
+ int ret = 0;
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(msta, link_id);
+ wcid = &mlink->wcid;
+
+ ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+ enable, false);
+ if (ret < 0)
+ break;
+ }
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
- enable, false);
+ return ret;
}
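The tx and rx BA paths now repeat the same per-link walk. They could share one helper; a sketch of that consolidation, assuming the link accessors behave exactly as used above:

	static int mt7925_mcu_ba_all_links(struct mt792x_dev *dev,
					   struct ieee80211_vif *vif,
					   struct ieee80211_ampdu_params *params,
					   bool enable, bool tx)
	{
		struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
		struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
		unsigned long usable_links = ieee80211_vif_usable_links(vif);
		u8 link_id;
		int ret = 0;

		for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
			struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, link_id);
			struct mt792x_link_sta *mlink = mt792x_sta_to_link(msta, link_id);

			if (enable && !params->amsdu)
				mlink->wcid.amsdu = false;

			ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76,
						&mlink->wcid, params, enable, tx);
			if (ret < 0)
				break;
		}

		return ret;
	}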
static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
@@ -638,7 +715,7 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
clc = (const struct mt7925_clc *)(clc_base + offset);
- if (clc->idx > ARRAY_SIZE(phy->clc))
+ if (clc->idx >= ARRAY_SIZE(phy->clc))
break;
/* do not init buf again if chip reset triggered */
@@ -823,7 +900,7 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
mt7925_mcu_parse_phy_cap(dev, tlv->data);
break;
case MT_NIC_CAP_CHIP_CAP:
- memcpy(&dev->phy.chip_cap, (void *)skb->data, sizeof(u64));
+ dev->phy.chip_cap = le64_to_cpu(*(__le64 *)tlv->data);
break;
case MT_NIC_CAP_EML_CAP:
mt7925_mcu_parse_eml_cap(dev, tlv->data);
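The chip_cap change both reads from the correct TLV payload and decodes it as little-endian. One caveat worth checking: tlv->data may not be naturally aligned for a 64-bit load on strict-alignment CPUs, so get_unaligned_le64() (from <linux/unaligned.h>, <asm/unaligned.h> on older kernels) would be the safer form; sketch only:

	dev->phy.chip_cap = get_unaligned_le64(tlv->data);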
@@ -1153,7 +1230,12 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
u8 rsv[4];
} __packed hdr;
struct roc_acquire_tlv roc[2];
- } __packed req;
+ } __packed req = {
+ .roc[0].tag = cpu_to_le16(UNI_ROC_NUM),
+ .roc[0].len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
+ .roc[1].tag = cpu_to_le16(UNI_ROC_NUM),
+ .roc[1].len = cpu_to_le16(sizeof(struct roc_acquire_tlv))
+ };
if (!mconf || hweight16(vif->valid_links) < 2 ||
hweight16(sel_links) != 2)
@@ -1200,6 +1282,8 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
req.roc[i].bw_from_ap = CMD_CBW_20MHZ;
req.roc[i].center_chan = center_ch;
req.roc[i].center_chan_from_ap = center_ch;
+ req.roc[i].center_chan2 = 0;
+ req.roc[i].center_chan2_from_ap = 0;
/* STR : 0xfe indicates BAND_ALL with enabling DBDC
* EMLSR : 0xff indicates (BAND_AUTO) without DBDC
@@ -1215,7 +1299,7 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
}
return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
@@ -1264,7 +1348,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
}
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
@@ -1294,7 +1378,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
@@ -1357,7 +1441,7 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
&ps_req, sizeof(ps_req), true);
}
-static int
+int
mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
struct ieee80211_bss_conf *link_conf, bool enable)
{
@@ -1447,12 +1531,12 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
int err;
err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
- &req1, sizeof(req1), false);
+ &req1, sizeof(req1), true);
if (err < 0 || !enable)
return err;
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
static void
@@ -1770,7 +1854,7 @@ static int
mt7925_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
{
- struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *skb;
int conn_state;
@@ -1783,7 +1867,7 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
conn_state = info->enable ? CONN_STATE_PORT_SECURE :
CONN_STATE_DISCONNECT;
if (info->link_sta)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
info->link_sta,
conn_state, info->newly);
if (info->link_sta && info->enable) {
@@ -1837,7 +1921,7 @@ mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
return PTR_ERR(skb);
if (info->enable)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
info->link_sta,
info->enable, info->newly);
@@ -1883,6 +1967,7 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
struct mt76_sta_cmd_info info = {
.link_sta = link_sta,
.vif = vif,
+ .link_conf = &vif->bss_conf,
.enable = enable,
.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
.state = state,
@@ -1898,7 +1983,11 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
mlink = mt792x_sta_to_link(msta, link_sta->link_id);
}
info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
- info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true;
+
+ if (link_sta)
+ info.newly = state != MT76_STA_INFO_STATE_ASSOC;
+ else
+ info.newly = state == MT76_STA_INFO_STATE_ASSOC ? false : true;
if (ieee80211_vif_is_mld(vif))
err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
@@ -1914,32 +2003,21 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
{
#define MT7925_FIF_BIT_CLR BIT(1)
#define MT7925_FIF_BIT_SET BIT(0)
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- unsigned long valid = ieee80211_vif_is_mld(vif) ?
- mvif->valid_links : BIT(0);
- struct ieee80211_bss_conf *bss_conf;
int err = 0;
- int i;
if (enable) {
- for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
- bss_conf = mt792x_vif_to_bss_conf(vif, i);
- err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true);
- if (err < 0)
- return err;
- }
+ err = mt7925_mcu_uni_bss_bcnft(dev, &vif->bss_conf, true);
+ if (err < 0)
+ return err;
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_SET,
MT_WF_RFCR_DROP_OTHER_BEACON);
}
- for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
- bss_conf = mt792x_vif_to_bss_conf(vif, i);
- err = mt7925_mcu_set_bss_pm(dev, bss_conf, false);
- if (err)
- return err;
- }
+ err = mt7925_mcu_set_bss_pm(dev, &vif->bss_conf, false);
+ if (err < 0)
+ return err;
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_CLR,
@@ -1976,8 +2054,6 @@ int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txp
int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
-
struct {
struct {
u8 band_idx;
@@ -1991,7 +2067,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed enable;
} __packed req = {
.hdr = {
- .band_idx = mvif->bss_conf.mt76.band_idx,
+ .band_idx = 0,
},
.enable = {
.tag = cpu_to_le16(UNI_SNIFFER_ENABLE),
@@ -2050,7 +2126,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
} __packed tlv;
} __packed req = {
.hdr = {
- .band_idx = vif->bss_conf.mt76.band_idx,
+ .band_idx = 0,
},
.tlv = {
.tag = cpu_to_le16(UNI_SNIFFER_CONFIG),
@@ -2179,11 +2255,27 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
req = (struct bss_rlm_tlv *)tlv;
req->control_channel = chandef->chan->hw_value;
req->center_chan = ieee80211_frequency_to_channel(freq1);
- req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ req->center_chan2 = 0;
req->tx_streams = hweight8(phy->antenna_mask);
req->ht_op_info = 4; /* set HT 40M allowed */
req->rx_streams = hweight8(phy->antenna_mask);
- req->band = band;
+ req->sco = 0;
+ req->band = 1;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ req->band = 1;
+ break;
+ case NL80211_BAND_5GHZ:
+ req->band = 2;
+ break;
+ case NL80211_BAND_6GHZ:
+ req->band = 3;
+ break;
+ default:
+ break;
+ }
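For reference, the new explicit mapping from NL80211 bands to the firmware codes 1/2/3 could equally be a lookup table; a sketch, assuming NUM_NL80211_BANDS bounds the index and 1 stays the default:

	static const u8 mt7925_fw_band[NUM_NL80211_BANDS] = {
		[NL80211_BAND_2GHZ] = 1,
		[NL80211_BAND_5GHZ] = 2,
		[NL80211_BAND_6GHZ] = 3,
	};

	req->band = mt7925_fw_band[band] ?: 1;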
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
@@ -2194,6 +2286,7 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
break;
case NL80211_CHAN_WIDTH_80P80:
req->bw = CMD_CBW_8080MHZ;
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2);
break;
case NL80211_CHAN_WIDTH_160:
req->bw = CMD_CBW_160MHZ;
@@ -2219,7 +2312,7 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
}
static struct sk_buff *
-__mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
+__mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int len)
{
struct bss_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -2235,7 +2328,7 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
return skb;
}
-int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
@@ -2388,7 +2481,7 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb,
struct ieee80211_bss_conf *link_conf)
{
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
- struct mt76_vif *mvif = &mconf->mt76;
+ struct mt76_vif_link *mvif = &mconf->mt76;
struct bss_sec_tlv {
__le16 tag;
__le16 len;
@@ -2439,7 +2532,7 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
&link_conf->chanreq.oper;
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
enum nl80211_band band = chandef->chan->band;
- struct mt76_vif *mvif = &mconf->mt76;
+ struct mt76_vif_link *mvif = &mconf->mt76;
struct bss_rate_tlv *bmc;
struct tlv *tlv;
u8 idx = mvif->mcast_rates_idx ?
@@ -2463,6 +2556,7 @@ static void
mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
struct ieee80211_bss_conf *link_conf)
{
+ struct ieee80211_vif *vif = link_conf->vif;
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
struct bss_mld_tlv *mld;
@@ -2483,7 +2577,7 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
IEEE80211_EML_CAP_EMLSR_SUPP);
- memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN);
+ memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
}
static void
@@ -2614,7 +2708,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
}
-int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
+int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable)
{
struct mt76_dev *mdev = phy->dev;
@@ -2634,7 +2728,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
tlv = mt76_connac_mcu_add_tlv(skb, UNI_MBMC_SETTING, sizeof(*conf));
conf = (struct mbmc_conf_tlv *)tlv;
- conf->mbmc_en = 1;
+ conf->mbmc_en = enable;
conf->band = 0; /* unused */
err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS),
@@ -2643,14 +2737,12 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
return err;
}
-#define MT76_CONNAC_SCAN_CHANNEL_TIME 60
-
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_scan_request *sreq = &scan_req->req;
- int n_ssids = 0, err, i, duration;
+ int n_ssids = 0, err, i;
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_dev *mdev = phy->dev;
struct mt76_connac_mcu_scan_channel *chan;
@@ -2686,14 +2778,6 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->scan_type = sreq->n_ssids ? 1 : 0;
req->probe_req_num = sreq->n_ssids ? 2 : 0;
- duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
- /* increase channel time for passive scan */
- if (!sreq->n_ssids)
- duration *= 2;
- req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
- req->channel_min_dwell_time = cpu_to_le16(duration);
- req->channel_dwell_time = cpu_to_le16(duration);
-
tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID, sizeof(*ssid));
ssid = (struct scan_ssid_tlv *)tlv;
for (i = 0; i < sreq->n_ssids; i++) {
@@ -2765,7 +2849,7 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *sreq)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_dev *mdev = phy->dev;
@@ -2901,7 +2985,7 @@ mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct scan_hdr {
u8 seq_num;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index ac53bdc99332..1e47d2c61b54 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -616,7 +616,7 @@ mt7925_mcu_get_cipher(int cipher)
}
}
-int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
+int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable);
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
@@ -637,10 +637,13 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
-int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct ieee80211_bss_conf *link_conf);
+int mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
+                             struct ieee80211_bss_conf *link_conf, bool enable);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index f5c02e5f5066..8707b5d04743 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -27,6 +27,26 @@
#define MCU_UNI_EVENT_ROC 0x27
+#define HIF_TRAFFIC_IDLE 0x2
+
+enum {
+ UNI_EVENT_HIF_CTRL_BASIC = 0,
+ UNI_EVENT_HIF_CTRL_TAG_NUM
+};
+
+struct mt7925_mcu_hif_ctrl_basic_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 cid;
+ u8 pad[3];
+ u32 status;
+ u8 hif_type;
+ u8 hif_tx_traffic_status;
+ u8 hif_rx_traffic_status;
+ u8 hifsuspend;
+ u8 rsv[4];
+} __packed;
+
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
@@ -215,6 +235,7 @@ int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd);
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
+void mt7925_regd_update(struct mt792x_dev *dev);
int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -242,9 +263,11 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
void mt7925_scan_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 9aec675450f2..f36893e20c61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -442,9 +442,10 @@ static int mt7925_pci_suspend(struct device *device)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
- int i, err;
+ int i, err, ret;
pm->suspended = true;
+ dev->hif_resumed = false;
flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -455,14 +456,21 @@ static int mt7925_pci_suspend(struct device *device)
if (err < 0)
goto restore_suspend;
+ wait_event_timeout(dev->wait,
+ !dev->regd_in_progress, 5 * HZ);
+
/* always enable deep sleep during suspend to reduce
* power consumption
*/
mt7925_mcu_set_deep_sleep(dev, true);
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
- if (err)
+ mt76_connac_mcu_set_hif_suspend(mdev, true, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_idle, 3 * HZ);
+ if (!ret) {
+ err = -ETIMEDOUT;
goto restore_suspend;
+ }
napi_disable(&mdev->tx_napi);
mt76_worker_disable(&mdev->tx_worker);
@@ -506,8 +514,11 @@ restore_napi:
if (!pm->ds_enable)
mt7925_mcu_set_deep_sleep(dev, false);
- mt76_connac_mcu_set_hif_suspend(mdev, false);
-
+ mt76_connac_mcu_set_hif_suspend(mdev, false, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_resumed, 3 * HZ);
+ if (!ret)
+ err = -ETIMEDOUT;
restore_suspend:
pm->suspended = false;
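With this change mt76_connac_mcu_set_hif_suspend() no longer blocks on the firmware: completion arrives asynchronously as an MCU_UNI_EVENT_HIF_CTRL event that sets dev->hif_idle (suspend) or dev->hif_resumed (resume) and wakes dev->wait. The same wait pattern is repeated in the PCI and USB paths; a possible shared helper, sketch only, assuming the flag semantics above:

	static int mt792x_wait_hif_state(struct mt792x_dev *dev, bool suspend)
	{
		long ret;

		if (suspend)
			ret = wait_event_timeout(dev->wait, dev->hif_idle, 3 * HZ);
		else
			ret = wait_event_timeout(dev->wait, dev->hif_resumed, 3 * HZ);

		return ret ? 0 : -ETIMEDOUT;
	}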
@@ -523,8 +534,9 @@ static int mt7925_pci_resume(struct device *device)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
- int i, err;
+ int i, err, ret;
+ dev->hif_idle = false;
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto failed;
@@ -553,12 +565,19 @@ static int mt7925_pci_resume(struct device *device)
napi_schedule(&mdev->tx_napi);
local_bh_enable();
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_resumed, 3 * HZ);
+ if (!ret) {
+ err = -ETIMEDOUT;
+ goto failed;
+ }
/* restore previous ds setting */
if (!pm->ds_enable)
mt7925_mcu_set_deep_sleep(dev, false);
+ mt7925_regd_update(dev);
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 682db1bab21c..4dfbc1b6cfdd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -243,14 +243,19 @@ static int mt7925u_suspend(struct usb_interface *intf, pm_message_t state)
{
struct mt792x_dev *dev = usb_get_intfdata(intf);
struct mt76_connac_pm *pm = &dev->pm;
- int err;
+ int err, ret;
pm->suspended = true;
+ dev->hif_resumed = false;
flush_work(&dev->reset_work);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
- if (err)
+ mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_idle, 3 * HZ);
+ if (!ret) {
+ err = -ETIMEDOUT;
goto failed;
+ }
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
@@ -271,8 +276,9 @@ static int mt7925u_resume(struct usb_interface *intf)
struct mt792x_dev *dev = usb_get_intfdata(intf);
struct mt76_connac_pm *pm = &dev->pm;
bool reinit = true;
- int err, i;
+ int err, i, ret;
+ dev->hif_idle = false;
for (i = 0; i < 10; i++) {
u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT);
@@ -298,7 +304,11 @@ static int mt7925u_resume(struct usb_interface *intf)
if (err < 0)
goto failed;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_resumed, 3 * HZ);
+ if (!ret)
+ err = -ETIMEDOUT;
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index ab12616ec2b8..32ed01a96bf7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -28,6 +28,7 @@
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2)
+#define MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN BIT(3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
#define MT792x_BASIC_RATES_TBL 11
@@ -116,7 +117,7 @@ struct mt792x_chanctx {
};
struct mt792x_bss_conf {
- struct mt76_vif mt76; /* must be first */
+ struct mt76_vif_link mt76; /* must be first */
struct mt792x_vif *vif;
struct ewma_rssi rssi;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
@@ -133,6 +134,9 @@ struct mt792x_vif {
struct mt792x_phy *phy;
u16 valid_links;
u8 deflink_id;
+
+ struct work_struct csa_work;
+ struct timer_list csa_timer;
};
struct mt792x_phy {
@@ -206,6 +210,8 @@ struct mt792x_dev {
struct mt76_phy mphy;
};
+ struct mac_address macaddr_list[8];
+
const struct mt76_bus_ops *bus_ops;
struct mt792x_phy phy;
@@ -216,6 +222,10 @@ struct mt792x_dev {
bool has_eht:1;
bool regd_in_progress:1;
bool aspm_supported:1;
+ bool hif_idle:1;
+ bool hif_resumed:1;
+ bool sar_inited:1;
+ bool regd_change:1;
wait_queue_head_t wait;
struct work_struct init_work;
@@ -235,12 +245,15 @@ struct mt792x_dev {
enum environment_cap country_ie_env;
u32 backup_l1;
u32 backup_l2;
+
+ struct ieee80211_chanctx_conf *new_ctx;
};
static inline struct mt792x_bss_conf *
mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
{
struct ieee80211_vif *vif;
+ struct mt792x_bss_conf *bss_conf;
vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
@@ -248,8 +261,10 @@ mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
link_id >= IEEE80211_LINK_UNSPECIFIED)
return &mvif->bss_conf;
- return rcu_dereference_protected(mvif->link_conf[link_id],
- lockdep_is_held(&mvif->phy->dev->mt76.mutex));
+ bss_conf = rcu_dereference_protected(mvif->link_conf[link_id],
+ lockdep_is_held(&mvif->phy->dev->mt76.mutex));
+
+ return bss_conf ? bss_conf : &mvif->bss_conf;
}
static inline struct mt792x_link_sta *
@@ -364,6 +379,7 @@ void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 timestamp);
void mt792x_tx_worker(struct mt76_worker *w);
void mt792x_roc_timer(struct timer_list *timer);
+void mt792x_csa_timer(struct timer_list *timer);
void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
@@ -414,6 +430,7 @@ int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev);
void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
struct mt792x_bss_conf *mconf,
struct mt792x_link_sta *mlink);
+void mt792x_config_mac_addr_list(struct mt792x_dev *dev);
static inline char *mt792x_ram_name(struct mt792x_dev *dev)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 78fe37c2e07b..8799627f6292 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -38,6 +38,10 @@ static const struct ieee80211_iface_limit if_limits_chanctx[] = {
.max = 1,
.types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
}
};
@@ -45,7 +49,7 @@ static const struct ieee80211_iface_combination if_comb_chanctx[] = {
{
.limits = if_limits_chanctx,
.n_limits = ARRAY_SIZE(if_limits_chanctx),
- .max_interfaces = 2,
+ .max_interfaces = 3,
.num_different_channels = 2,
.beacon_int_infra_match = false,
}
@@ -147,7 +151,8 @@ void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false);
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
+ &mlink->wcid, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
@@ -284,6 +289,14 @@ void mt792x_roc_timer(struct timer_list *timer)
}
EXPORT_SYMBOL_GPL(mt792x_roc_timer);
+void mt792x_csa_timer(struct timer_list *timer)
+{
+ struct mt792x_vif *mvif = from_timer(mvif, timer, csa_timer);
+
+ ieee80211_queue_work(mvif->phy->mt76->hw, &mvif->csa_work);
+}
+EXPORT_SYMBOL_GPL(mt792x_csa_timer);
+
void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -325,6 +338,11 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
mctx->bss_conf = NULL;
mvif->bss_conf.mt76.ctx = NULL;
mutex_unlock(&dev->mt76.mutex);
+
+ if (vif->bss_conf.csa_active) {
+ del_timer_sync(&mvif->csa_timer);
+ cancel_work_sync(&mvif->csa_work);
+ }
}
EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
@@ -614,7 +632,8 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
wiphy->max_remain_on_channel_duration = 5000;
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
@@ -646,6 +665,7 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -911,6 +931,28 @@ int mt792x_load_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792x_load_firmware);
+void mt792x_config_mac_addr_list(struct mt792x_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+ u8 *addr = dev->macaddr_list[i].addr;
+
+ memcpy(addr, dev->mphy.macaddr, ETH_ALEN);
+
+ if (!i)
+ continue;
+
+ addr[0] |= BIT(1);
+ addr[0] ^= ((i - 1) << 2);
+ }
+ wiphy->addresses = dev->macaddr_list;
+ wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+}
+EXPORT_SYMBOL_GPL(mt792x_config_mac_addr_list);
+
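Worked example of the derivation above, assuming a hypothetical base address of 00:0c:43:26:60:00: entry 0 keeps the base, while each later entry sets the locally-administered bit and XORs (i - 1) << 2 into byte 0:

	i = 0: 00:0c:43:26:60:00   (base, unchanged)
	i = 1: 02:0c:43:26:60:00   (| BIT(1), ^ 0x00)
	i = 2: 06:0c:43:26:60:00   (| BIT(1), ^ 0x04)
	i = 3: 0a:0c:43:26:60:00   (| BIT(1), ^ 0x08)
	...
	i = 7: 1a:0c:43:26:60:00   (| BIT(1), ^ 0x18)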
MODULE_DESCRIPTION("MediaTek MT792x core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
index 106273935b26..05978d9c7b91 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
@@ -153,7 +153,7 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
return NULL;
link = container_of(wcid, struct mt792x_link_sta, wcid);
- sta = container_of(link, struct mt792x_sta, deflink);
+ sta = link->sta;
if (!sta->vif)
return NULL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 62c03d088925..7b2bb72b407d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -51,12 +51,10 @@ static ssize_t
mt7996_sys_recovery_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct mt7996_phy *phy = file->private_data;
- struct mt7996_dev *dev = phy->dev;
- bool band = phy->mt76->band_idx;
- char buf[16];
+ struct mt7996_dev *dev = file->private_data;
+ char buf[16], *sep;
int ret = 0;
- u16 val;
+ u16 band, val;
if (count >= sizeof(buf))
return -EINVAL;
@@ -69,21 +67,26 @@ mt7996_sys_recovery_set(struct file *file, const char __user *user_buf,
else
buf[count] = '\0';
- if (kstrtou16(buf, 0, &val))
+ sep = strchr(buf, ',');
+ if (!sep)
+ return -EINVAL;
+
+ *sep = 0;
+ if (kstrtou16(buf, 0, &band) || kstrtou16(sep + 1, 0, &val))
return -EINVAL;
switch (val) {
/*
- * 0: grab firmware current SER state.
- * 1: trigger & enable system error L1 recovery.
- * 2: trigger & enable system error L2 recovery.
- * 3: trigger & enable system error L3 rx abort.
- * 4: trigger & enable system error L3 tx abort
- * 5: trigger & enable system error L3 tx disable.
- * 6: trigger & enable system error L3 bf recovery.
- * 7: trigger & enable system error L4 mdp recovery.
- * 8: trigger & enable system error full recovery.
- * 9: trigger firmware crash.
+ * <band>,0: grab firmware current SER state.
+ * <band>,1: trigger & enable system error L1 recovery.
+ * <band>,2: trigger & enable system error L2 recovery.
+ * <band>,3: trigger & enable system error L3 rx abort.
+ * <band>,4: trigger & enable system error L3 tx abort
+ * <band>,5: trigger & enable system error L3 tx disable.
+ * <band>,6: trigger & enable system error L3 bf recovery.
+ * <band>,7: trigger & enable system error L4 mdp recovery.
+ * <band>,8: trigger & enable system error full recovery.
+ * <band>,9: trigger firmware crash.
*/
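	/*
	 * Reviewer note (not in the patch): a band index now has to
	 * prefix the value, e.g. from userspace something like
	 *   echo 0,8 > .../debug/ieee80211/phyX/mt76/sys_recovery
	 * to request a full recovery on band 0 (path is illustrative).
	 */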
case UNI_CMD_SER_QUERY:
ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_QUERY, 0, band);
@@ -126,8 +129,7 @@ static ssize_t
mt7996_sys_recovery_get(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct mt7996_phy *phy = file->private_data;
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = file->private_data;
char *buff;
int desc = 0;
ssize_t ret;
@@ -141,25 +143,25 @@ mt7996_sys_recovery_get(struct file *file, char __user *user_buf,
desc += scnprintf(buff + desc, bufsz - desc,
"Please echo the correct value ...\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "0: grab firmware transient SER state\n");
+ "<band>,0: grab firmware transient SER state\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "1: trigger system error L1 recovery\n");
+ "<band>,1: trigger system error L1 recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "2: trigger system error L2 recovery\n");
+ "<band>,2: trigger system error L2 recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "3: trigger system error L3 rx abort\n");
+ "<band>,3: trigger system error L3 rx abort\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "4: trigger system error L3 tx abort\n");
+ "<band>,4: trigger system error L3 tx abort\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "5: trigger system error L3 tx disable\n");
+ "<band>,5: trigger system error L3 tx disable\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "6: trigger system error L3 bf recovery\n");
+ "<band>,6: trigger system error L3 bf recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "7: trigger system error L4 mdp recovery\n");
+ "<band>,7: trigger system error L4 mdp recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "8: trigger system error full recovery\n");
+ "<band>,8: trigger system error full recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "9: trigger firmware crash\n");
+ "<band>,9: trigger firmware crash\n");
/* SER statistics */
desc += scnprintf(buff + desc, bufsz - desc,
@@ -524,16 +526,12 @@ mt7996_txbf_stat_read_phy(struct mt7996_phy *phy, struct seq_file *s)
seq_puts(s, "\n");
}
-static int
-mt7996_tx_stats_show(struct seq_file *file, void *data)
+static void
+mt7996_tx_stats_show_phy(struct seq_file *file, struct mt7996_phy *phy)
{
- struct mt7996_phy *phy = file->private;
- struct mt7996_dev *dev = phy->dev;
struct mt76_mib_stats *mib = &phy->mib;
- int i;
u32 attempts, success, per;
-
- mutex_lock(&dev->mt76.mutex);
+ int i;
mt7996_mac_update_stats(phy);
mt7996_ampdu_stat_read_phy(phy, file);
@@ -558,6 +556,23 @@ mt7996_tx_stats_show(struct seq_file *file, void *data)
else
seq_puts(file, "\n");
}
+}
+
+static int
+mt7996_tx_stats_show(struct seq_file *file, void *data)
+{
+ struct mt7996_dev *dev = file->private;
+ struct mt7996_phy *phy = &dev->phy;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7996_tx_stats_show_phy(file, phy);
+ phy = mt7996_phy2(dev);
+ if (phy)
+ mt7996_tx_stats_show_phy(file, phy);
+ phy = mt7996_phy3(dev);
+ if (phy)
+ mt7996_tx_stats_show_phy(file, phy);
mutex_unlock(&dev->mt76.mutex);
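Since the per-phy body is now factored out, the explicit phy2/phy3 calls above could also use the driver's existing iterator; a sketch, assuming mt7996_for_each_phy() skips absent bands as it does in mt7996_regd_notifier():

	mt7996_for_each_phy(dev, phy)
		mt7996_tx_stats_show_phy(file, phy);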
@@ -601,7 +616,7 @@ static void
mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
struct seq_file *s = data;
u8 ac;
@@ -621,15 +636,15 @@ mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
- msta->vif->mt76.wmm_idx, ac, qlen);
+ msta->vif->deflink.mt76.wmm_idx, ac, qlen);
}
}
static int
mt7996_hw_queues_show(struct seq_file *file, void *data)
{
- struct mt7996_phy *phy = file->private;
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = file->private;
+ struct mt7996_phy *phy = &dev->phy;
static const struct hw_queue_map ple_queue_map[] = {
{ "CPU_Q0", 0, 1, MT_CTX0 },
{ "CPU_Q1", 1, 1, MT_CTX0 + 1 },
@@ -685,6 +700,15 @@ mt7996_hw_queues_show(struct seq_file *file, void *data)
/* iterate per-sta ple queue */
ieee80211_iterate_stations_atomic(phy->mt76->hw,
mt7996_sta_hw_queue_read, file);
+ phy = mt7996_phy2(dev);
+ if (phy)
+ ieee80211_iterate_stations_atomic(phy->mt76->hw,
+ mt7996_sta_hw_queue_read, file);
+ phy = mt7996_phy3(dev);
+ if (phy)
+ ieee80211_iterate_stations_atomic(phy->mt76->hw,
+ mt7996_sta_hw_queue_read, file);
+
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
mt7996_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
@@ -698,19 +722,29 @@ DEFINE_SHOW_ATTRIBUTE(mt7996_hw_queues);
static int
mt7996_xmit_queues_show(struct seq_file *file, void *data)
{
- struct mt7996_phy *phy = file->private;
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = file->private;
+ struct mt7996_phy *phy;
struct {
struct mt76_queue *q;
char *queue;
} queue_map[] = {
- { phy->mt76->q_tx[MT_TXQ_BE], " MAIN" },
+ { dev->mphy.q_tx[MT_TXQ_BE], " MAIN0" },
+ { NULL, " MAIN1" },
+ { NULL, " MAIN2" },
{ dev->mt76.q_mcu[MT_MCUQ_WM], " MCUWM" },
{ dev->mt76.q_mcu[MT_MCUQ_WA], " MCUWA" },
{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWDL" },
};
int i;
+ phy = mt7996_phy2(dev);
+ if (phy)
+ queue_map[1].q = phy->mt76->q_tx[MT_TXQ_BE];
+
+ phy = mt7996_phy3(dev);
+ if (phy)
+ queue_map[2].q = phy->mt76->q_tx[MT_TXQ_BE];
+
seq_puts(file, " queue | hw-queued | head | tail |\n");
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
struct mt76_queue *q = queue_map[i].q;
@@ -785,20 +819,20 @@ mt7996_rf_regval_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7996_rf_regval_get,
mt7996_rf_regval_set, "0x%08llx\n");
-int mt7996_init_debugfs(struct mt7996_phy *phy)
+int mt7996_init_debugfs(struct mt7996_dev *dev)
{
- struct mt7996_dev *dev = phy->dev;
struct dentry *dir;
- dir = mt76_register_debugfs_fops(phy->mt76, NULL);
+ dir = mt76_register_debugfs_fops(&dev->mphy, NULL);
if (!dir)
return -ENOMEM;
- debugfs_create_file("hw-queues", 0400, dir, phy,
+
+ debugfs_create_file("hw-queues", 0400, dir, dev,
&mt7996_hw_queues_fops);
- debugfs_create_file("xmit-queues", 0400, dir, phy,
+ debugfs_create_file("xmit-queues", 0400, dir, dev,
&mt7996_xmit_queues_fops);
- debugfs_create_file("tx_stats", 0400, dir, phy, &mt7996_tx_stats_fops);
- debugfs_create_file("sys_recovery", 0600, dir, phy,
+ debugfs_create_file("tx_stats", 0400, dir, dev, &mt7996_tx_stats_fops);
+ debugfs_create_file("sys_recovery", 0600, dir, dev,
&mt7996_sys_recovery_ops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
@@ -812,17 +846,13 @@ int mt7996_init_debugfs(struct mt7996_phy *phy)
mt7996_twt_stats);
debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
- if (phy->mt76->cap.has_5ghz) {
- debugfs_create_u32("dfs_hw_pattern", 0400, dir,
- &dev->hw_pattern);
- debugfs_create_file("radar_trigger", 0200, dir, dev,
- &fops_radar_trigger);
- debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
- mt7996_rdd_monitor);
- }
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7996_rdd_monitor);
- if (phy == &dev->phy)
- dev->debugfs_dir = dir;
+ dev->debugfs_dir = dir;
return 0;
}
@@ -899,7 +929,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
#define LONG_PREAMBLE 1
struct ieee80211_sta *sta = file->private_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
struct ra_rate phy = {};
char buf[100];
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index 4a8237118287..53dfac02f8af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -25,17 +25,108 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
static char *mt7996_eeprom_name(struct mt7996_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
- return MT7996_EEPROM_DEFAULT;
case 0x7992:
- return MT7992_EEPROM_DEFAULT;
+ switch (dev->var.type) {
+ case MT7992_VAR_TYPE_23:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7992_EEPROM_DEFAULT_23_INT;
+ return MT7992_EEPROM_DEFAULT_23;
+ case MT7992_VAR_TYPE_44:
+ default:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7992_EEPROM_DEFAULT_INT;
+ if (dev->var.fem == MT7996_FEM_MIX)
+ return MT7992_EEPROM_DEFAULT_MIX;
+ return MT7992_EEPROM_DEFAULT;
+ }
+ case 0x7990:
+ default:
+ switch (dev->var.type) {
+ case MT7996_VAR_TYPE_233:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7996_EEPROM_DEFAULT_233_INT;
+ return MT7996_EEPROM_DEFAULT_233;
+ case MT7996_VAR_TYPE_444:
+ default:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7996_EEPROM_DEFAULT_INT;
+ return MT7996_EEPROM_DEFAULT;
+ }
+ }
+}
+
+static void
+mt7996_eeprom_parse_stream(const u8 *eeprom, u8 band_idx, u8 *path,
+ u8 *rx_path, u8 *nss)
+{
+ switch (band_idx) {
+ case MT_BAND1:
+ *path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 2]);
+ *rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ *nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 5]);
+ break;
+ case MT_BAND2:
+ *path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 2]);
+ *rx_path = FIELD_GET(MT_EE_WIFI_CONF4_RX_PATH_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 4]);
+ *nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 5]);
+ break;
default:
- return MT7996_EEPROM_DEFAULT;
+ *path = FIELD_GET(MT_EE_WIFI_CONF1_TX_PATH_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 1]);
+ *rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ *nss = FIELD_GET(MT_EE_WIFI_CONF4_STREAM_NUM_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 4]);
+ break;
}
}
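The factored-out helper leans on FIELD_GET(), which shifts a masked field down to bit 0. A minimal illustration (the mask value here is hypothetical, not the driver's):

	u8 rx_path = FIELD_GET(GENMASK(6, 4), 0x35);	/* == 0x3 */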
+static bool mt7996_eeprom_variant_valid(struct mt7996_dev *dev, const u8 *def)
+{
+#define FEM_INT 0
+#define FEM_EXT 3
+ u8 *eeprom = dev->mt76.eeprom.data, fem[2];
+ int i;
+
+ for (i = 0; i < 2; i++)
+ fem[i] = u8_get_bits(eeprom[MT_EE_WIFI_CONF + 6 + i],
+ MT_EE_WIFI_PA_LNA_CONFIG);
+
+ if (dev->var.fem == MT7996_FEM_EXT &&
+ !(fem[0] == FEM_EXT && fem[1] == FEM_EXT))
+ return false;
+ else if (dev->var.fem == MT7996_FEM_INT &&
+ !(fem[0] == FEM_INT && fem[1] == FEM_INT))
+ return false;
+ else if (dev->var.fem == MT7996_FEM_MIX &&
+ !(fem[0] == FEM_INT && fem[1] == FEM_EXT))
+ return false;
+
+ for (i = 0; i < __MT_MAX_BAND; i++) {
+ u8 path, rx_path, nss;
+ u8 def_path, def_rx_path, def_nss;
+
+ if (!dev->mt76.phys[i])
+ continue;
+
+ mt7996_eeprom_parse_stream(eeprom, i, &path, &rx_path, &nss);
+ mt7996_eeprom_parse_stream(def, i, &def_path, &def_rx_path,
+ &def_nss);
+ if (path > def_path || rx_path > def_rx_path || nss > def_nss)
+ return false;
+ }
+
+ return true;
+}
+
static int
-mt7996_eeprom_load_default(struct mt7996_dev *dev)
+mt7996_eeprom_check_or_use_default(struct mt7996_dev *dev, bool use_default)
{
u8 *eeprom = dev->mt76.eeprom.data;
const struct firmware *fw = NULL;
@@ -51,6 +142,10 @@ mt7996_eeprom_load_default(struct mt7996_dev *dev)
goto out;
}
+ if (!use_default && mt7996_eeprom_variant_valid(dev, fw->data))
+ goto out;
+
+ dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n");
memcpy(eeprom, fw->data, MT7996_EEPROM_SIZE);
dev->flash_mode = true;
@@ -62,43 +157,68 @@ out:
static int mt7996_eeprom_load(struct mt7996_dev *dev)
{
+ bool use_default = false;
int ret;
ret = mt76_eeprom_init(&dev->mt76, MT7996_EEPROM_SIZE);
if (ret < 0)
return ret;
- if (ret) {
+ if (ret && !mt7996_check_eeprom(dev)) {
dev->flash_mode = true;
- } else {
- u8 free_block_num;
- u32 block_num, i;
+ goto out;
+ }
+
+ if (!dev->flash_mode) {
u32 eeprom_blk_size = MT7996_EEPROM_BLOCK_SIZE;
+ u32 block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size);
+ u8 free_block_num;
+ int i;
+ memset(dev->mt76.eeprom.data, 0, MT7996_EEPROM_SIZE);
ret = mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
if (ret < 0)
return ret;
/* efuse info isn't enough */
- if (free_block_num >= 59)
- return -EINVAL;
-
- /* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size);
- for (i = 0; i < block_num; i++) {
- ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size);
- if (ret < 0)
- return ret;
+ if (free_block_num >= 59) {
+ use_default = true;
+ goto out;
+ }
+
+ /* check if eeprom data from fw is valid */
+ if (mt7996_mcu_get_eeprom(dev, 0, NULL, 0) ||
+ mt7996_check_eeprom(dev)) {
+ use_default = true;
+ goto out;
+ }
+
+ /* read eeprom data from fw */
+ for (i = 1; i < block_num; i++) {
+ u32 len = eeprom_blk_size;
+
+ if (i == block_num - 1)
+ len = MT7996_EEPROM_SIZE % eeprom_blk_size;
+ ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size,
+ NULL, len);
+ if (ret && ret != -EINVAL) {
+ use_default = true;
+ goto out;
+ }
}
}
- return mt7996_check_eeprom(dev);
+out:
+ return mt7996_eeprom_check_or_use_default(dev, use_default);
}
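One detail worth double-checking in the new block-read loop: the last block's length comes from MT7996_EEPROM_SIZE % eeprom_blk_size, which is 0 whenever the size divides evenly, so the final mt7996_mcu_get_eeprom() call would request zero bytes. A guarded sketch, assuming a zero-length read is not intended:

	u32 tail = MT7996_EEPROM_SIZE % eeprom_blk_size;
	u32 len = (i == block_num - 1 && tail) ? tail : eeprom_blk_size;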
-static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
+static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_phy *phy,
+ u8 *path, u8 *rx_path, u8 *nss)
{
#define MODE_HE_ONLY BIT(0)
#define WTBL_SIZE_GROUP GENMASK(31, 28)
+#define STREAM_CAP(_offs) ((cap & (0x7 << (_offs))) >> (_offs))
+ struct mt7996_dev *dev = phy->dev;
u32 cap = 0;
int ret;
@@ -107,13 +227,17 @@ static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
return ret;
if (cap) {
+ u8 band_offs = phy->mt76->band_idx * 3;
+
dev->has_eht = !(cap & MODE_HE_ONLY);
dev->wtbl_size_group = u32_get_bits(cap, WTBL_SIZE_GROUP);
+ *nss = min_t(u8, *nss, STREAM_CAP(1 + band_offs));
+ *path = min_t(u8, *path, STREAM_CAP(10 + band_offs));
+ *rx_path = min_t(u8, *rx_path, STREAM_CAP(19 + band_offs));
}
- if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4 ||
- is_mt7992(&dev->mt76))
- dev->wtbl_size_group = 2; /* set default */
+ if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4)
+ dev->wtbl_size_group = is_mt7996(&dev->mt76) ? 4 : 2;
return 0;
}
@@ -163,32 +287,10 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
int max_path = 5, max_nss = 4;
int ret;
- switch (band_idx) {
- case MT_BAND1:
- path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND1,
- eeprom[MT_EE_WIFI_CONF + 2]);
- rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND1,
- eeprom[MT_EE_WIFI_CONF + 3]);
- nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND1,
- eeprom[MT_EE_WIFI_CONF + 5]);
- break;
- case MT_BAND2:
- path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND2,
- eeprom[MT_EE_WIFI_CONF + 2]);
- rx_path = FIELD_GET(MT_EE_WIFI_CONF4_RX_PATH_BAND2,
- eeprom[MT_EE_WIFI_CONF + 4]);
- nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND2,
- eeprom[MT_EE_WIFI_CONF + 5]);
- break;
- default:
- path = FIELD_GET(MT_EE_WIFI_CONF1_TX_PATH_BAND0,
- eeprom[MT_EE_WIFI_CONF + 1]);
- rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND0,
- eeprom[MT_EE_WIFI_CONF + 3]);
- nss = FIELD_GET(MT_EE_WIFI_CONF4_STREAM_NUM_BAND0,
- eeprom[MT_EE_WIFI_CONF + 4]);
- break;
- }
+ mt7996_eeprom_parse_stream(eeprom, band_idx, &path, &rx_path, &nss);
+ ret = mt7996_eeprom_parse_efuse_hw_cap(phy, &path, &rx_path, &nss);
+ if (ret)
+ return ret;
if (!path || path > max_path)
path = max_path;
@@ -203,15 +305,12 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
mphy->antenna_mask = BIT(nss) - 1;
mphy->chainmask = (BIT(path) - 1) << dev->chainshift[band_idx];
+ phy->orig_chainmask = mphy->chainmask;
dev->chainmask |= mphy->chainmask;
if (band_idx < MT_BAND2)
dev->chainshift[band_idx + 1] = dev->chainshift[band_idx] +
hweight16(mphy->chainmask);
- ret = mt7996_eeprom_parse_efuse_hw_cap(dev);
- if (ret)
- return ret;
-
return mt7996_eeprom_parse_band_config(phy);
}
@@ -220,15 +319,8 @@ int mt7996_eeprom_init(struct mt7996_dev *dev)
int ret;
ret = mt7996_eeprom_load(dev);
- if (ret < 0) {
- if (ret != -EINVAL)
- return ret;
-
- dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n");
- ret = mt7996_eeprom_load_default(dev);
- if (ret)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = mt7996_eeprom_parse_hw_cap(dev, &dev->phy);
if (ret < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
index 412d6e2f8014..7a771ca2434c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
@@ -40,6 +40,8 @@ enum mt7996_eeprom_field {
#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND1 GENMASK(2, 0)
#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND2 GENMASK(5, 3)
+#define MT_EE_WIFI_PA_LNA_CONFIG GENMASK(1, 0)
+
#define MT_EE_RATE_DELTA_MASK GENMASK(5, 0)
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 5e96973226bb..6b660424aedc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -14,11 +14,30 @@
#include "coredump.h"
#include "eeprom.h"
+static const struct ieee80211_iface_limit if_limits_global = {
+ .max = MT7996_MAX_INTERFACES * MT7996_MAX_RADIOS,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_ADHOC)
+ | BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+};
+
+static const struct ieee80211_iface_combination if_comb_global = {
+ .limits = &if_limits_global,
+ .n_limits = 1,
+ .max_interfaces = MT7996_MAX_INTERFACES * MT7996_MAX_RADIOS,
+ .num_different_channels = MT7996_MAX_RADIOS,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+};
+
static const struct ieee80211_iface_limit if_limits[] = {
{
- .max = 1,
- .types = BIT(NL80211_IFTYPE_ADHOC)
- }, {
.max = 16,
.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
@@ -30,20 +49,18 @@ static const struct ieee80211_iface_limit if_limits[] = {
}
};
-static const struct ieee80211_iface_combination if_comb[] = {
- {
- .limits = if_limits,
- .n_limits = ARRAY_SIZE(if_limits),
- .max_interfaces = MT7996_MAX_INTERFACES,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160),
- .beacon_int_min_gcd = 100,
- }
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = MT7996_MAX_INTERFACES,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ .beacon_int_min_gcd = 100,
};
static ssize_t mt7996_thermal_temp_show(struct device *dev,
@@ -85,7 +102,7 @@ static ssize_t mt7996_thermal_temp_store(struct device *dev,
return ret;
mutex_lock(&phy->dev->mt76.mutex);
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 40, 130);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 40 * 1000, 130 * 1000), 1000);
/* add a safety margin ~10 */
if ((i - 1 == MT7996_CRIT_TEMP_IDX &&
@@ -180,28 +197,32 @@ static const struct thermal_cooling_device_ops mt7996_thermal_ops = {
static void mt7996_unregister_thermal(struct mt7996_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ char name[sizeof("cooling_deviceXXX")];
if (!phy->cdev)
return;
- sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+ snprintf(name, sizeof(name), "cooling_device%d", phy->mt76->band_idx);
+ sysfs_remove_link(&wiphy->dev.kobj, name);
thermal_cooling_device_unregister(phy->cdev);
}
static int mt7996_thermal_init(struct mt7996_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ char cname[sizeof("cooling_deviceXXX")];
struct thermal_cooling_device *cdev;
struct device *hwmon;
const char *name;
- name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s",
- wiphy_name(wiphy));
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s.%d",
+ wiphy_name(wiphy), phy->mt76->band_idx);
+ snprintf(cname, sizeof(cname), "cooling_device%d", phy->mt76->band_idx);
cdev = thermal_cooling_device_register(name, phy, &mt7996_thermal_ops);
if (!IS_ERR(cdev)) {
if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
- "cooling_device") < 0)
+ cname) < 0)
thermal_cooling_device_unregister(cdev);
else
phy->cdev = cdev;
@@ -333,28 +354,88 @@ mt7996_regd_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
- if (dev->mt76.region == NL80211_DFS_UNSET)
- mt7996_mcu_rdd_background_enable(phy, NULL);
+ mt7996_for_each_phy(dev, phy) {
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7996_mcu_rdd_background_enable(phy, NULL);
- mt7996_init_txpower(phy);
+ mt7996_init_txpower(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ mt7996_dfs_init_radar_detector(phy);
+ }
+}
+
+static void
+mt7996_init_wiphy_band(struct ieee80211_hw *hw, struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct wiphy *wiphy = hw->wiphy;
+ int n_radios = hw->wiphy->n_radio;
+ struct wiphy_radio_freq_range *freq = &dev->radio_freqs[n_radios];
+ struct wiphy_radio *radio = &dev->radios[n_radios];
+
+ phy->slottime = 9;
+ phy->beacon_rate = -1;
- phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
- mt7996_dfs_init_radar_detector(phy);
+ if (phy->mt76->cap.has_2ghz) {
+ phy->mt76->sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_2g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_2;
+ freq->start_freq = 2400000;
+ freq->end_freq = 2500000;
+ } else if (phy->mt76->cap.has_5ghz) {
+ phy->mt76->sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ phy->mt76->sband_5g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_1;
+
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ freq->start_freq = 5000000;
+ freq->end_freq = 5900000;
+ } else if (phy->mt76->cap.has_6ghz) {
+ freq->start_freq = 5900000;
+ freq->end_freq = 7200000;
+ } else {
+ return;
+ }
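	/* note: wiphy_radio_freq_range bounds are specified in kHz,
	 * hence the 2400000..2500000 style values above
	 */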
+
+ dev->radio_phy[n_radios] = phy;
+ radio->freq_range = freq;
+ radio->n_freq_range = 1;
+ radio->iface_combinations = &if_comb;
+ radio->n_iface_combinations = 1;
+ hw->wiphy->n_radio++;
+
+ wiphy->available_antennas_rx |= phy->mt76->chainmask;
+ wiphy->available_antennas_tx |= phy->mt76->chainmask;
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7996_set_stream_vht_txbf_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
+ mt7996_init_txpower(phy);
}
static void
mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt76_dev *mdev = &phy->dev->mt76;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt76_dev *mdev = &dev->mt76;
struct wiphy *wiphy = hw->wiphy;
- u16 max_subframes = phy->dev->has_eht ? IEEE80211_MAX_AMPDU_BUF_EHT :
- IEEE80211_MAX_AMPDU_BUF_HE;
+ u16 max_subframes = dev->has_eht ? IEEE80211_MAX_AMPDU_BUF_EHT :
+ IEEE80211_MAX_AMPDU_BUF_HE;
hw->queues = 4;
hw->max_rx_aggregation_subframes = max_subframes;
@@ -366,14 +447,15 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
hw->radiotap_timestamp.units_pos =
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
- phy->slottime = 9;
- phy->beacon_rate = -1;
-
hw->sta_data_size = sizeof(struct mt7996_sta);
hw->vif_data_size = sizeof(struct mt7996_vif);
+ hw->chanctx_data_size = sizeof(struct mt76_chanctx);
+
+ wiphy->iface_combinations = &if_comb_global;
+ wiphy->n_iface_combinations = 1;
+
+ wiphy->radio = dev->radios;
- wiphy->iface_combinations = if_comb;
- wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7996_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->mbssid_max_interfaces = 16;
@@ -390,57 +472,31 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
- if (!mdev->dev->of_node ||
- !of_property_read_bool(mdev->dev->of_node,
- "mediatek,disable-radar-background"))
+ if (mt7996_has_background_radar(dev) &&
+ (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background")))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_RADAR_BACKGROUND);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
- ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
hw->max_tx_fragments = 4;
- if (phy->mt76->cap.has_2ghz) {
- phy->mt76->sband_2g.sband.ht_cap.cap |=
- IEEE80211_HT_CAP_LDPC_CODING |
- IEEE80211_HT_CAP_MAX_AMSDU;
- phy->mt76->sband_2g.sband.ht_cap.ampdu_density =
- IEEE80211_HT_MPDU_DENSITY_2;
- }
-
- if (phy->mt76->cap.has_5ghz) {
- phy->mt76->sband_5g.sband.ht_cap.cap |=
- IEEE80211_HT_CAP_LDPC_CODING |
- IEEE80211_HT_CAP_MAX_AMSDU;
-
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
- phy->mt76->sband_5g.sband.ht_cap.ampdu_density =
- IEEE80211_HT_MPDU_DENSITY_1;
-
- ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
- }
-
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- phy->mt76->leds.cdev.brightness_set = mt7996_led_set_brightness;
- phy->mt76->leds.cdev.blink_set = mt7996_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7996_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7996_led_set_blink;
}
- mt76_set_stream_caps(phy->mt76, true);
- mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_eht_caps(phy);
- mt7996_init_txpower(phy);
+ wiphy->max_scan_ssids = 4;
+ wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
- wiphy->available_antennas_rx = phy->mt76->antenna_mask;
- wiphy->available_antennas_tx = phy->mt76->antenna_mask;
+ mt7996_init_wiphy_band(hw, &dev->phy);
}

static void
@@ -459,6 +515,10 @@ mt7996_mac_init_band(struct mt7996_dev *dev, u8 band)
mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
MT_WF_RMAC_MIB_QOS23_BACKOFF);
+ /* clear backoff time for Tx duration */
+ mt76_clear(dev, MT_WTBLOFF_ACR(band),
+ MT_WTBLOFF_ADM_BACKOFFTIME);
+
/* clear backoff time and set software compensation for OBSS time */
mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
@@ -557,18 +617,15 @@ int mt7996_txbf_init(struct mt7996_dev *dev)
return mt7996_mcu_set_txbf(dev, BF_HW_EN_UPDATE);
}
-static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
- enum mt76_band_id band)
+static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
{
+ struct mt7996_phy *phy;
struct mt76_phy *mphy;
u32 mac_ofs, hif1_ofs = 0;
int ret;
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
- if (!mt7996_band_valid(dev, band) || band == MT_BAND0)
- return 0;
-
- if (phy)
+ if (!mt7996_band_valid(dev, band))
return 0;
if (is_mt7996(&dev->mt76) && band == MT_BAND2 && dev->hif2) {
@@ -576,7 +633,7 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
wed = &dev->mt76.mmio.wed_hif2;
}
- mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7996_ops, band);
+ mphy = mt76_alloc_radio_phy(&dev->mt76, sizeof(*phy), band);
if (!mphy)
return -ENOMEM;
@@ -607,7 +664,7 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
mt76_eeprom_override(mphy);
/* init wiphy according to mphy and phy */
- mt7996_init_wiphy(mphy->hw, wed);
+ mt7996_init_wiphy_band(mphy->hw, phy);
ret = mt7996_init_tx_queues(mphy->priv,
MT_TXQ_ID(band),
MT7996_TX_RING_SIZE,
@@ -621,14 +678,6 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
if (ret)
goto error;
- ret = mt7996_thermal_init(phy);
- if (ret)
- goto error;
-
- ret = mt7996_init_debugfs(phy);
- if (ret)
- goto error;
-
if (wed == &dev->mt76.mmio.wed_hif2 && mtk_wed_device_active(wed)) {
u32 irq_mask = dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2;
@@ -640,24 +689,14 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
error:
mphy->dev->phys[band] = NULL;
- ieee80211_free_hw(mphy->hw);
return ret;
}

static void
-mt7996_unregister_phy(struct mt7996_phy *phy, enum mt76_band_id band)
+mt7996_unregister_phy(struct mt7996_phy *phy)
{
- struct mt76_phy *mphy;
-
- if (!phy)
- return;
-
- mt7996_unregister_thermal(phy);
-
- mphy = phy->dev->mt76.phys[band];
- mt76_unregister_phy(mphy);
- ieee80211_free_hw(mphy->hw);
- phy->dev->mt76.phys[band] = NULL;
+ if (phy)
+ mt7996_unregister_thermal(phy);
}

static void mt7996_init_work(struct work_struct *work)
@@ -884,6 +923,76 @@ out:
#endif
}
+static int mt7996_variant_type_init(struct mt7996_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_PAD_GPIO);
+ u8 var_type;
+
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7990:
+ if (val & MT_PAD_GPIO_2ADIE_TBTC)
+ var_type = MT7996_VAR_TYPE_233;
+ else
+ var_type = MT7996_VAR_TYPE_444;
+ break;
+ case 0x7992:
+ if (val & MT_PAD_GPIO_ADIE_SINGLE)
+ var_type = MT7992_VAR_TYPE_23;
+ else if (u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992))
+ var_type = MT7992_VAR_TYPE_44;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev->var.type = var_type;
+ return 0;
+}
+
+static int mt7996_variant_fem_init(struct mt7996_dev *dev)
+{
+#define MT7976C_EFUSE_OFFSET 0x470
+ u8 buf[MT7996_EEPROM_BLOCK_SIZE], idx, adie_idx, adie_comb;
+ u32 regval, val = mt76_rr(dev, MT_PAD_GPIO);
+ u16 adie_id, adie_ver;
+ bool is_7976c;
+ int ret;
+
+ if (is_mt7992(&dev->mt76)) {
+ adie_idx = (val & MT_PAD_GPIO_ADIE_SINGLE) ? 0 : 1;
+ adie_comb = u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992);
+ } else {
+ adie_idx = 0;
+ adie_comb = u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB);
+ }
+
+ ret = mt7996_mcu_rf_regval(dev, MT_ADIE_CHIP_ID(adie_idx), &regval, false);
+ if (ret)
+ return ret;
+
+ ret = mt7996_mcu_get_eeprom(dev, MT7976C_EFUSE_OFFSET, buf, sizeof(buf));
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ adie_ver = u32_get_bits(regval, MT_ADIE_VERSION_MASK);
+ idx = MT7976C_EFUSE_OFFSET % MT7996_EEPROM_BLOCK_SIZE;
+ is_7976c = adie_ver == 0x8a10 || adie_ver == 0x8b00 ||
+ adie_ver == 0x8c10 || buf[idx] == 0xc;
+
+ adie_id = u32_get_bits(regval, MT_ADIE_CHIP_ID_MASK);
+ if (adie_id == 0x7975 || adie_id == 0x7979 ||
+ (adie_id == 0x7976 && is_7976c))
+ dev->var.fem = MT7996_FEM_INT;
+ else if (adie_id == 0x7977 && adie_comb == 1)
+ dev->var.fem = MT7996_FEM_MIX;
+ else
+ dev->var.fem = MT7996_FEM_EXT;
+
+ return 0;
+}
+
static int mt7996_init_hardware(struct mt7996_dev *dev)
{
int ret, idx;
@@ -899,6 +1008,10 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
INIT_LIST_HEAD(&dev->wed_rro.poll_list);
spin_lock_init(&dev->wed_rro.lock);
+ ret = mt7996_variant_type_init(dev);
+ if (ret)
+ return ret;
+
ret = mt7996_dma_init(dev);
if (ret)
return ret;
@@ -913,6 +1026,10 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
if (ret)
return ret;
+ ret = mt7996_variant_fem_init(dev);
+ if (ret)
+ return ret;
+
ret = mt7996_eeprom_init(dev);
if (ret < 0)
return ret;
@@ -963,10 +1080,12 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
static void
mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
- struct ieee80211_sta_he_cap *he_cap, int vif)
+ struct ieee80211_sta_he_cap *he_cap, int vif,
+ enum nl80211_band band)
{
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
int sts = hweight16(phy->mt76->chainmask);
+ bool non_2g = band != NL80211_BAND_2GHZ;
u8 c;
#ifdef CONFIG_MAC80211_MESH
@@ -996,10 +1115,10 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
if (is_mt7996(phy->mt76->dev))
c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g);
else
c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5;
+ (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 * non_2g);
elem->phy_cap_info[4] |= c;
@@ -1025,8 +1144,9 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
- FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
- sts - 1);
+ (FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ sts - 1) * non_2g);
+
elem->phy_cap_info[5] |= c;
if (vif != NL80211_IFTYPE_AP)
@@ -1038,8 +1158,10 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
- c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
- IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ c = 0;
+ if (non_2g)
+ c |= IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
elem->phy_cap_info[7] |= c;
}
@@ -1080,6 +1202,9 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
he_cap_elem->phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+ he_cap_elem->phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+
switch (iftype) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_RES;
@@ -1119,8 +1244,7 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
he_cap_elem->phy_cap_info[7] |=
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
@@ -1143,12 +1267,12 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
- mt7996_set_stream_he_txbf_caps(phy, he_cap, iftype);
+ mt7996_set_stream_he_txbf_caps(phy, he_cap, iftype, band);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -1190,7 +1314,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap_elem->mac_cap_info[0] =
IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
- IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
eht_cap_elem->phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
@@ -1205,13 +1331,20 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap_elem->phy_cap_info[1] =
u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
- u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK);
eht_cap_elem->phy_cap_info[2] =
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK);
+
+ if (band != NL80211_BAND_2GHZ) {
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+ }
if (band == NL80211_BAND_6GHZ) {
eht_cap_elem->phy_cap_info[0] |=
@@ -1233,21 +1366,20 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
u8_encode_bits(min_t(int, sts - 1, 2),
IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] =
u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
- u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
+ u8_encode_bits(u8_get_bits(1, GENMASK(1, 0)),
IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK);
val = width == NL80211_CHAN_WIDTH_320 ? 0xf :
width == NL80211_CHAN_WIDTH_160 ? 0x7 :
width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1;
eht_cap_elem->phy_cap_info[6] =
- u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)),
- IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
@@ -1273,8 +1405,13 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap_elem->phy_cap_info[7] =
IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ;
+
+ if (band == NL80211_BAND_2GHZ)
+ return;
+
+ eht_cap_elem->phy_cap_info[7] |=
IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
if (band != NL80211_BAND_6GHZ)
@@ -1333,6 +1470,7 @@ void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy)
int mt7996_register_device(struct mt7996_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy;
int ret;
dev->phy.dev = dev;
@@ -1354,28 +1492,27 @@ int mt7996_register_device(struct mt7996_dev *dev)
mt7996_init_wiphy(hw, &dev->mt76.mmio.wed);
- ret = mt76_register_device(&dev->mt76, true, mt76_rates,
- ARRAY_SIZE(mt76_rates));
+ ret = mt7996_register_phy(dev, MT_BAND1);
if (ret)
return ret;
- ret = mt7996_thermal_init(&dev->phy);
+ ret = mt7996_register_phy(dev, MT_BAND2);
if (ret)
return ret;
- ret = mt7996_register_phy(dev, mt7996_phy2(dev), MT_BAND1);
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
- ret = mt7996_register_phy(dev, mt7996_phy3(dev), MT_BAND2);
- if (ret)
- return ret;
+ mt7996_for_each_phy(dev, phy)
+ mt7996_thermal_init(phy);
ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
dev->recovery.hw_init_done = true;
- ret = mt7996_init_debugfs(&dev->phy);
+ ret = mt7996_init_debugfs(dev);
if (ret)
goto error;
@@ -1394,8 +1531,8 @@ error:
void mt7996_unregister_device(struct mt7996_dev *dev)
{
cancel_work_sync(&dev->wed_rro.work);
- mt7996_unregister_phy(mt7996_phy3(dev), MT_BAND2);
- mt7996_unregister_phy(mt7996_phy2(dev), MT_BAND1);
+ mt7996_unregister_phy(mt7996_phy3(dev));
+ mt7996_unregister_phy(mt7996_phy2(dev));
mt7996_unregister_thermal(&dev->phy);
mt7996_coredump_unregister(dev);
mt76_unregister_device(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 0d21414e2c88..bc8cba4dca47 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -72,7 +72,7 @@ static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
if (!sta->vif)
return NULL;
- return &sta->vif->sta.wcid;
+ return &sta->vif->deflink.sta.wcid;
}
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
@@ -182,7 +182,7 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
rssi[3] = to_rssi(GENMASK(31, 14), val);
msta->ack_signal =
- mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
+ mt76_rx_signal(msta->vif->deflink.phy->mt76->antenna_mask, rssi);
ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
}
@@ -196,7 +196,7 @@ void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
u32 addr;
- addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
+ addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->deflink.sta.wcid.idx, 5);
if (enable)
mt76_set(dev, addr, BIT(5));
else
@@ -478,11 +478,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (status->wcid) {
msta = container_of(status->wcid, struct mt7996_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
}
status->freq = mphy->chandef.chan->center_freq;
@@ -679,14 +675,25 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
+ skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
} else {
status->flag |= RX_FLAG_8023;
mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
*info);
}
- if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ if (rxv && !(status->flag & RX_FLAG_8023)) {
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
+ break;
+ case RX_ENC_HE:
+ mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ break;
+ default:
+ break;
+ }
+ }
if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
return 0;
@@ -819,12 +826,13 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- struct mt76_vif *mvif;
+ struct mt76_vif_link *mvif;
u16 tx_count = 15;
u32 val;
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -832,7 +840,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
- mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
+ mvif = vif ? (struct mt76_vif_link *)vif->drv_priv : NULL;
if (mvif) {
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -886,8 +894,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
if (is_mt7996(&dev->mt76))
val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
- else
+ else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
+
txwi[6] = cpu_to_le32(val);
txwi[7] = 0;
@@ -897,7 +906,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool mcast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
u8 idx = MT7996_BASIC_RATES_TBL;
@@ -977,7 +985,7 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (vif) {
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- txp->fw.bss_idx = mvif->mt76.idx;
+ txp->fw.bss_idx = mvif->deflink.mt76.idx;
}
txp->fw.token = cpu_to_le16(id);
@@ -1138,11 +1146,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
continue;
msta = container_of(wcid, struct mt7996_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
continue;
} else if (info & MT_TXFREE_INFO_HEADER) {
u32 tx_retries = 0, tx_failed = 0;
@@ -1368,10 +1372,7 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
out:
rcu_read_unlock();
@@ -1593,7 +1594,7 @@ mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
- mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+ mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
break;
default:
break;
@@ -1738,19 +1739,19 @@ mt7996_mac_restart(struct mt7996_dev *dev)
ret = mt7996_txbf_init(dev);
if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
- ret = mt7996_run(dev->mphy.hw);
+ ret = mt7996_run(&dev->phy);
if (ret)
goto out;
}
if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
- ret = mt7996_run(phy2->mt76->hw);
+ ret = mt7996_run(phy2);
if (ret)
goto out;
}
if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
- ret = mt7996_run(phy3->mt76->hw);
+ ret = mt7996_run(phy3);
if (ret)
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 2b34ae5e0cb5..69dd565d8319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -7,76 +7,44 @@
#include "mcu.h"
#include "mac.h"
-static bool mt7996_dev_running(struct mt7996_dev *dev)
+int mt7996_run(struct mt7996_phy *phy)
{
- struct mt7996_phy *phy;
-
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
- return true;
-
- phy = mt7996_phy2(dev);
- if (phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
- return true;
-
- phy = mt7996_phy3(dev);
-
- return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
-}
-
-int mt7996_run(struct ieee80211_hw *hw)
-{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- bool running;
+ struct mt7996_dev *dev = phy->dev;
int ret;
- running = mt7996_dev_running(dev);
- if (!running) {
- ret = mt7996_mcu_set_hdr_trans(dev, true);
- if (ret)
- goto out;
-
- if (is_mt7992(&dev->mt76)) {
- u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI);
-
- ret = mt7996_mcu_cp_support(dev, queue);
- if (ret)
- goto out;
- }
- }
-
mt7996_mac_enable_nf(dev, phy->mt76->band_idx);
ret = mt7996_mcu_set_rts_thresh(phy, 0x92b);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_radio_en(phy, true);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_thermal_throttling(phy, MT7996_THERMAL_THROTTLE_MAX);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_thermal_protect(phy, true);
if (ret)
- goto out;
+ return ret;
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
- ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(dev->mphy.hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
- if (!running)
+ if (!phy->counter_reset) {
mt7996_mac_reset_counters(phy);
+ phy->counter_reset = true;
+ }
-out:
- return ret;
+ return 0;
}

static int mt7996_start(struct ieee80211_hw *hw)
@@ -87,16 +55,23 @@ static int mt7996_start(struct ieee80211_hw *hw)
flush_work(&dev->init_work);
mutex_lock(&dev->mt76.mutex);
- ret = mt7996_run(hw);
+ ret = mt7996_mcu_set_hdr_trans(dev, true);
+ if (!ret && is_mt7992(&dev->mt76)) {
+ u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI);
+
+ ret = mt7996_mcu_cp_support(dev, queue);
+ }
mutex_unlock(&dev->mt76.mutex);
return ret;
}
-static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
+static void mt7996_stop_phy(struct mt7996_phy *phy)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev;
+
+ if (!phy || !test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ dev = phy->dev;
cancel_delayed_work_sync(&phy->mt76->mac_work);
@@ -109,6 +84,10 @@ static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&dev->mt76.mutex);
}
+static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
+{
+}
+
static inline int get_free_idx(u32 mask, u8 start, u8 end)
{
return ffs(~mask & GENMASK(end, start));
@@ -157,73 +136,133 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return -1;
}
-static void mt7996_init_bitrate_mask(struct ieee80211_vif *vif)
+static void
+mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlink)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
int i;
- for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
- mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
- mvif->bitrate_mask.control[i].he_gi = 0xff;
- mvif->bitrate_mask.control[i].he_ltf = 0xff;
- mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
- memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(mvif->bitrate_mask.control[i].ht_mcs));
- memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(mvif->bitrate_mask.control[i].vht_mcs));
- memset(mvif->bitrate_mask.control[i].he_mcs, 0xff,
- sizeof(mvif->bitrate_mask.control[i].he_mcs));
+ for (i = 0; i < ARRAY_SIZE(mlink->bitrate_mask.control); i++) {
+ mlink->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
+ mlink->bitrate_mask.control[i].he_gi = 0xff;
+ mlink->bitrate_mask.control[i].he_ltf = 0xff;
+ mlink->bitrate_mask.control[i].legacy = GENMASK(31, 0);
+ memset(mlink->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(mlink->bitrate_mask.control[i].ht_mcs));
+ memset(mlink->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(mlink->bitrate_mask.control[i].vht_mcs));
+ memset(mlink->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(mlink->bitrate_mask.control[i].he_mcs));
}
}
-static int mt7996_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int
+mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct mt7996_vif_link *mlink, struct ieee80211_key_conf *key)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt76_txq *mtxq;
- u8 band_idx = phy->mt76->band_idx;
- int idx, ret = 0;
+ struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
+ &mlink->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ u8 *wcid_keyidx = &wcid->hw_key_idx;
+ struct mt7996_phy *phy;
+ int idx = key->keyidx;
- mutex_lock(&dev->mt76.mutex);
+ phy = mt7996_vif_link_phy(mlink);
+ if (!phy)
+ return -EINVAL;
- if (vif->type == NL80211_IFTYPE_MONITOR &&
- is_zero_ether_addr(vif->addr))
- phy->monitor_vif = vif;
+ if (sta && !wcid->sta)
+ return -EOPNOTSUPP;
- mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= mt7996_max_interface_num(dev)) {
- ret = -ENOSPC;
- goto out;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ }
+ break;
+ default:
+ break;
}
- idx = get_omac_idx(vif->type, phy->omac_mask);
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
+ if (cmd == SET_KEY && !sta && !mlink->mt76.cipher) {
+ mlink->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(phy, vif, &vif->bss_conf, &mlink->mt76, true);
+ }
+
+ if (cmd == SET_KEY) {
+ *wcid_keyidx = idx;
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ return 0;
}
- mvif->mt76.omac_idx = idx;
- mvif->phy = phy;
- mvif->mt76.band_idx = band_idx;
- mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
- ret = mt7996_mcu_add_dev_info(phy, vif, true);
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (key->keyidx == 6 || key->keyidx == 7)
+ return mt7996_mcu_bcn_prot_enable(dev, vif, key);
+
+ return mt7996_mcu_add_key(&dev->mt76, vif, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
+}
+
+static void
+mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct mt7996_vif_link *mlink = data;
+
+ if (sta)
+ return;
+
+ WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, mlink, key));
+}
+
+int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink)
+{
+ struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_phy *phy = mphy->priv;
+ struct mt7996_dev *dev = phy->dev;
+ u8 band_idx = phy->mt76->band_idx;
+ struct mt76_txq *mtxq;
+ int idx, ret;
+
+ mlink->idx = __ffs64(~dev->mt76.vif_mask);
+ if (mlink->idx >= mt7996_max_interface_num(dev))
+ return -ENOSPC;
+
+ idx = get_omac_idx(vif->type, phy->omac_mask);
+ if (idx < 0)
+ return -ENOSPC;
+
+ link->phy = phy;
+ mlink->omac_idx = idx;
+ mlink->band_idx = band_idx;
+ mlink->wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
+ mlink->wcid = &link->sta.wcid;
+
+ ret = mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, true);
if (ret)
- goto out;
+ return ret;
- dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
- phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ dev->mt76.vif_mask |= BIT_ULL(mlink->idx);
+ phy->omac_mask |= BIT_ULL(mlink->omac_idx);
- idx = MT7996_WTBL_RESERVED - mvif->mt76.idx;
+ idx = MT7996_WTBL_RESERVED - mlink->idx;
- INIT_LIST_HEAD(&mvif->sta.rc_list);
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
- mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.wcid);
+ INIT_LIST_HEAD(&link->sta.rc_list);
+ link->sta.wcid.idx = idx;
+ link->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&link->sta.wcid, band_idx);
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -234,54 +273,50 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
}
if (vif->type != NL80211_IFTYPE_AP &&
- (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3))
+ (!mlink->omac_idx || mlink->omac_idx > 3))
vif->offload_flags = 0;
- vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
- mvif->mt76.basic_rates_idx = MT7996_BASIC_RATES_TBL + 4;
+ mlink->basic_rates_idx = MT7996_BASIC_RATES_TBL + 4;
else
- mvif->mt76.basic_rates_idx = MT7996_BASIC_RATES_TBL;
+ mlink->basic_rates_idx = MT7996_BASIC_RATES_TBL;
- mt7996_init_bitrate_mask(vif);
+ mt7996_init_bitrate_mask(vif, link);
- mt7996_mcu_add_bss_info(phy, vif, true);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, true);
/* defer the first STA_REC of BMC entry to BSS_CHANGED_BSSID for STA
* interface, since firmware only records BSSID when the entry is new
*/
if (vif->type != NL80211_IFTYPE_STATION)
- mt7996_mcu_add_sta(dev, vif, NULL, true, true);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_PORT_SECURE, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &link->sta.wcid);
-out:
- mutex_unlock(&dev->mt76.mutex);
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, link);
- return ret;
+ return 0;
}
-static void mt7996_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = &mvif->sta;
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int idx = msta->wcid.idx;
-
- mt7996_mcu_add_sta(dev, vif, NULL, false, false);
- mt7996_mcu_add_bss_info(phy, vif, false);
+ struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_phy *phy = mphy->priv;
+ struct mt7996_dev *dev = phy->dev;
+ struct mt7996_sta *msta;
+ int idx;
- if (vif == phy->monitor_vif)
- phy->monitor_vif = NULL;
+ msta = &link->sta;
+ idx = msta->wcid.idx;
+ mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_DISCONNECT, false);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, false);
- mt7996_mcu_add_dev_info(phy, vif, false);
+ mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mutex_lock(&dev->mt76.mutex);
- dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
- phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
- mutex_unlock(&dev->mt76.mutex);
+ dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
+ phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
@@ -291,6 +326,124 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}

+static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 filter = phy->rxfilter;
+
+ if (filter & MT_WF_RFCR_DROP_OTHER_UC) {
+ filter |= MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_FCSFAIL;
+ }
+
+ mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), filter);
+ if (filter & MT_WF_RFCR_DROP_CTL_RSV)
+ mt76_set(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+ else
+ mt76_clear(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+}
+
+static void mt7996_set_monitor(struct mt7996_phy *phy, bool enabled)
+{
+ struct mt7996_dev *dev;
+
+ if (!phy)
+ return;
+
+ dev = phy->dev;
+
+ if (enabled == !(phy->rxfilter & MT_WF_RFCR_DROP_OTHER_UC))
+ return;
+
+ if (!enabled)
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_rmw_field(dev, MT_DMA_DCR0(phy->mt76->band_idx),
+ MT_DMA_DCR0_RXD_G5_EN, enabled);
+ mt7996_phy_set_rxfilter(phy);
+ mt7996_mcu_set_sniffer_mode(phy, enabled);
+}
+
+static int mt7996_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ int i, err = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ for (i = 0; i < MT7996_MAX_RADIOS; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+
+ if (!phy || !(wdev->radio_mask & BIT(i)) ||
+ test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ continue;
+
+ err = mt7996_run(phy);
+ if (err)
+ goto out;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mt7996_set_monitor(phy, true);
+ }
+
+ mt76_vif_init(vif, &mvif->mt76);
+
+ vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
+struct mt7996_radio_data {
+ u32 active_mask;
+ u32 monitor_mask;
+};
+
+static void mt7996_remove_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif);
+ struct mt7996_radio_data *rdata = data;
+
+ rdata->active_mask |= wdev->radio_mask;
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ rdata->monitor_mask |= wdev->radio_mask;
+}
+
+static void mt7996_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_radio_data rdata = {};
+ int i;
+
+ ieee80211_iterate_active_interfaces_mtx(hw, 0, mt7996_remove_iter,
+ &rdata);
+ mt76_vif_cleanup(&dev->mt76, vif);
+
+ for (i = 0; i < MT7996_MAX_RADIOS; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+
+ if (!phy)
+ continue;
+ if (!(rdata.monitor_mask & BIT(i)))
+ mt7996_set_monitor(phy, false);
+ if (!(rdata.active_mask & BIT(i)))
+ mt7996_stop_phy(phy);
+ }
+}
+
int mt7996_set_channel(struct mt76_phy *mphy)
{
struct mt7996_phy *phy = mphy->priv;
@@ -304,6 +457,10 @@ int mt7996_set_channel(struct mt76_phy *mphy)
if (ret)
goto out;
+ ret = mt7996_mcu_set_txpower_sku(phy);
+ if (ret)
+ goto out;
+
ret = mt7996_dfs_init_radar_detector(phy);
mt7996_mac_cca_stats_reset(phy);
@@ -322,14 +479,9 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
- &mvif->sta;
- struct mt76_wcid *wcid = &msta->wcid;
- u8 *wcid_keyidx = &wcid->hw_key_idx;
- int idx = key->keyidx;
- int err = 0;
+ struct mt7996_vif_link *mlink = &mvif->deflink;
+ int err;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -354,11 +506,8 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7) {
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ if (key->keyidx == 6 || key->keyidx == 7)
break;
- }
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -366,30 +515,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- mutex_lock(&dev->mt76.mutex);
-
- if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
- mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
- mt7996_mcu_add_bss_info(phy, vif, true);
- }
+ if (!mt7996_vif_link_phy(mlink))
+ return 0; /* defer until after link add */
- if (cmd == SET_KEY) {
- *wcid_keyidx = idx;
- } else {
- if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- goto out;
- }
-
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
- if (key->keyidx == 6 || key->keyidx == 7)
- err = mt7996_mcu_bcn_prot_enable(dev, vif, key);
- else
- err = mt7996_mcu_add_key(&dev->mt76, vif, key,
- MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
-out:
+ mutex_lock(&dev->mt76.mutex);
+ err = mt7996_set_hw_key(hw, cmd, vif, sta, mlink, key);
mutex_unlock(&dev->mt76.mutex);
return err;
@@ -397,40 +527,6 @@ out:
static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int ret;
-
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ret = mt76_update_channel(phy->mt76);
- if (ret)
- return ret;
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_POWER |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
- ret = mt7996_mcu_set_txpower_sku(phy);
- if (ret)
- return ret;
- }
-
- mutex_lock(&dev->mt76.mutex);
-
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
-
- if (!enabled)
- phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
- else
- phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
-
- mt76_rmw_field(dev, MT_DMA_DCR0(phy->mt76->band_idx),
- MT_DMA_DCR0_RXD_G5_EN, enabled);
- mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
- }
-
- mutex_unlock(&dev->mt76.mutex);
-
return 0;
}
@@ -439,7 +535,8 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_vif_link *mlink = mt7996_vif_link(dev, vif, link_id);
static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
@@ -448,7 +545,7 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
};
/* firmware uses access class index */
- mvif->queue_params[mq_to_aci[queue]] = *params;
+ mlink->queue_params[mq_to_aci[queue]] = *params;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
return 0;
@@ -460,34 +557,18 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
- MT_WF_RFCR1_DROP_BF_POLL |
- MT_WF_RFCR1_DROP_BA |
- MT_WF_RFCR1_DROP_CFEND |
- MT_WF_RFCR1_DROP_CFACK;
+ struct mt7996_phy *phy;
+ u32 filter_mask = 0, filter_set = 0;
u32 flags = 0;
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- phy->rxfilter &= ~(_hw); \
- phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ filter_mask |= (_hw); \
+ filter_set |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mutex_lock(&dev->mt76.mutex);
- phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
-
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
MT_WF_RFCR_DROP_A3_BSSID);
@@ -496,57 +577,42 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
+ MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
- if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
- else
- mt76_set(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+ mt7996_for_each_phy(dev, phy) {
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI |
+ filter_mask);
+ phy->rxfilter |= filter_set;
+ mt7996_phy_set_rxfilter(phy);
+ }
mutex_unlock(&dev->mt76.mutex);
}
-static void
-mt7996_update_bss_color(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_he_bss_color *bss_color)
-{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
-
- switch (vif->type) {
- case NL80211_IFTYPE_AP: {
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
-
- if (mvif->mt76.omac_idx > HW_BSSID_MAX)
- return;
- fallthrough;
- }
- case NL80211_IFTYPE_STATION:
- mt7996_mcu_update_bss_color(dev, vif, bss_color);
- break;
- default:
- break;
- }
-}
-
static u8
-mt7996_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct mt76_phy *mphy = hw->priv;
+ struct mt7996_dev *dev = phy->dev;
+ struct mt76_vif_link *mvif = mt76_vif_conf_link(&dev->mt76, conf->vif, conf);
u16 rate;
u8 i, idx;
- rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon, mcast);
+ rate = mt76_connac2_mac_tx_rate_val(phy->mt76, conf, beacon, mcast);
if (beacon) {
- struct mt7996_phy *phy = mphy->priv;
-
/* odd index for driver, even index for firmware */
idx = MT7996_BEACON_RATES_TBL + 2 * phy->mt76->band_idx;
if (phy->beacon_rate != rate)
@@ -569,7 +635,7 @@ mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- u8 band = mvif->mt76.band_idx;
+ u8 band = mvif->deflink.mt76.band_idx;
u32 *mu;
mu = (u32 *)info->mu_group.membership;
@@ -588,20 +654,31 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *info,
u64 changed)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt76_vif_link *mvif;
+ struct mt7996_phy *phy;
+ struct mt76_phy *mphy;
mutex_lock(&dev->mt76.mutex);
+ mvif = mt76_vif_conf_link(&dev->mt76, vif, info);
+ if (!mvif)
+ goto out;
+
+ mphy = mt76_vif_link_phy(mvif);
+ if (!mphy)
+ goto out;
+
+ phy = mphy->priv;
+
/* station mode uses BSSID to map the wlan entry to a peer,
* and then peer references bss_info_rfch to set bandwidth cap.
*/
if ((changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ||
(changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) ||
(changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) {
- mt7996_mcu_add_bss_info(phy, vif, true);
- mt7996_mcu_add_sta(dev, vif, NULL, true,
+ mt7996_mcu_add_bss_info(phy, vif, info, mvif, true);
+ mt7996_mcu_add_sta(dev, vif, mvif, NULL, CONN_STATE_PORT_SECURE,
!!(changed & BSS_CHANGED_BSSID));
}
@@ -613,34 +690,39 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
if (slottime != phy->slottime) {
phy->slottime = slottime;
- mt7996_mcu_set_timing(phy, vif);
+ mt7996_mcu_set_timing(phy, vif, info);
}
}
if (changed & BSS_CHANGED_MCAST_RATE)
mvif->mcast_rates_idx =
- mt7996_get_rates_table(hw, vif, false, true);
+ mt7996_get_rates_table(phy, info, false, true);
if (changed & BSS_CHANGED_BASIC_RATES)
mvif->basic_rates_idx =
- mt7996_get_rates_table(hw, vif, false, false);
+ mt7996_get_rates_table(phy, info, false, false);
/* ensure that txcmd_mode is enabled after bss_info */
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
- mt7996_mcu_set_tx(dev, vif);
+ mt7996_mcu_set_tx(dev, vif, info);
if (changed & BSS_CHANGED_HE_OBSS_PD)
mt7996_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
- if (changed & BSS_CHANGED_HE_BSS_COLOR)
- mt7996_update_bss_color(hw, vif, &info->he_bss_color);
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if ((vif->type == NL80211_IFTYPE_AP &&
+ mvif->omac_idx <= HW_BSSID_MAX) ||
+ vif->type == NL80211_IFTYPE_STATION)
+ mt7996_mcu_update_bss_color(dev, mvif,
+ &info->he_bss_color);
+ }
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) {
mvif->beacon_rates_idx =
- mt7996_get_rates_table(hw, vif, true, false);
+ mt7996_get_rates_table(phy, info, true, false);
- mt7996_mcu_add_beacon(hw, vif, info->enable_beacon);
+ mt7996_mcu_add_beacon(hw, vif, info);
}
if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -650,6 +732,13 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MU_GROUPS)
mt7996_update_mu_group(hw, vif, info);
+ if (changed & BSS_CHANGED_TXPOWER &&
+ info->txpower != phy->txpower) {
+ phy->txpower = info->txpower;
+ mt7996_mcu_set_txpower_sku(phy);
+ }
+
+out:
mutex_unlock(&dev->mt76.mutex);
}
@@ -661,7 +750,7 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7996_dev *dev = mt7996_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7996_mcu_add_beacon(hw, vif, true);
+ mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
mutex_unlock(&dev->mt76.mutex);
}
@@ -671,8 +760,9 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- u8 band_idx = mvif->phy->mt76->band_idx;
- int ret, idx;
+ struct mt7996_vif_link *link = &mvif->deflink;
+ u8 band_idx = link->phy->mt76->band_idx;
+ int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
if (idx < 0)
@@ -684,18 +774,59 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.sta = 1;
msta->wcid.idx = idx;
msta->wcid.phy_idx = band_idx;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
ewma_avg_signal_init(&msta->avg_ack_signal);
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7996_mcu_add_sta(dev, vif, &link->mt76, sta, CONN_STATE_DISCONNECT,
+ true);
- ret = mt7996_mcu_add_sta(dev, vif, sta, true, true);
- if (ret)
- return ret;
+ return 0;
+}
+
+int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_vif_link *link = &mvif->deflink;
+ int i, ret;
+
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ ret = mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
+ CONN_STATE_CONNECT, true);
+ if (ret)
+ return ret;
- return mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
+ ret = mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
+ if (ret)
+ return ret;
+
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->wcid.sta = 1;
+
+ return 0;
+
+ case MT76_STA_EVENT_AUTHORIZE:
+ return mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
+ CONN_STATE_PORT_SECURE, false);
+
+ case MT76_STA_EVENT_DISASSOC:
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7996_mac_twt_teardown_flow(dev, msta, i);
+
+ mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
+ CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+
+ return 0;
+ }
+
+ return 0;
}
void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -703,16 +834,10 @@ void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- int i;
-
- mt7996_mcu_add_sta(dev, vif, sta, false, false);
mt7996_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7996_mac_twt_teardown_flow(dev, msta, i);
-
spin_lock_bh(&mdev->sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
list_del_init(&msta->wcid.poll_list);
@@ -731,6 +856,22 @@ static void mt7996_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ if (vif) {
+ struct mt7996_vif *mvif;
+
+ mvif = (struct mt7996_vif *)vif->drv_priv;
+ wcid = &mvif->deflink.sta.wcid;
+
+ if (mvif->mt76.roc_phy &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
+ mphy = mvif->mt76.roc_phy;
+ if (mphy->roc_link)
+ wcid = mphy->roc_link->wcid;
+ } else {
+ mphy = mt76_vif_link_phy(&mvif->deflink.mt76);
+ }
+ }
+
if (control->sta) {
struct mt7996_sta *sta;
@@ -738,11 +879,9 @@ static void mt7996_tx(struct ieee80211_hw *hw,
wcid = &sta->wcid;
}
- if (vif && !control->sta) {
- struct mt7996_vif *mvif;
-
- mvif = (struct mt7996_vif *)vif->drv_priv;
- wcid = &mvif->sta.wcid;
+ if (!mphy) {
+ ieee80211_free_txskb(hw, skb);
+ return;
}
mt76_tx(mphy, control->sta, wcid, skb);
@@ -750,12 +889,20 @@ static void mt7996_tx(struct ieee80211_hw *hw,
static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int ret;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ int i, ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
- mutex_lock(&phy->dev->mt76.mutex);
- ret = mt7996_mcu_set_rts_thresh(phy, val);
- mutex_unlock(&phy->dev->mt76.mutex);
+ ret = mt7996_mcu_set_rts_thresh(phy, val);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
@@ -818,35 +965,24 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-mt7996_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
- IEEE80211_STA_NONE);
-}
-
-static int
-mt7996_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
- IEEE80211_STA_NOTEXIST);
-}
-
-static int
mt7996_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt76_mib_stats *mib = &phy->mib;
+ int i;
mutex_lock(&dev->mt76.mutex);
- stats->dot11RTSSuccessCount = mib->rts_cnt;
- stats->dot11RTSFailureCount = mib->rts_retries_cnt;
- stats->dot11FCSErrorCount = mib->fcs_err_cnt;
- stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+ struct mt76_mib_stats *mib = &phy->mib;
+
+ stats->dot11RTSSuccessCount += mib->rts_cnt;
+ stats->dot11RTSFailureCount += mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount += mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount += mib->ack_fail_cnt;
+ }
mutex_unlock(&dev->mt76.mutex);
@@ -856,17 +992,20 @@ mt7996_get_stats(struct ieee80211_hw *hw,
u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
union {
u64 t64;
u32 t32[2];
} tsf;
u16 n;
+ if (!phy)
+ return 0;
+
lockdep_assert_held(&dev->mt76.mutex);
- n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->mt76.omac_idx;
+ n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->deflink.mt76.omac_idx;
/* TSF software read */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
@@ -896,17 +1035,20 @@ mt7996_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
+ if (!phy)
+ return;
+
mutex_lock(&dev->mt76.mutex);
- n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->mt76.omac_idx;
+ n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->deflink.mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software overwrite */
@@ -922,17 +1064,20 @@ mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
+ if (!phy)
+ return;
+
mutex_lock(&dev->mt76.mutex);
- n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->mt76.omac_idx;
+ n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->deflink.mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software adjust */
@@ -945,12 +1090,14 @@ mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static void
mt7996_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy;
mutex_lock(&dev->mt76.mutex);
- phy->coverage_class = max_t(s16, coverage_class, 0);
- mt7996_mac_set_coverage_class(phy);
+ mt7996_for_each_phy(dev, phy) {
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7996_mac_set_coverage_class(phy);
+ }
mutex_unlock(&dev->mt76.mutex);
}
@@ -958,33 +1105,33 @@ static int
mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int max_nss = hweight8(hw->wiphy->available_antennas_tx);
- u8 band_idx = phy->mt76->band_idx, shift = dev->chainshift[band_idx];
+ int i;
- if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ if (tx_ant != rx_ant)
return -EINVAL;
- if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
- tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+
+ if (!(tx_ant & phy->orig_chainmask))
+ return -EINVAL;
+ }
mutex_lock(&dev->mt76.mutex);
- phy->mt76->antenna_mask = tx_ant;
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+ u8 band_idx = phy->mt76->band_idx;
+ u8 shift = dev->chainshift[band_idx];
- /* restore to the origin chainmask which might have auxiliary path */
- if (hweight8(tx_ant) == max_nss && band_idx < MT_BAND2)
- phy->mt76->chainmask = ((dev->chainmask >> shift) &
- (BIT(dev->chainshift[band_idx + 1] - shift) - 1)) << shift;
- else if (hweight8(tx_ant) == max_nss)
- phy->mt76->chainmask = (dev->chainmask >> shift) << shift;
- else
- phy->mt76->chainmask = tx_ant << shift;
+ phy->mt76->chainmask = tx_ant & phy->orig_chainmask;
+ phy->mt76->antenna_mask = phy->mt76->chainmask >> shift;
- mt76_set_stream_caps(phy->mt76, true);
- mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_eht_caps(phy);
- mt7996_mcu_set_txpower_sku(phy);
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7996_set_stream_vht_txbf_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
+ mt7996_mcu_set_txpower_sku(phy);
+ }
mutex_unlock(&dev->mt76.mutex);
@@ -996,7 +1143,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct rate_info *txrate = &msta->wcid.rate;
@@ -1030,7 +1177,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
- if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
@@ -1048,7 +1195,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
u32 *changed = data;
spin_lock_bh(&dev->mt76.sta_poll_lock);
@@ -1063,9 +1210,8 @@ static void mt7996_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
mt7996_sta_rc_work(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -1075,12 +1221,11 @@ static int
mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
- mvif->bitrate_mask = *mask;
+ mvif->deflink.bitrate_mask = *mask;
/* if multiple rates across different preambles are given we can
* reconfigure this info with all peers using sta_rec command with
@@ -1109,6 +1254,9 @@ static void mt7996_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
}
@@ -1125,6 +1273,9 @@ static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
}
@@ -1258,7 +1409,7 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
struct mt76_ethtool_worker_info *wi = wi_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- if (msta->vif->mt76.idx != wi->idx)
+ if (msta->vif->deflink.mt76.idx != wi->idx)
return;
mt76_ethtool_worker(wi, &msta->wcid.stats, true);
@@ -1270,16 +1421,19 @@ void mt7996_get_et_stats(struct ieee80211_hw *hw,
struct ethtool_stats *stats, u64 *data)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
struct mt76_mib_stats *mib = &phy->mib;
struct mt76_ethtool_worker_info wi = {
.data = data,
- .idx = mvif->mt76.idx,
+ .idx = mvif->deflink.mt76.idx,
};
/* See mt7996_ampdu_stat_read_phy, etc */
int i, ei = 0;
+ if (!phy)
+ return;
+
mutex_lock(&dev->mt76.mutex);
mt7996_mac_update_stats(phy);
@@ -1373,11 +1527,18 @@ static int
mt7996_set_radar_background(struct ieee80211_hw *hw,
struct cfg80211_chan_def *chandef)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy;
int ret = -EINVAL;
bool running;
+ if (chandef)
+ phy = mt7996_band_phy(dev, chandef->chan->band);
+ else
+ phy = dev->rdd2_phy;
+ if (!phy)
+ return -EINVAL;
+
mutex_lock(&dev->mt76.mutex);
if (dev->mt76.region == NL80211_DFS_UNSET)
@@ -1428,9 +1589,14 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_vif_link *mlink = &mvif->deflink;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mt7996_phy *phy;
+
+ phy = mt7996_vif_link_phy(mlink);
+ if (!phy)
+ return -ENODEV;
if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
wed = &dev->mt76.mmio.wed_hif2;
@@ -1438,13 +1604,13 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
if (!mtk_wed_device_active(wed))
return -ENODEV;
- if (msta->wcid.idx > MT7996_WTBL_STA)
+ if (!msta->wcid.sta || msta->wcid.idx > MT7996_WTBL_STA)
return -EIO;
path->type = DEV_PATH_MTK_WDMA;
path->dev = ctx->dev;
path->mtk_wdma.wdma_idx = wed->wdma_idx;
- path->mtk_wdma.bss = mvif->mt76.idx;
+ path->mtk_wdma.bss = mvif->deflink.mt76.idx;
path->mtk_wdma.queue = 0;
path->mtk_wdma.wcid = msta->wcid.idx;
@@ -1457,10 +1623,12 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7996_ops = {
- .add_chanctx = ieee80211_emulate_add_chanctx,
- .remove_chanctx = ieee80211_emulate_remove_chanctx,
- .change_chanctx = ieee80211_emulate_change_chanctx,
- .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
+ .add_chanctx = mt76_add_chanctx,
+ .remove_chanctx = mt76_remove_chanctx,
+ .change_chanctx = mt76_change_chanctx,
+ .assign_vif_chanctx = mt76_assign_vif_chanctx,
+ .unassign_vif_chanctx = mt76_unassign_vif_chanctx,
+ .switch_vif_chanctx = mt76_switch_vif_chanctx,
.tx = mt7996_tx,
.start = mt7996_start,
.stop = mt7996_stop,
@@ -1470,16 +1638,17 @@ const struct ieee80211_ops mt7996_ops = {
.conf_tx = mt7996_conf_tx,
.configure_filter = mt7996_configure_filter,
.bss_info_changed = mt7996_bss_info_changed,
- .sta_add = mt7996_sta_add,
- .sta_remove = mt7996_sta_remove,
+ .sta_state = mt76_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.link_sta_rc_update = mt7996_sta_rc_update,
.set_key = mt7996_set_key,
.ampdu_action = mt7996_ampdu_action,
.set_rts_threshold = mt7996_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
- .sw_scan_start = mt76_sw_scan,
- .sw_scan_complete = mt76_sw_scan_complete,
+ .hw_scan = mt76_hw_scan,
+ .cancel_hw_scan = mt76_cancel_hw_scan,
+ .remain_on_channel = mt76_remain_on_channel,
+ .cancel_remain_on_channel = mt76_cancel_remain_on_channel,
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7996_channel_switch_beacon,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 6c445a9dbc03..e4569d032221 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -14,11 +14,23 @@
char *_fw; \
switch (mt76_chip(&(_dev)->mt76)) { \
case 0x7992: \
- _fw = MT7992_##name; \
+ switch ((_dev)->var.type) { \
+ case MT7992_VAR_TYPE_23: \
+ _fw = MT7992_##name##_23; \
+ break; \
+ default: \
+ _fw = MT7992_##name; \
+ } \
break; \
case 0x7990: \
default: \
- _fw = MT7996_##name; \
+ switch ((_dev)->var.type) { \
+ case MT7996_VAR_TYPE_233: \
+ _fw = MT7996_##name##_233; \
+ break; \
+ default: \
+ _fw = MT7996_##name; \
+ } \
break; \
} \
_fw; \
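
The firmware macro above now dispatches on both the chip id and the newly added variant type. A standalone sketch of the same two-level selection as a plain function; the chip ids match the switch above, while the returned strings are just the macro names, not firmware paths:

/* Sketch of the chip/variant firmware selection; the variant constants
 * are illustrative stand-ins for the driver's enum values. */
#include <stdio.h>

enum { CHIP_MT7990 = 0x7990, CHIP_MT7992 = 0x7992 };
enum { VAR_DEFAULT, VAR_MT7996_233, VAR_MT7992_23 };

static const char *fw_name(int chip, int var)
{
        switch (chip) {
        case CHIP_MT7992:
                return var == VAR_MT7992_23 ? "MT7992_FIRMWARE_WM_23"
                                            : "MT7992_FIRMWARE_WM";
        case CHIP_MT7990:
        default:
                return var == VAR_MT7996_233 ? "MT7996_FIRMWARE_WM_233"
                                             : "MT7996_FIRMWARE_WM";
        }
}

int main(void)
{
        printf("%s\n", fw_name(CHIP_MT7992, VAR_MT7992_23));
        printf("%s\n", fw_name(CHIP_MT7990, VAR_DEFAULT));
        return 0;
}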
@@ -110,8 +122,8 @@ mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
u16 mcs_map)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
- const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ enum nl80211_band band = msta->vif->deflink.phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->deflink.bitrate_mask.control[band].he_mcs;
int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
for (nss = 0; nss < max_nss; nss++) {
@@ -744,8 +756,7 @@ mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
}
static void
-mt7996_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7996_phy *phy)
+mt7996_mcu_bss_rfch_tlv(struct sk_buff *skb, struct mt7996_phy *phy)
{
static const u8 rlm_ch_band[] = {
[NL80211_BAND_2GHZ] = 1,
@@ -775,8 +786,7 @@ mt7996_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7996_phy *phy)
+mt7996_mcu_bss_ra_tlv(struct sk_buff *skb, struct mt7996_phy *phy)
{
struct bss_ra_tlv *ra;
struct tlv *tlv;
@@ -789,6 +799,7 @@ mt7996_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
static void
mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct mt7996_phy *phy)
{
#define DEFAULT_HE_PE_DURATION 4
@@ -802,11 +813,11 @@ mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_uni_he *)tlv;
- he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ he->he_pe_duration = link_conf->htc_trig_based_pkt_ext;
if (!he->he_pe_duration)
he->he_pe_duration = DEFAULT_HE_PE_DURATION;
- he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ he->he_rts_thres = cpu_to_le16(link_conf->frame_time_rts_th);
if (!he->he_rts_thres)
he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
@@ -816,13 +827,13 @@ mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, int enable)
+mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
+ bool enable)
{
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator && enable)
+ if (!link_conf->bssid_indicator && enable)
return;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
@@ -830,23 +841,22 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
mbssid = (struct bss_info_uni_mbssid *)tlv;
if (enable) {
- mbssid->max_indicator = vif->bss_conf.bssid_indicator;
- mbssid->mbss_idx = vif->bss_conf.bssid_index;
+ mbssid->max_indicator = link_conf->bssid_indicator;
+ mbssid->mbss_idx = link_conf->bssid_index;
mbssid->tx_bss_omac_idx = 0;
}
}
static void
-mt7996_mcu_bss_bmc_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+mt7996_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink,
struct mt7996_phy *phy)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct bss_rate_tlv *bmc;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
enum nl80211_band band = chandef->chan->band;
struct tlv *tlv;
- u8 idx = mvif->mcast_rates_idx ?
- mvif->mcast_rates_idx : mvif->basic_rates_idx;
+ u8 idx = mlink->mcast_rates_idx ?
+ mlink->mcast_rates_idx : mlink->basic_rates_idx;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_RATE, sizeof(*bmc));
@@ -870,9 +880,8 @@ mt7996_mcu_bss_txcmd_tlv(struct sk_buff *skb, bool en)
}
static void
-mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
@@ -880,33 +889,28 @@ mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
mld = (struct bss_mld_tlv *)tlv;
mld->group_mld_id = 0xff;
- mld->own_mld_id = mvif->mt76.idx;
+ mld->own_mld_id = mlink->idx;
mld->remap_idx = 0xff;
}
static void
-mt7996_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7996_mcu_bss_sec_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct bss_sec_tlv *sec;
struct tlv *tlv;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_SEC, sizeof(*sec));
sec = (struct bss_sec_tlv *)tlv;
- sec->cipher = mvif->cipher;
+ sec->cipher = mlink->cipher;
}
static int
-mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
- bool bssid, bool enable)
+mt7996_mcu_muar_config(struct mt7996_dev *dev, struct mt76_vif_link *mlink,
+ const u8 *addr, bool bssid, bool enable)
{
#define UNI_MUAR_ENTRY 2
- struct mt7996_dev *dev = phy->dev;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
- const u8 *addr = vif->addr;
-
+ u32 idx = mlink->omac_idx - REPEATER_BSSID_START;
struct {
struct {
u8 band;
@@ -923,7 +927,7 @@ mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
u8 addr[ETH_ALEN];
u8 __rsv[2];
} __packed req = {
- .hdr.band = phy->mt76->band_idx,
+ .hdr.band = mlink->band_idx,
.tag = cpu_to_le16(UNI_MUAR_ENTRY),
.len = cpu_to_le16(sizeof(req) - sizeof(req.hdr)),
.smesh = false,
@@ -931,9 +935,6 @@ mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
.entry_add = true,
};
- if (bssid)
- addr = vif->bss_conf.bssid;
-
if (enable)
memcpy(req.addr, addr, ETH_ALEN);
@@ -942,10 +943,8 @@ mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_bss_ifs_timing_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7996_mcu_bss_ifs_timing_tlv(struct sk_buff *skb, struct mt7996_phy *phy)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->phy;
struct bss_ifs_time_tlv *ifs_time;
struct tlv *tlv;
bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ;
@@ -972,15 +971,16 @@ mt7996_mcu_bss_ifs_timing_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
static int
mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mvif,
struct mt76_phy *phy, u16 wlan_idx,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct cfg80211_chan_def *chandef = &phy->chandef;
struct mt76_connac_bss_basic_tlv *bss;
u32 type = CONNECTION_INFRA_AP;
u16 sta_wlan_idx = wlan_idx;
+ struct ieee80211_sta *sta;
struct tlv *tlv;
int idx;
@@ -992,9 +992,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
case NL80211_IFTYPE_STATION:
if (enable) {
rcu_read_lock();
- if (!sta)
- sta = ieee80211_find_sta(vif,
- vif->bss_conf.bssid);
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
if (sta) {
struct mt76_wcid *wcid;
@@ -1017,8 +1015,8 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*bss));
bss = (struct mt76_connac_bss_basic_tlv *)tlv;
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bcn_interval = cpu_to_le16(link_conf->beacon_int);
+ bss->dtim_period = link_conf->dtim_period;
bss->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx);
bss->sta_idx = cpu_to_le16(sta_wlan_idx);
bss->conn_type = cpu_to_le32(type);
@@ -1036,19 +1034,19 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
return 0;
}
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ memcpy(bss->bssid, link_conf->bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(link_conf->beacon_int);
bss->dtim_period = vif->bss_conf.dtim_period;
bss->phymode = mt76_connac_get_phy_mode(phy, vif,
chandef->chan->band, NULL);
- bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, vif,
+ bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, &vif->bss_conf,
chandef->chan->band);
return 0;
}
static struct sk_buff *
-__mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
+__mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int len)
{
struct bss_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -1064,71 +1062,72 @@ __mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
return skb;
}
-int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, int enable)
+int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, int enable)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
struct sk_buff *skb;
- if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
- mt7996_mcu_muar_config(phy, vif, false, enable);
- mt7996_mcu_muar_config(phy, vif, true, enable);
+ if (mlink->omac_idx >= REPEATER_BSSID_START) {
+ mt7996_mcu_muar_config(dev, mlink, link_conf->addr, false, enable);
+ mt7996_mcu_muar_config(dev, mlink, link_conf->bssid, true, enable);
}
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink,
MT7996_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* bss_basic must be first */
- mt7996_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
- mvif->sta.wcid.idx, enable);
- mt7996_mcu_bss_sec_tlv(skb, vif);
+ mt7996_mcu_bss_basic_tlv(skb, vif, link_conf, mlink, phy->mt76,
+ mlink->wcid->idx, enable);
+ mt7996_mcu_bss_sec_tlv(skb, mlink);
if (vif->type == NL80211_IFTYPE_MONITOR)
goto out;
if (enable) {
- mt7996_mcu_bss_rfch_tlv(skb, vif, phy);
- mt7996_mcu_bss_bmc_tlv(skb, vif, phy);
- mt7996_mcu_bss_ra_tlv(skb, vif, phy);
+ mt7996_mcu_bss_rfch_tlv(skb, phy);
+ mt7996_mcu_bss_bmc_tlv(skb, mlink, phy);
+ mt7996_mcu_bss_ra_tlv(skb, phy);
mt7996_mcu_bss_txcmd_tlv(skb, true);
- mt7996_mcu_bss_ifs_timing_tlv(skb, vif);
+ mt7996_mcu_bss_ifs_timing_tlv(skb, phy);
if (vif->bss_conf.he_support)
- mt7996_mcu_bss_he_tlv(skb, vif, phy);
+ mt7996_mcu_bss_he_tlv(skb, vif, link_conf, phy);
/* this tag is necessary no matter if the vif is MLD */
- mt7996_mcu_bss_mld_tlv(skb, vif);
+ mt7996_mcu_bss_mld_tlv(skb, mlink);
}
- mt7996_mcu_bss_mbssid_tlv(skb, vif, phy, enable);
+ mt7996_mcu_bss_mbssid_tlv(skb, link_conf, enable);
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
-int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif)
+int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink = mt76_vif_conf_link(&dev->mt76, vif, link_conf);
struct sk_buff *skb;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink,
MT7996_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7996_mcu_bss_ifs_timing_tlv(skb, vif);
+ mt7996_mcu_bss_ifs_timing_tlv(skb, phy);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
static int
-mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif *mvif,
+mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
{
@@ -1168,7 +1167,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
if (enable && !params->amsdu)
msta->wcid.amsdu = false;
- return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, true);
+ return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, true);
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
@@ -1178,7 +1177,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
struct mt7996_vif *mvif = msta->vif;
- return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, false);
+ return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, false);
}
static void
@@ -1461,17 +1460,21 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
+mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf, struct mt7996_phy *phy)
{
bf->sounding_phy = MT_PHY_TYPE_OFDM;
bf->ndp_rate = 0; /* mcs0 */
- bf->ndpa_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ if (is_mt7996(phy->mt76->dev))
+ bf->ndpa_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ else
+ bf->ndpa_rate = MT7992_CFEND_RATE_DEFAULT; /* ofdm 6m */
+
bf->rept_poll_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
}
static void
mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
- struct sta_rec_bf *bf)
+ struct sta_rec_bf *bf, bool explicit)
{
struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
u8 n = 0;
@@ -1491,7 +1494,8 @@ mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
bf->nrow = hweight8(phy->mt76->antenna_mask) - 1;
bf->ncol = min_t(u8, bf->nrow, n);
- bf->ibf_ncol = n;
+ bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
+ min_t(u8, MT7996_IBF_MAX_NC, n);
}
static void
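
Each of the ibf_ncol assignments in these hunks now clamps the column count to MT7996_IBF_MAX_NC (defined as 2 further down) instead of passing the raw stream count through. A small sketch of the clamp, with min_u8 standing in for the kernel's min_t(u8, ...):

/* Sketch of the iBF column clamp; IBF_MAX_NC mirrors MT7996_IBF_MAX_NC. */
#include <stdint.h>
#include <stdio.h>

#define IBF_MAX_NC 2

static uint8_t min_u8(uint8_t a, uint8_t b) { return a < b ? a : b; }

static uint8_t ibf_ncol(uint8_t ncol, uint8_t nss_streams, int explicit_bf)
{
        /* explicit beamforming derives from the eBF column count,
         * implicit from the peer's stream count; both are clamped */
        return explicit_bf ? min_u8(IBF_MAX_NC, ncol)
                           : min_u8(IBF_MAX_NC, nss_streams);
}

int main(void)
{
        printf("ebf ncol=3 -> %u\n", ibf_ncol(3, 3, 1)); /* clamps to 2 */
        printf("ibf nss=1 -> %u\n", ibf_ncol(3, 1, 0));  /* stays 1 */
        return 0;
}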
@@ -1509,7 +1513,7 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
if (explicit) {
u8 sts, snd_dim;
- mt7996_mcu_sta_sounding_rate(bf);
+ mt7996_mcu_sta_sounding_rate(bf, phy);
sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
pc->cap);
@@ -1517,14 +1521,14 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
vc->cap);
bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant);
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = bf->ncol;
+ bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, bf->ncol);
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->nrow = 1;
} else {
bf->nrow = tx_ant;
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = nss_mcs;
+ bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->ibf_nrow = 1;
@@ -1533,7 +1537,8 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
static void
mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf)
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
@@ -1549,7 +1554,7 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->tx_mode = MT_PHY_TYPE_HE_SU;
- mt7996_mcu_sta_sounding_rate(bf);
+ mt7996_mcu_sta_sounding_rate(bf, phy);
bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB,
pe->phy_cap_info[6]);
@@ -1561,7 +1566,8 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
pe->phy_cap_info[4]);
bf->nrow = min_t(u8, snd_dim, sts);
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = bf->ncol;
+ bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
+ min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
return;
@@ -1596,7 +1602,8 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
static void
mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf)
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
@@ -1610,7 +1617,7 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->tx_mode = MT_PHY_TYPE_EHT_MU;
- mt7996_mcu_sta_sounding_rate(bf);
+ mt7996_mcu_sta_sounding_rate(bf, phy);
bf->trigger_su = EHT_PHY(CAP3_TRIG_SU_BF_FDBK, pe->phy_cap_info[3]);
bf->trigger_mu = EHT_PHY(CAP3_TRIG_MU_BF_PART_BW_FDBK, pe->phy_cap_info[3]);
@@ -1619,7 +1626,8 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
(EHT_PHY(CAP1_BEAMFORMEE_SS_80MHZ_MASK, pe->phy_cap_info[1]) << 1);
bf->nrow = min_t(u8, snd_dim, sts);
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = bf->ncol;
+ bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
+ min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_160)
return;
@@ -1654,12 +1662,15 @@ static void
mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
+#define EBF_MODE BIT(0)
+#define IBF_MODE BIT(1)
+#define BF_MAT_ORDER 4
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->phy;
+ struct mt7996_phy *phy = mvif->deflink.phy;
int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
- static const u8 matrix[4][4] = {
+ static const u8 matrix[BF_MAT_ORDER][BF_MAT_ORDER] = {
{0, 0, 0, 0},
{1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
{2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
@@ -1677,35 +1688,44 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
- /* he/eht: eBF only, in accordance with spec
+ /* he/eht: eBF only, except mt7992 with 5T on 5GHz, which also supports iBF
* vht: support eBF and iBF
* ht: iBF only, since mac80211 lacks of eBF support
*/
- if (sta->deflink.eht_cap.has_eht && ebf)
- mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf);
- else if (sta->deflink.he_cap.has_he && ebf)
- mt7996_mcu_sta_bfer_he(sta, vif, phy, bf);
+ if (sta->deflink.eht_cap.has_eht)
+ mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf, ebf);
+ else if (sta->deflink.he_cap.has_he)
+ mt7996_mcu_sta_bfer_he(sta, vif, phy, bf, ebf);
else if (sta->deflink.vht_cap.vht_supported)
mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf);
else if (sta->deflink.ht_cap.ht_supported)
- mt7996_mcu_sta_bfer_ht(sta, phy, bf);
+ mt7996_mcu_sta_bfer_ht(sta, phy, bf, ebf);
else
return;
- bf->bf_cap = ebf ? ebf : dev->ibf << 1;
+ bf->bf_cap = ebf ? EBF_MODE : (dev->ibf ? IBF_MODE : 0);
+ if (is_mt7992(&dev->mt76) && tx_ant == 4)
+ bf->bf_cap |= IBF_MODE;
bf->bw = sta->deflink.bandwidth;
bf->ibf_dbw = sta->deflink.bandwidth;
bf->ibf_nrow = tx_ant;
- if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
- bf->ibf_timeout = 0x48;
+ if (sta->deflink.eht_cap.has_eht || sta->deflink.he_cap.has_he)
+ bf->ibf_timeout = is_mt7996(&dev->mt76) ? MT7996_IBF_TIMEOUT :
+ MT7992_IBF_TIMEOUT;
+ else if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ bf->ibf_timeout = MT7996_IBF_TIMEOUT_LEGACY;
else
- bf->ibf_timeout = 0x18;
+ bf->ibf_timeout = MT7996_IBF_TIMEOUT;
- if (ebf && bf->nrow != tx_ant)
- bf->mem_20m = matrix[tx_ant][bf->ncol];
- else
- bf->mem_20m = matrix[bf->nrow][bf->ncol];
+ if (bf->ncol < BF_MAT_ORDER) {
+ if (ebf)
+ bf->mem_20m = tx_ant < BF_MAT_ORDER ?
+ matrix[tx_ant][bf->ncol] : 0;
+ else
+ bf->mem_20m = bf->nrow < BF_MAT_ORDER ?
+ matrix[bf->nrow][bf->ncol] : 0;
+ }
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
@@ -1726,7 +1746,7 @@ mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->phy;
+ struct mt7996_phy *phy = mvif->deflink.phy;
int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
@@ -1783,11 +1803,9 @@ mt7996_mcu_sta_hdrt_tlv(struct mt7996_dev *dev, struct sk_buff *skb)
static void
mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif, struct mt76_wcid *wcid)
{
struct sta_rec_hdr_trans *hdr_trans;
- struct mt76_wcid *wcid;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HDR_TRANS, sizeof(*hdr_trans));
@@ -1799,10 +1817,9 @@ mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- if (!sta)
+ if (!wcid)
return;
- wcid = (struct mt76_wcid *)sta->drv_priv;
hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
hdr_trans->to_ds = true;
@@ -1867,7 +1884,7 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif
struct sk_buff *skb;
struct tlv *tlv;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
&msta->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
@@ -1903,8 +1920,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
struct ieee80211_sta *sta)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ struct cfg80211_chan_def *chandef = &mvif->deflink.phy->mt76->chandef;
+ struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_phy_uni phy = {};
int ret, nrates = 0;
@@ -1991,9 +2008,9 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
{
#define INIT_RCPI 180
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt76_phy *mphy = mvif->phy->mt76;
+ struct mt76_phy *mphy = mvif->deflink.phy->mt76;
struct cfg80211_chan_def *chandef = &mphy->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra_uni *ra;
struct tlv *tlv;
@@ -2070,7 +2087,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
cap |= STA_CAP_VHT_TX_STBC;
if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if (vif->bss_conf.vht_ldpc &&
+ if ((vif->type != NL80211_IFTYPE_AP || vif->bss_conf.vht_ldpc) &&
(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
@@ -2099,7 +2116,7 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
&msta->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
@@ -2146,10 +2163,10 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
.tag = cpu_to_le16(UNI_VOW_DRR_CTRL),
.len = cpu_to_le16(sizeof(req) - 4),
.action = cpu_to_le32(MT_STA_BSS_GROUP),
- .val = cpu_to_le32(mvif->mt76.idx % 16),
+ .val = cpu_to_le32(mvif->deflink.mt76.idx % 16),
};
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
req.wlan_idx = cpu_to_le16(msta->wcid.idx);
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
@@ -2157,34 +2174,35 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
}
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable, bool newly)
+ struct mt76_vif_link *mlink,
+ struct ieee80211_sta *sta, int conn_state, bool newly)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct ieee80211_link_sta *link_sta;
- struct mt7996_sta *msta;
+ struct ieee80211_link_sta *link_sta = NULL;
+ struct mt76_wcid *wcid = mlink->wcid;
struct sk_buff *skb;
- int conn_state;
int ret;
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
- link_sta = sta ? &sta->deflink : NULL;
+ if (sta) {
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid,
+ wcid = &msta->wcid;
+ link_sta = &sta->deflink;
+ }
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mlink, wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta,
conn_state, newly);
- if (!enable)
+ if (conn_state == CONN_STATE_DISCONNECT)
goto out;
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, wcid);
/* starec tx proc */
mt7996_mcu_sta_tx_proc_tlv(skb);
@@ -2273,7 +2291,7 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct sk_buff *skb;
int ret;
@@ -2299,7 +2317,7 @@ static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct tlv *tlv;
int ret;
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &mvif->sta.wcid);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76, &mvif->deflink.sta.wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2335,7 +2353,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
sizeof(struct mt7996_mcu_bcn_prot_tlv);
int ret;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, len);
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2375,11 +2393,12 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
-int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, bool enable)
+
+int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, bool enable)
{
struct mt7996_dev *dev = phy->dev;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct {
struct req_hdr {
u8 omac_idx;
@@ -2395,8 +2414,8 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
} __packed tlv;
} data = {
.hdr = {
- .omac_idx = mvif->mt76.omac_idx,
- .band_idx = mvif->mt76.band_idx,
+ .omac_idx = mlink->omac_idx,
+ .band_idx = mlink->band_idx,
},
.tlv = {
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
@@ -2405,18 +2424,18 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
},
};
- if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
- return mt7996_mcu_muar_config(phy, vif, false, enable);
+ if (mlink->omac_idx >= REPEATER_BSSID_START)
+ return mt7996_mcu_muar_config(dev, mlink, link_conf->addr, false, enable);
- memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ memcpy(data.tlv.omac_addr, link_conf->addr, ETH_ALEN);
return mt76_mcu_send_msg(&dev->mt76, MCU_WMWA_UNI_CMD(DEV_INFO_UPDATE),
&data, sizeof(data), true);
}
static void
-mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
- struct sk_buff *skb,
- struct ieee80211_mutable_offsets *offs)
+mt7996_mcu_beacon_cntdwn(struct sk_buff *rskb, struct sk_buff *skb,
+ struct ieee80211_mutable_offsets *offs,
+ bool csa)
{
struct bss_bcn_cntdwn_tlv *info;
struct tlv *tlv;
@@ -2425,7 +2444,7 @@ mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
if (!offs->cntdwn_counter_offs[0])
return;
- tag = vif->bss_conf.csa_active ? UNI_BSS_INFO_BCN_CSA : UNI_BSS_INFO_BCN_BCC;
+ tag = csa ? UNI_BSS_INFO_BCN_CSA : UNI_BSS_INFO_BCN_BCC;
tlv = mt7996_mcu_add_uni_tlv(rskb, tag, sizeof(*info));
@@ -2435,16 +2454,13 @@ mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
static void
mt7996_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct bss_bcn_content_tlv *bcn,
+ struct bss_bcn_content_tlv *bcn,
struct ieee80211_mutable_offsets *offs)
{
struct bss_bcn_mbss_tlv *mbss;
const struct element *elem;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator)
- return;
-
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_MBSSID, sizeof(*mbss));
mbss = (struct bss_bcn_mbss_tlv *)tlv;
@@ -2487,7 +2503,8 @@ mt7996_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
}
static void
-mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+mt7996_mcu_beacon_cont(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
struct sk_buff *rskb, struct sk_buff *skb,
struct bss_bcn_content_tlv *bcn,
struct ieee80211_mutable_offsets *offs)
@@ -2501,9 +2518,9 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
if (offs->cntdwn_counter_offs[0]) {
u16 offset = offs->cntdwn_counter_offs[0];
- if (vif->bss_conf.csa_active)
+ if (link_conf->csa_active)
bcn->csa_ie_pos = cpu_to_le16(offset - 4);
- if (vif->bss_conf.color_change_active)
+ if (link_conf->color_change_active)
bcn->bcc_ie_pos = cpu_to_le16(offset - 3);
}
@@ -2514,56 +2531,63 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
-int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int en)
+int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_vif_link *mlink = mt76_vif_conf_link(&dev->mt76, vif, link_conf);
struct ieee80211_mutable_offsets offs;
struct ieee80211_tx_info *info;
struct sk_buff *skb, *rskb;
struct tlv *tlv;
struct bss_bcn_content_tlv *bcn;
- int len;
+ int len, extra_len = 0;
- if (vif->bss_conf.nontransmitted)
+ if (link_conf->nontransmitted)
return 0;
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ if (!mlink)
+ return -EINVAL;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink,
MT7996_MAX_BSS_OFFLOAD_SIZE);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
- skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
- if (!skb) {
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, link_conf->link_id);
+ if (link_conf->enable_beacon && !skb) {
dev_kfree_skb(rskb);
return -EINVAL;
}
- if (skb->len > MT7996_MAX_BEACON_SIZE) {
- dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
- dev_kfree_skb(rskb);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
+ if (skb) {
+ if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceeded\n");
+ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
- info = IEEE80211_SKB_CB(skb);
- info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+ extra_len = skb->len;
+ }
- len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
+ len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + extra_len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
bcn = (struct bss_bcn_content_tlv *)tlv;
- bcn->enable = en;
- if (!en)
+ bcn->enable = link_conf->enable_beacon;
+ if (!bcn->enable)
goto out;
- mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
- mt7996_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs);
- mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs);
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mlink->band_idx);
+
+ mt7996_mcu_beacon_cont(dev, link_conf, rskb, skb, bcn, &offs);
+ if (link_conf->bssid_indicator)
+ mt7996_mcu_beacon_mbss(rskb, skb, bcn, &offs);
+ mt7996_mcu_beacon_cntdwn(rskb, skb, &offs, link_conf->csa_active);
out:
dev_kfree_skb(skb);
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ return mt76_mcu_skb_send_msg(&dev->mt76, rskb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
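
Because the beacon template is now optional, the TLV length is computed from extra_len (zero when beaconing is disabled) and rounded up to a 4-byte boundary. A tiny sketch of that ALIGN arithmetic, with illustrative header sizes in place of sizeof(*bcn) and MT_TXD_SIZE:

/* Sketch of the TLV length computation; ALIGN4 rounds up to a multiple
 * of 4 as the kernel's ALIGN(x, 4) does. */
#include <stdio.h>

#define ALIGN4(x) (((x) + 3U) & ~3U)

int main(void)
{
        unsigned tlv_hdr = 10, txd = 36;        /* illustrative sizes */

        for (unsigned extra_len = 0; extra_len <= 2; extra_len++)
                printf("extra=%u -> len=%u\n", extra_len,
                       ALIGN4(tlv_hdr + txd + extra_len));
        return 0;
}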
@@ -2573,22 +2597,28 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
#define OFFLOAD_TX_MODE_SU BIT(0)
#define OFFLOAD_TX_MODE_MU BIT(1)
struct ieee80211_hw *hw = mt76_hw(dev);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
- enum nl80211_band band = chandef->chan->band;
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct bss_inband_discovery_tlv *discov;
struct ieee80211_tx_info *info;
struct sk_buff *rskb, *skb = NULL;
+ struct cfg80211_chan_def *chandef;
+ enum nl80211_band band;
struct tlv *tlv;
u8 *buf, interval;
int len;
+ if (!phy)
+ return -EINVAL;
+
+ chandef = &phy->mt76->chandef;
+ band = chandef->chan->band;
+
if (vif->bss_conf.nontransmitted)
return 0;
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76,
MT7996_MAX_BSS_OFFLOAD_SIZE);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
@@ -3147,7 +3177,8 @@ int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans)
MCU_WM_UNI_CMD(RX_HDR_TRANS), true);
}
-int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
+int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
#define MCU_EDCA_AC_PARAM 0
#define WMM_AIFS_SET BIT(0)
@@ -3156,12 +3187,12 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
#define WMM_TXOP_SET BIT(3)
#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
WMM_CW_MAX_SET | WMM_TXOP_SET)
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_vif_link *link = mt7996_vif_conf_link(dev, vif, link_conf);
struct {
u8 bss_idx;
u8 __rsv[3];
} __packed hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = link->mt76.idx,
};
struct sk_buff *skb;
int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca);
@@ -3174,7 +3205,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
skb_put_data(skb, &hdr, sizeof(hdr));
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct ieee80211_tx_queue_params *q = &link->queue_params[ac];
struct edca *e;
struct tlv *tlv;
@@ -3548,7 +3579,7 @@ int mt7996_mcu_set_eeprom(struct mt7996_dev *dev)
&req, sizeof(req), true);
}
-int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
+int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len)
{
struct {
u8 _rsv[4];
@@ -3577,15 +3608,21 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
valid = le32_to_cpu(*(__le32 *)(skb->data + 16));
if (valid) {
u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
- u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
+
+ if (!buf)
+ buf = (u8 *)dev->mt76.eeprom.data + addr;
+ if (!buf_len || buf_len > MT7996_EEPROM_BLOCK_SIZE)
+ buf_len = MT7996_EEPROM_BLOCK_SIZE;
skb_pull(skb, 48);
- memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
+ memcpy(buf, skb->data, buf_len);
+ } else {
+ ret = -EINVAL;
}
dev_kfree_skb(skb);
- return 0;
+ return ret;
}
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num)
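
mt7996_mcu_get_eeprom now accepts a caller-supplied buffer: a NULL buf falls back to the cached EEPROM image at the returned address, and the copy length is clamped to a single block. A sketch of that defaulting and clamping, assuming a simplified cache array in place of dev->mt76.eeprom.data:

/* Sketch of the buffer defaulting and length clamp in the EEPROM read;
 * EEPROM_BLOCK_SIZE mirrors MT7996_EEPROM_BLOCK_SIZE (16 bytes). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EEPROM_BLOCK_SIZE 16

static uint8_t eeprom_cache[64];        /* stand-in for the cached image */

static void copy_block(const uint8_t *payload, uint32_t addr,
                       uint8_t *buf, uint32_t buf_len)
{
        if (!buf)                               /* default to the cache */
                buf = eeprom_cache + addr;
        if (!buf_len || buf_len > EEPROM_BLOCK_SIZE)
                buf_len = EEPROM_BLOCK_SIZE;    /* never copy past a block */
        memcpy(buf, payload, buf_len);
}

int main(void)
{
        uint8_t payload[EEPROM_BLOCK_SIZE] = { 0xaa, 0xbb };
        uint8_t out[4];

        copy_block(payload, 16, NULL, 0);         /* fill cache block */
        copy_block(payload, 0, out, sizeof(out)); /* caller buffer, 4 bytes */
        printf("cache[16]=0x%02x out[0]=0x%02x\n", eeprom_cache[16], out[0]);
        return 0;
}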
@@ -3666,6 +3703,13 @@ int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap)
int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
{
+ enum {
+ IDX_TX_TIME,
+ IDX_RX_TIME,
+ IDX_OBSS_AIRTIME,
+ IDX_NON_WIFI_TIME,
+ IDX_NUM
+ };
struct {
struct {
u8 band;
@@ -3675,16 +3719,15 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
__le16 tag;
__le16 len;
__le32 offs;
- } data[4];
+ } data[IDX_NUM];
} __packed req = {
.hdr.band = phy->mt76->band_idx,
};
- /* strict order */
static const u32 offs[] = {
- UNI_MIB_TX_TIME,
- UNI_MIB_RX_TIME,
- UNI_MIB_OBSS_AIRTIME,
- UNI_MIB_NON_WIFI_TIME,
+ [IDX_TX_TIME] = UNI_MIB_TX_TIME,
+ [IDX_RX_TIME] = UNI_MIB_RX_TIME,
+ [IDX_OBSS_AIRTIME] = UNI_MIB_OBSS_AIRTIME,
+ [IDX_NON_WIFI_TIME] = UNI_MIB_NON_WIFI_TIME,
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
@@ -3693,7 +3736,7 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
struct sk_buff *skb;
int i, ret;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < IDX_NUM; i++) {
req.data[i].tag = cpu_to_le16(UNI_CMD_MIB_DATA);
req.data[i].len = cpu_to_le16(sizeof(req.data[i]));
req.data[i].offs = cpu_to_le32(offs[i]);
@@ -3712,17 +3755,24 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
goto out;
#define __res_u64(s) le64_to_cpu(res[s].data)
- state->cc_tx += __res_u64(1) - state_ts->cc_tx;
- state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
- state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
- state->cc_busy += __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3) -
+ state->cc_tx += __res_u64(IDX_TX_TIME) - state_ts->cc_tx;
+ state->cc_bss_rx += __res_u64(IDX_RX_TIME) - state_ts->cc_bss_rx;
+ state->cc_rx += __res_u64(IDX_RX_TIME) +
+ __res_u64(IDX_OBSS_AIRTIME) -
+ state_ts->cc_rx;
+ state->cc_busy += __res_u64(IDX_TX_TIME) +
+ __res_u64(IDX_RX_TIME) +
+ __res_u64(IDX_OBSS_AIRTIME) +
+ __res_u64(IDX_NON_WIFI_TIME) -
state_ts->cc_busy;
-
out:
- state_ts->cc_tx = __res_u64(1);
- state_ts->cc_bss_rx = __res_u64(2);
- state_ts->cc_rx = __res_u64(2) + __res_u64(3);
- state_ts->cc_busy = __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3);
+ state_ts->cc_tx = __res_u64(IDX_TX_TIME);
+ state_ts->cc_bss_rx = __res_u64(IDX_RX_TIME);
+ state_ts->cc_rx = __res_u64(IDX_RX_TIME) + __res_u64(IDX_OBSS_AIRTIME);
+ state_ts->cc_busy = __res_u64(IDX_TX_TIME) +
+ __res_u64(IDX_RX_TIME) +
+ __res_u64(IDX_OBSS_AIRTIME) +
+ __res_u64(IDX_NON_WIFI_TIME);
#undef __res_u64
dev_kfree_skb(skb);
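
With the named indices, every cumulative channel-state counter advances by the delta between the firmware's running totals and the snapshot taken at the previous poll. A compact sketch of one poll step, using invented res[] values:

/* Sketch of the delta accumulation in mt7996_mcu_get_chan_mib_info;
 * index names mirror the enum above, the sample values are made up. */
#include <stdint.h>
#include <stdio.h>

enum { IDX_TX_TIME, IDX_RX_TIME, IDX_OBSS_AIRTIME, IDX_NON_WIFI_TIME, IDX_NUM };

struct chan_state { uint64_t cc_tx, cc_bss_rx, cc_rx, cc_busy; };

static void accumulate(struct chan_state *state, struct chan_state *snap,
                       const uint64_t res[IDX_NUM])
{
        state->cc_tx     += res[IDX_TX_TIME] - snap->cc_tx;
        state->cc_bss_rx += res[IDX_RX_TIME] - snap->cc_bss_rx;
        state->cc_rx     += res[IDX_RX_TIME] + res[IDX_OBSS_AIRTIME] -
                            snap->cc_rx;
        state->cc_busy   += res[IDX_TX_TIME] + res[IDX_RX_TIME] +
                            res[IDX_OBSS_AIRTIME] + res[IDX_NON_WIFI_TIME] -
                            snap->cc_busy;

        /* refresh the snapshot with the firmware's running totals */
        snap->cc_tx     = res[IDX_TX_TIME];
        snap->cc_bss_rx = res[IDX_RX_TIME];
        snap->cc_rx     = res[IDX_RX_TIME] + res[IDX_OBSS_AIRTIME];
        snap->cc_busy   = res[IDX_TX_TIME] + res[IDX_RX_TIME] +
                          res[IDX_OBSS_AIRTIME] + res[IDX_NON_WIFI_TIME];
}

int main(void)
{
        struct chan_state state = {0}, snap = {0};
        uint64_t res[IDX_NUM] = { 100, 200, 50, 25 };

        accumulate(&state, &snap, res);
        printf("cc_busy=%llu\n", (unsigned long long)state.cc_busy); /* 375 */
        return 0;
}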
@@ -4023,7 +4073,7 @@ mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
- u8 omac = mvif->mt76.omac_idx;
+ u8 omac = mvif->deflink.mt76.omac_idx;
struct {
u8 band_idx;
u8 __rsv[3];
@@ -4137,16 +4187,16 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
return mt7996_mcu_set_obss_spr_bitmap(phy, he_obss_pd);
}
-int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
+ struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color)
{
int len = sizeof(struct bss_req_hdr) + sizeof(struct bss_color_tlv);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_color_tlv *bss_color;
struct sk_buff *skb;
struct tlv *tlv;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, len);
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -4196,12 +4246,12 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
.len = cpu_to_le16(sizeof(req) - 4),
.tbl_idx = flow->table_id,
.cmd = cmd,
- .own_mac_idx = mvif->mt76.omac_idx,
+ .own_mac_idx = mvif->deflink.mt76.omac_idx,
.flowid = flow->id,
.peer_id = cpu_to_le16(flow->wcid),
.duration = flow->duration,
- .bss = mvif->mt76.idx,
- .bss_idx = mvif->mt76.idx,
+ .bss = mvif->deflink.mt76.idx,
+ .bss_idx = mvif->deflink.mt76.idx,
.start_tsf = cpu_to_le64(flow->tsf),
.mantissa = flow->mantissa,
.exponent = flow->exp,
@@ -4297,16 +4347,16 @@ int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct mt7996_sta *msta;
struct sk_buff *skb;
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
&msta->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta->wcid);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
@@ -4484,12 +4534,32 @@ int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id)
sizeof(req), true);
}
+int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct {
+ u8 band_idx;
+ u8 _rsv[3];
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 _pad[3];
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = 0,
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .enable = enabled,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(SNIFFER), &req,
+ sizeof(req), true);
+}
+
int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
{
#define TX_POWER_LIMIT_TABLE_RATE 0
struct mt7996_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
struct tx_power_limit_table_ctrl {
u8 __rsv1[4];
@@ -4509,7 +4579,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
struct sk_buff *skb;
int i, tx_power;
- tx_power = mt7996_get_power_bound(phy, hw->conf.power_level);
+ tx_power = mt7996_get_power_bound(phy, phy->txpower);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&la, tx_power);
mphy->txpower_cur = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 40e45fb2b626..7a8ee6c75cf2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -177,7 +177,7 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
continue;
ofs = addr - dev->reg.map[i].phys;
- if (ofs > dev->reg.map[i].size)
+ if (ofs >= dev->reg.map[i].size)
continue;
return dev->reg.map[i].mapped + ofs;
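
The one-character change above fixes an off-by-one: an offset equal to the region size points one byte past the mapping, so it must be rejected with >= rather than >. A minimal sketch of the corrected lookup over an illustrative map table:

/* Sketch of the region lookup bounds fix; the map entries are made up. */
#include <stdint.h>
#include <stdio.h>

struct reg_map { uint32_t phys, mapped, size; };

static int reg_addr(const struct reg_map *map, unsigned n, uint32_t addr,
                    uint32_t *out)
{
        for (unsigned i = 0; i < n; i++) {
                uint32_t ofs = addr - map[i].phys;

                if (addr < map[i].phys || ofs >= map[i].size)
                        continue;       /* ofs == size is out of range */
                *out = map[i].mapped + ofs;
                return 0;
        }
        return -1;
}

int main(void)
{
        struct reg_map map[] = { { .phys = 0x1000, .mapped = 0, .size = 0x100 } };
        uint32_t out;

        printf("0x10ff -> %d\n", reg_addr(map, 1, 0x10ff, &out)); /* hit */
        printf("0x1100 -> %d\n", reg_addr(map, 1, 0x1100, &out)); /* miss */
        return 0;
}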
@@ -605,6 +605,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
+ .link_data_size = sizeof(struct mt7996_vif_link),
.drv_flags = MT_DRV_TXWI_NO_FREE |
MT_DRV_AMSDU_OFFLOAD |
MT_DRV_HW_MGMT_TXQ,
@@ -618,9 +619,12 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
.sta_add = mt7996_mac_sta_add,
+ .sta_event = mt7996_mac_sta_event,
.sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
.set_channel = mt7996_set_channel,
+ .vif_link_add = mt7996_vif_link_add,
+ .vif_link_remove = mt7996_vif_link_remove,
};
struct mt7996_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index ab8c9070630b..29fabb9b04ae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -11,6 +11,7 @@
#include "../mt76_connac.h"
#include "regs.h"
+#define MT7996_MAX_RADIOS 3
#define MT7996_MAX_INTERFACES 19 /* per-band */
#define MT7996_MAX_WMM_SETS 4
#define MT7996_WTBL_BMC_SIZE (is_mt7992(&dev->mt76) ? 32 : 64)
@@ -34,13 +35,32 @@
#define MT7996_FIRMWARE_DSP "mediatek/mt7996/mt7996_dsp.bin"
#define MT7996_ROM_PATCH "mediatek/mt7996/mt7996_rom_patch.bin"
+#define MT7996_FIRMWARE_WA_233 "mediatek/mt7996/mt7996_wa_233.bin"
+#define MT7996_FIRMWARE_WM_233 "mediatek/mt7996/mt7996_wm_233.bin"
+#define MT7996_FIRMWARE_DSP_233 MT7996_FIRMWARE_DSP
+#define MT7996_ROM_PATCH_233 "mediatek/mt7996/mt7996_rom_patch_233.bin"
+
#define MT7992_FIRMWARE_WA "mediatek/mt7996/mt7992_wa.bin"
#define MT7992_FIRMWARE_WM "mediatek/mt7996/mt7992_wm.bin"
#define MT7992_FIRMWARE_DSP "mediatek/mt7996/mt7992_dsp.bin"
#define MT7992_ROM_PATCH "mediatek/mt7996/mt7992_rom_patch.bin"
+#define MT7992_FIRMWARE_WA_23 "mediatek/mt7996/mt7992_wa_23.bin"
+#define MT7992_FIRMWARE_WM_23 "mediatek/mt7996/mt7992_wm_23.bin"
+#define MT7992_FIRMWARE_DSP_23 "mediatek/mt7996/mt7992_dsp_23.bin"
+#define MT7992_ROM_PATCH_23 "mediatek/mt7996/mt7992_rom_patch_23.bin"
+
#define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin"
+#define MT7996_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7996_eeprom_2i5i6i.bin"
+#define MT7996_EEPROM_DEFAULT_233 "mediatek/mt7996/mt7996_eeprom_233.bin"
+#define MT7996_EEPROM_DEFAULT_233_INT "mediatek/mt7996/mt7996_eeprom_233_2i5i6i.bin"
+
#define MT7992_EEPROM_DEFAULT "mediatek/mt7996/mt7992_eeprom.bin"
+#define MT7992_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7992_eeprom_2i5i.bin"
+#define MT7992_EEPROM_DEFAULT_MIX "mediatek/mt7996/mt7992_eeprom_2i5e.bin"
+#define MT7992_EEPROM_DEFAULT_23 "mediatek/mt7996/mt7992_eeprom_23.bin"
+#define MT7992_EEPROM_DEFAULT_23_INT "mediatek/mt7996/mt7992_eeprom_23_2i5i.bin"
+
#define MT7996_EEPROM_SIZE 7680
#define MT7996_EEPROM_BLOCK_SIZE 16
#define MT7996_TOKEN_SIZE 16384
@@ -48,6 +68,12 @@
#define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+#define MT7996_IBF_MAX_NC 2
+#define MT7996_IBF_TIMEOUT 0x18
+#define MT7996_IBF_TIMEOUT_LEGACY 0x48
+
+#define MT7992_CFEND_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7992_IBF_TIMEOUT 0xff
#define MT7996_SKU_RATE_NUM 417
#define MT7996_SKU_PATH_NUM 494
@@ -95,6 +121,22 @@ enum mt7996_ram_type {
MT7996_RAM_TYPE_DSP,
};
+enum mt7996_var_type {
+ MT7996_VAR_TYPE_444,
+ MT7996_VAR_TYPE_233,
+};
+
+enum mt7992_var_type {
+ MT7992_VAR_TYPE_44,
+ MT7992_VAR_TYPE_23,
+};
+
+enum mt7996_fem_type {
+ MT7996_FEM_EXT,
+ MT7996_FEM_INT,
+ MT7996_FEM_MIX,
+};
+
enum mt7996_txq_id {
MT7996_TXQ_FWDL = 16,
MT7996_TXQ_MCU_WM,
@@ -164,8 +206,8 @@ struct mt7996_sta {
} twt;
};
-struct mt7996_vif {
- struct mt76_vif mt76; /* must be first */
+struct mt7996_vif_link {
+ struct mt76_vif_link mt76; /* must be first */
struct mt7996_sta sta;
struct mt7996_phy *phy;
@@ -174,6 +216,11 @@ struct mt7996_vif {
struct cfg80211_bitrate_mask bitrate_mask;
};
+struct mt7996_vif {
+ struct mt7996_vif_link deflink; /* must be first */
+ struct mt76_vif_data mt76;
+};
+
/* crash-dump */
struct mt7996_crash_data {
guid_t guid;
@@ -211,8 +258,6 @@ struct mt7996_phy {
struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
- struct ieee80211_vif *monitor_vif;
-
struct thermal_cooling_device *cdev;
u8 cdev_state;
u8 throttle_state;
@@ -232,11 +277,15 @@ struct mt7996_phy {
u32 rx_ampdu_ts;
u32 ampdu_ref;
+ int txpower;
struct mt76_mib_stats mib;
struct mt76_channel_state state_ts;
+ u16 orig_chainmask;
+
bool has_aux_rx;
+ bool counter_reset;
};
struct mt7996_dev {
@@ -245,6 +294,10 @@ struct mt7996_dev {
struct mt76_phy mphy;
};
+ struct mt7996_phy *radio_phy[MT7996_MAX_RADIOS];
+ struct wiphy_radio radios[MT7996_MAX_RADIOS];
+ struct wiphy_radio_freq_range radio_freqs[MT7996_MAX_RADIOS];
+
struct mt7996_hif *hif2;
struct mt7996_reg_desc reg;
u8 q_id[MT7996_MAX_QUEUE];
@@ -329,6 +382,10 @@ struct mt7996_dev {
spinlock_t reg_lock;
u8 wtbl_size_group;
+ struct {
+ u8 type:4;
+ u8 fem:4;
+ } var;
};
enum {
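
The new var member packs the hardware variant and FEM kind into a single byte with two 4-bit bitfields. A quick sketch of the packing, with illustrative values in place of the driver's enums:

/* Sketch of the 4-bit variant/FEM packing added to struct mt7996_dev. */
#include <stdio.h>

struct hw_var {
        unsigned char type : 4; /* mt7996_var_type / mt7992_var_type */
        unsigned char fem  : 4; /* mt7996_fem_type */
};

int main(void)
{
        struct hw_var var = { .type = 1 /* e.g. a _233 variant */,
                              .fem  = 2 /* e.g. a mixed FEM */ };

        printf("sizeof=%zu type=%u fem=%u\n", sizeof(var), var.type, var.fem);
        return 0;
}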
@@ -360,14 +417,6 @@ enum mt7996_rdd_cmd {
RDD_IRQ_OFF,
};
-static inline struct mt7996_phy *
-mt7996_hw_phy(struct ieee80211_hw *hw)
-{
- struct mt76_phy *phy = hw->priv;
-
- return phy->priv;
-}
-
static inline struct mt7996_dev *
mt7996_hw_dev(struct ieee80211_hw *hw)
{
@@ -405,14 +454,69 @@ mt7996_band_valid(struct mt7996_dev *dev, u8 band)
if (is_mt7992(&dev->mt76))
return band <= MT_BAND1;
- /* tri-band support */
- if (band <= MT_BAND2 &&
- mt76_get_field(dev, MT_PAD_GPIO, MT_PAD_GPIO_ADIE_COMB) <= 1)
- return true;
+ return band <= MT_BAND2;
+}
- return band == MT_BAND0 || band == MT_BAND2;
+static inline bool
+mt7996_has_background_radar(struct mt7996_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7990:
+ if (dev->var.type == MT7996_VAR_TYPE_233)
+ return false;
+ break;
+ case 0x7992:
+ if (dev->var.type == MT7992_VAR_TYPE_23)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
}
+static inline struct mt7996_phy *
+mt7996_band_phy(struct mt7996_dev *dev, enum nl80211_band band)
+{
+ struct mt76_phy *mphy;
+
+ mphy = dev->mt76.band_phys[band];
+ if (!mphy)
+ return NULL;
+
+ return mphy->priv;
+}
+
+static inline struct mt7996_vif_link *
+mt7996_vif_link(struct mt7996_dev *dev, struct ieee80211_vif *vif, int link_id)
+{
+ return (struct mt7996_vif_link *)mt76_vif_link(&dev->mt76, vif, link_id);
+}
+
+static inline struct mt7996_phy *
+mt7996_vif_link_phy(struct mt7996_vif_link *link)
+{
+ struct mt76_phy *mphy = mt76_vif_link_phy(&link->mt76);
+
+ if (!mphy)
+ return NULL;
+
+ return mphy->priv;
+}
+
+static inline struct mt7996_vif_link *
+mt7996_vif_conf_link(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return (struct mt7996_vif_link *)mt76_vif_conf_link(&dev->mt76, vif,
+ link_conf);
+}
+
+#define mt7996_for_each_phy(dev, phy) \
+ for (int __i = 0; __i < ARRAY_SIZE((dev)->radio_phy); __i++) \
+ if (((phy) = (dev)->radio_phy[__i]) != NULL)
+
extern const struct ieee80211_ops mt7996_ops;
extern struct pci_driver mt7996_pci_driver;
extern struct pci_driver mt7996_hif_driver;
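
mt7996_for_each_phy above is the classic for-if iterator macro: the for header scopes the index, and the if both binds phy and skips empty radio_phy slots. A standalone sketch of the pattern, with a stand-in phy table containing one unused slot:

/* Sketch of the for-if iterator macro pattern behind mt7996_for_each_phy. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct phy { int band_idx; };

#define for_each_phy(tbl, phy)                                          \
        for (unsigned __i = 0; __i < ARRAY_SIZE(tbl); __i++)            \
                if (((phy) = (tbl)[__i]) != NULL)

int main(void)
{
        struct phy a = { 0 }, c = { 2 };
        struct phy *radio_phy[3] = { &a, NULL, &c };    /* slot 1 unused */
        struct phy *phy;

        for_each_phy(radio_phy, phy)
                printf("band %d\n", phy->band_idx);     /* prints 0 then 2 */
        return 0;
}

As with any for-if macro, a bare else after the loop body would bind to the hidden if, so multi-statement bodies should be braced.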
@@ -424,6 +528,12 @@ irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif);
int mt7996_register_device(struct mt7996_dev *dev);
void mt7996_unregister_device(struct mt7996_dev *dev);
+int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
+void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
int mt7996_eeprom_init(struct mt7996_dev *dev);
int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy);
int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
@@ -439,29 +549,33 @@ int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx,
void mt7996_init_txpower(struct mt7996_phy *phy);
int mt7996_txbf_init(struct mt7996_dev *dev);
void mt7996_reset(struct mt7996_dev *dev);
-int mt7996_run(struct ieee80211_hw *hw);
+int mt7996_run(struct mt7996_phy *phy);
int mt7996_mcu_init(struct mt7996_dev *dev);
int mt7996_mcu_init_firmware(struct mt7996_dev *dev);
int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
struct mt7996_vif *mvif,
struct mt7996_twt_flow *flow,
int cmd);
-int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, bool enable);
-int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, int enable);
+int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, bool enable);
+int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, int enable);
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable, bool newly);
+ struct mt76_vif_link *mlink,
+ struct ieee80211_sta *sta, int conn_state, bool newly);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
-int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
+ struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int enable);
+ struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
struct ieee80211_vif *vif, u32 changed);
int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
@@ -470,13 +584,14 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
-int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
+int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
void *data, u16 version);
int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *data, u32 field);
int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
-int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset);
+int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len);
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap);
int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 set, u8 band);
@@ -488,7 +603,8 @@ int mt7996_mcu_set_radar_th(struct mt7996_dev *dev, int index,
const struct mt7996_dfs_pattern *pattern);
int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable);
int mt7996_mcu_set_rts_thresh(struct mt7996_phy *phy, u32 val);
-int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif);
+int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch);
int mt7996_mcu_get_temperature(struct mt7996_phy *phy);
int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state);
@@ -511,6 +627,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb);
void mt7996_mcu_exit(struct mt7996_dev *dev);
int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id);
+int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled);
static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
{
@@ -575,6 +692,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy);
int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7996_mac_work(struct work_struct *work);
@@ -602,7 +721,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy);
void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy);
void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy);
void mt7996_update_channel(struct mt76_phy *mphy);
-int mt7996_init_debugfs(struct mt7996_phy *phy);
+int mt7996_init_debugfs(struct mt7996_dev *dev);
void mt7996_debugfs_rx_fw_monitor(struct mt7996_dev *dev, const void *data, int len);
bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len);
int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index 47b429d8bfbe..1876a968c92d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -175,6 +175,9 @@ enum offs_rev {
#define MT_WTBLOFF_RSCR_RCPI_MODE GENMASK(31, 30)
#define MT_WTBLOFF_RSCR_RCPI_PARAM GENMASK(25, 24)
+#define MT_WTBLOFF_ACR(_band) MT_WTBLOFF(_band, 0x010)
+#define MT_WTBLOFF_ADM_BACKOFFTIME BIT(29)
+
/* ETBF: band 0(0x820ea000), band 1(0x820fa000), band 2(0x830ea000) */
#define MT_WF_ETBF_BASE(_band) __BASE(WF_ETBF_BASE, (_band))
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
@@ -660,8 +663,17 @@ enum offs_rev {
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+/* ADIE */
+#define MT_ADIE_CHIP_ID(_idx) (0x0f00002c + ((_idx) << 28))
+#define MT_ADIE_VERSION_MASK GENMASK(15, 0)
+#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
+
#define MT_PAD_GPIO 0x700056f0
#define MT_PAD_GPIO_ADIE_COMB GENMASK(16, 15)
+#define MT_PAD_GPIO_2ADIE_TBTC BIT(19)
+/* for mt7992 */
+#define MT_PAD_GPIO_ADIE_COMB_7992 GENMASK(17, 16)
+#define MT_PAD_GPIO_ADIE_SINGLE BIT(15)
#define MT_HW_REV 0x70010204
#define MT_HW_REV1 0x8a00
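The new MT_ADIE_CHIP_ID register packs the a-die version into the low 16 bits and the chip id into the high 16 bits. A hedged sketch of decoding it, assuming the driver's usual mt76_rr() accessor handles the remapped address range (the helper name is hypothetical; FIELD_GET() is from <linux/bitfield.h>):

    static void mt7996_decode_adie_id(struct mt7996_dev *dev, u8 adie_idx,
                                      u16 *chip_id, u16 *version)
    {
            u32 val = mt76_rr(dev, MT_ADIE_CHIP_ID(adie_idx));

            *chip_id = FIELD_GET(MT_ADIE_CHIP_ID_MASK, val);
            *version = FIELD_GET(MT_ADIE_VERSION_MASK, val);
    }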
diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c
new file mode 100644
index 000000000000..1c4f9deaaada
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/scan.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2024 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+static void mt76_scan_complete(struct mt76_dev *dev, bool abort)
+{
+ struct mt76_phy *phy = dev->scan.phy;
+ struct cfg80211_scan_info info = {
+ .aborted = abort,
+ };
+
+ if (!phy)
+ return;
+
+ clear_bit(MT76_SCANNING, &phy->state);
+
+ if (dev->scan.chan && phy->main_chandef.chan)
+ mt76_set_channel(phy, &phy->main_chandef, false);
+ mt76_put_vif_phy_link(phy, dev->scan.vif, dev->scan.mlink);
+ memset(&dev->scan, 0, sizeof(dev->scan));
+ ieee80211_scan_completed(phy->hw, &info);
+}
+
+void mt76_abort_scan(struct mt76_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->scan_work);
+ mt76_scan_complete(dev, true);
+}
+
+static void
+mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
+{
+ struct cfg80211_scan_request *req = dev->scan.req;
+ struct ieee80211_vif *vif = dev->scan.vif;
+ struct mt76_vif_link *mvif = dev->scan.mlink;
+ enum nl80211_band band = dev->scan.chan->band;
+ struct mt76_phy *phy = dev->scan.phy;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ skb = ieee80211_probereq_get(phy->hw, vif->addr, ssid->ssid,
+ ssid->ssid_len, req->ie_len);
+ if (!skb)
+ return;
+
+ if (is_unicast_ether_addr(req->bssid)) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ ether_addr_copy(hdr->addr1, req->bssid);
+ ether_addr_copy(hdr->addr3, req->bssid);
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ if (req->no_cck)
+ info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+ info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
+
+ if (req->ie_len)
+ skb_put_data(skb, req->ie, req->ie_len);
+
+ skb->priority = 7;
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+
+ rcu_read_lock();
+ if (ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL))
+ mt76_tx(phy, NULL, mvif->wcid, skb);
+ else
+ ieee80211_free_txskb(phy->hw, skb);
+ rcu_read_unlock();
+}
+
+void mt76_scan_work(struct work_struct *work)
+{
+ struct mt76_dev *dev = container_of(work, struct mt76_dev,
+ scan_work.work);
+ struct cfg80211_scan_request *req = dev->scan.req;
+ struct cfg80211_chan_def chandef = {};
+ struct mt76_phy *phy = dev->scan.phy;
+ int duration = HZ / 9; /* ~110 ms */
+ int i;
+
+ if (dev->scan.chan_idx >= req->n_channels) {
+ mt76_scan_complete(dev, false);
+ return;
+ }
+
+ if (dev->scan.chan && phy->num_sta) {
+ dev->scan.chan = NULL;
+ mt76_set_channel(phy, &phy->main_chandef, false);
+ goto out;
+ }
+
+ dev->scan.chan = req->channels[dev->scan.chan_idx++];
+ cfg80211_chandef_create(&chandef, dev->scan.chan, NL80211_CHAN_HT20);
+ mt76_set_channel(phy, &chandef, true);
+
+ if (!req->n_ssids ||
+ chandef.chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
+ goto out;
+
+ duration = HZ / 16; /* ~60 ms */
+ local_bh_disable();
+ for (i = 0; i < req->n_ssids; i++)
+ mt76_scan_send_probe(dev, &req->ssids[i]);
+ local_bh_enable();
+
+out:
+ if (!duration)
+ return;
+
+ if (dev->scan.chan)
+ duration = max_t(int, duration,
+ msecs_to_jiffies(req->duration +
+ (req->duration >> 5)));
+
+ ieee80211_queue_delayed_work(dev->phy.hw, &dev->scan_work, duration);
+}
+
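To restate the dwell-time policy in mt76_scan_work(): passive channels dwell about 110 ms, actively probed ones about 60 ms, and a userspace-requested per-channel duration wins when it is longer. req->duration is in TUs, so adding duration >> 5 (roughly 3%) approximates the TU-to-millisecond conversion (1 TU = 1.024 ms). A hedged sketch with an illustrative helper name:

    static int mt76_scan_channel_dwell(struct cfg80211_scan_request *req,
                                       bool active_probe)
    {
            int duration = active_probe ? HZ / 16 : HZ / 9;

            if (req->duration)
                    duration = max_t(int, duration,
                                     msecs_to_jiffies(req->duration +
                                                      (req->duration >> 5)));

            return duration;
    }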
+int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink;
+ int ret = 0;
+
+ if (hw->wiphy->n_radio > 1) {
+ phy = dev->band_phys[req->req.channels[0]->band];
+ if (!phy)
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->scan.req || phy->roc_vif) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mlink = mt76_get_vif_phy_link(phy, vif);
+ if (IS_ERR(mlink)) {
+ ret = PTR_ERR(mlink);
+ goto out;
+ }
+
+ memset(&dev->scan, 0, sizeof(dev->scan));
+ dev->scan.req = &req->req;
+ dev->scan.vif = vif;
+ dev->scan.phy = phy;
+ dev->scan.mlink = mlink;
+ ieee80211_queue_delayed_work(dev->phy.hw, &dev->scan_work, 0);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_hw_scan);
+
+void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ mt76_abort_scan(phy->dev);
+}
+EXPORT_SYMBOL_GPL(mt76_cancel_hw_scan);
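A usage sketch, under the assumption that a mt76 chip driver adopting this software scanner only needs to route its mac80211 callbacks at the two exported helpers (the surrounding ops and their other fields are elided):

    static const struct ieee80211_ops example_mt76_ops = {
            /* ...tx/start/stop/config callbacks... */
            .hw_scan        = mt76_hw_scan,
            .cancel_hw_scan = mt76_cancel_hw_scan,
    };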
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index ddd8c0cc744d..0a927a7313a6 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -46,6 +46,10 @@ static int mt76s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
return 0;
sdio->sched.pse_mcu_quota += pse_mcu_quota;
+ if (sdio->pse_mcu_quota_max &&
+ sdio->sched.pse_mcu_quota > sdio->pse_mcu_quota_max) {
+ sdio->sched.pse_mcu_quota = sdio->pse_mcu_quota_max;
+ }
sdio->sched.pse_data_quota += pse_data_quota;
sdio->sched.ple_data_quota += ple_data_quota;
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index ce193e625666..af0c50c983ec 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -489,7 +489,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
do {
if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
- return -EBUSY;
+ break;
if (stop || mt76_txq_stopped(q))
break;
@@ -522,24 +522,16 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
- struct mt76_queue *q = phy->q_tx[qid];
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq;
struct mt76_txq *mtxq;
struct mt76_wcid *wcid;
+ struct mt76_queue *q;
int ret = 0;
while (1) {
int n_frames = 0;
- if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
- return -EBUSY;
-
- if (dev->queue_ops->tx_cleanup &&
- q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
- dev->queue_ops->tx_cleanup(dev, q, false);
- }
-
txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -549,6 +541,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
continue;
+ phy = mt76_dev_phy(dev, wcid->phy_idx);
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
+ continue;
+
+ q = phy->q_tx[qid];
+ if (dev->queue_ops->tx_cleanup &&
+ q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+ dev->queue_ops->tx_cleanup(dev, q, false);
+ }
+
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@@ -578,7 +580,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
int len;
- if (qid >= 4 || phy->offchannel)
+ if (qid >= 4)
return;
local_bh_disable();
@@ -680,9 +682,14 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
void mt76_txq_schedule_all(struct mt76_phy *phy)
{
+ struct mt76_phy *main_phy = &phy->dev->phy;
int i;
mt76_txq_schedule_pending(phy);
+
+ if (phy != main_phy && phy->hw == main_phy->hw)
+ return;
+
for (i = 0; i <= MT_TXQ_BK; i++)
mt76_txq_schedule(phy, i);
}
@@ -693,6 +700,7 @@ void mt76_tx_worker_run(struct mt76_dev *dev)
struct mt76_phy *phy;
int i;
+ mt76_txq_schedule_all(&dev->phy);
for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
phy = dev->phys[i];
if (!phy)
@@ -748,9 +756,6 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- if (!test_bit(MT76_STATE_RUNNING, &phy->state))
- return;
-
mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
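The scheduler rework replaces a global bail-out with per-station filtering: mt76_txq_schedule_list() now resolves each wcid's own phy and skips only stations whose radio is mid-reset or off-channel, and mt76_txq_schedule_all() defers to the main phy when several band phys share one ieee80211_hw, so the shared queues are walked once. A sketch of the per-station check, with an illustrative helper name:

    static bool mt76_wcid_tx_allowed(struct mt76_dev *dev,
                                     struct mt76_wcid *wcid)
    {
            struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);

            return !test_bit(MT76_RESET, &phy->state) && !phy->offchannel;
    }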
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 58ff06823389..f9e67b8c3b3c 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -33,9 +33,9 @@ int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
ret = usb_control_msg(udev, pipe, req, req_type, val,
offset, buf, len, MT_VEND_REQ_TOUT_MS);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -EPROTO)
set_bit(MT76_REMOVED, &dev->phy.state);
- if (ret >= 0 || ret == -ENODEV)
+ if (ret >= 0 || ret == -ENODEV || ret == -EPROTO)
return ret;
usleep_range(5000, 10000);
}
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index d6c01a2dd198..95b3dc96e4c4 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -64,7 +64,7 @@ int mt76_wcid_alloc(u32 *mask, int size)
}
EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx)
{
struct mt76_wcid *wcid;
int i, j, min_rssi = 0;
@@ -75,20 +75,16 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
u32 mask = dev->wcid_mask[i];
- u32 phy_mask = dev->wcid_phy_mask[i];
if (!mask)
continue;
- for (j = i * 32; mask; j++, mask >>= 1, phy_mask >>= 1) {
+ for (j = i * 32; mask; j++, mask >>= 1) {
if (!(mask & 1))
continue;
- if (!!(phy_mask & 1) != ext_phy)
- continue;
-
wcid = rcu_dereference(dev->wcid[j]);
- if (!wcid)
+ if (!wcid || wcid->phy_idx != phy_idx)
continue;
spin_lock(&dev->rx_lock);
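Switching mt76_get_min_avg_rssi() from the ext_phy bool (backed by the parallel wcid_phy_mask bitmap) to wcid->phy_idx removes the two-phy limit; any number of band phys can be filtered by index. A minimal sketch of the new match, with a hypothetical helper name:

    static bool mt76_wcid_matches_phy(struct mt76_wcid *wcid, u8 phy_idx)
    {
            return wcid && wcid->phy_idx == phy_idx;
    }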
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index e96736cc7259..e7aa0f991923 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1669,7 +1669,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
}
static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
int ret;
struct wilc_vif *vif = netdev_priv(wdev->netdev);
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 7e84fc0fd911..af298021e050 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -925,8 +925,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
- wiphy_unregister(wilc->wiphy);
- wiphy_free(wilc->wiphy);
}
EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 5262c8846c13..af970f999111 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -193,7 +193,7 @@ static int wilc_sdio_probe(struct sdio_func *func,
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto dispose_irq;
+ goto unregister_wiphy;
}
wilc_sdio_deinit(wilc);
@@ -202,15 +202,18 @@ static int wilc_sdio_probe(struct sdio_func *func,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto dispose_irq;
+ goto unregister_wiphy;
}
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
+unregister_wiphy:
+ wiphy_unregister(wilc->wiphy);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
+ wiphy_free(wilc->wiphy);
free:
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
@@ -223,6 +226,8 @@ static void wilc_sdio_remove(struct sdio_func *func)
struct wilc_sdio *sdio_priv = wilc->bus_data;
wilc_netdev_cleanup(wilc);
+ wiphy_unregister(wilc->wiphy);
+ wiphy_free(wilc->wiphy);
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index ce2a9cdd6aa7..5bcabb7decea 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -256,7 +256,7 @@ static int wilc_bus_probe(struct spi_device *spi)
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto power_down;
+ goto unregister_wiphy;
}
wilc_wlan_power(wilc, false);
@@ -264,14 +264,17 @@ static int wilc_bus_probe(struct spi_device *spi)
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto power_down;
+ goto unregister_wiphy;
}
return 0;
+unregister_wiphy:
+ wiphy_unregister(wilc->wiphy);
power_down:
wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
+ wiphy_free(wilc->wiphy);
free:
kfree(spi_priv);
return ret;
@@ -283,6 +286,8 @@ static void wilc_bus_remove(struct spi_device *spi)
struct wilc_spi *spi_priv = wilc->bus_data;
wilc_netdev_cleanup(wilc);
+ wiphy_unregister(wilc->wiphy);
+ wiphy_free(wilc->wiphy);
kfree(spi_priv);
}
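Across both wilc1000 bus drivers the ownership rules are now uniform: wilc_netdev_cleanup() no longer touches the wiphy, the probe error paths unregister it themselves, and wiphy_free() always comes last. A sketch of the resulting teardown order, as an illustrative wrapper:

    static void wilc_example_teardown(struct wilc *wilc)
    {
            wilc_netdev_cleanup(wilc);      /* remove netdevs, stop I/O */
            wiphy_unregister(wilc->wiphy);  /* detach from cfg80211 */
            wiphy_free(wilc->wiphy);        /* free last, nothing uses it now */
    }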
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 8b97accf6638..0b2282528342 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -881,7 +881,7 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
int ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index b375a4751580..a377d85c2451 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -102,7 +102,7 @@ struct qtnf_wmac {
struct qtnf_mac_info macinfo;
struct qtnf_vif iflist[QTNF_MAX_INTF];
struct cfg80211_scan_request *scan_req;
- struct mutex mac_lock; /* lock during wmac speicific ops */
+ struct mutex mac_lock; /* lock during wmac specific ops */
struct delayed_work scan_timeout;
struct ieee80211_regdomain *rd;
struct platform_device *pdev;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 60c2a12e9d5e..e5f553a1ea24 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -8882,13 +8882,10 @@ static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
for (ch_idx = 0; ch_idx < 2; ch_idx = ch_idx + 1) {
if (ch_idx == 0) {
- rfval = rfb0r1 & (~0x3);
rfval = rfb0r1 | 0x1;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
- rfval = rfb0r2 & (~0x33);
rfval = rfb0r2 | 0x11;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
- rfval = rfb0r42 & (~0x50);
rfval = rfb0r42 | 0x10;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
@@ -8901,13 +8898,10 @@ static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x00);
} else {
- rfval = rfb0r1 & (~0x3);
rfval = rfb0r1 | 0x2;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
- rfval = rfb0r2 & (~0x33);
rfval = rfb0r2 | 0x22;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
- rfval = rfb0r42 & (~0x50);
rfval = rfb0r42 | 0x40;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
index 3d04df0f5bf4..766a7a7c7d28 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
@@ -1860,7 +1860,7 @@ struct rtl8xxxu_fileops rtl8188eu_fops = {
.set_crystal_cap = rtl8188f_set_crystal_cap,
.cck_rssi = rtl8188e_cck_rssi,
.led_classdev_brightness_set = rtl8188eu_led_brightness_set,
- .writeN_block_size = 128,
+ .writeN_block_size = 196,
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.has_tx_report = 1,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index f95898f68d68..4ce0c05c5129 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -8147,6 +8147,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8186, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
@@ -8157,12 +8159,18 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9043, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
@@ -8179,6 +8187,10 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3358, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3359, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
@@ -8193,6 +8205,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x9846, 0x9041, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
@@ -8218,6 +8232,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
@@ -8226,6 +8242,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0077, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
@@ -8248,6 +8266,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330d, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index aab4605de9c4..ff61867d142f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -575,9 +575,15 @@ static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw,
void rtl_deinit_core(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
rtl_c2hcmd_launcher(hw, 0);
rtl_free_entries_from_scan_list(hw);
rtl_free_entries_from_ack_queue(hw, false);
+ if (rtlpriv->works.rtl_wq) {
+ destroy_workqueue(rtlpriv->works.rtl_wq);
+ rtlpriv->works.rtl_wq = NULL;
+ }
}
EXPORT_SYMBOL_GPL(rtl_deinit_core);
@@ -2696,9 +2702,6 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-struct rtl_global_var rtl_global_var = {};
-EXPORT_SYMBOL_GPL(rtl_global_var);
-
static int __init rtl_core_module_init(void)
{
BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION);
@@ -2712,10 +2715,6 @@ static int __init rtl_core_module_init(void)
/* add debugfs */
rtl_debugfs_add_topdir();
- /* init some global vars */
- INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
- spin_lock_init(&rtl_global_var.glb_list_lock);
-
return 0;
}
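rtl_deinit_core() now owns the workqueue teardown for both the PCI and USB paths; the NULL check plus reset keeps the destroy idempotent, since the USB rx-cleanup path used to do this itself. A hedged sketch of the idiom (hypothetical wrapper name):

    static void example_destroy_rtl_wq(struct rtl_priv *rtlpriv)
    {
            if (rtlpriv->works.rtl_wq) {
                    destroy_workqueue(rtlpriv->works.rtl_wq);
                    rtlpriv->works.rtl_wq = NULL;
            }
    }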
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index f081a9a90563..f3a6a43a42ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -124,7 +124,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
-extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 11709b6c83f1..0eafc4d125f9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -295,46 +295,6 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
return status;
}
-static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_priv *tpriv = NULL, *iter;
- struct rtl_pci_priv *tpcipriv = NULL;
-
- if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
- list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
- list) {
- tpcipriv = (struct rtl_pci_priv *)iter->priv;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
- pcipriv->ndis_adapter.funcnumber);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
- tpcipriv->ndis_adapter.funcnumber);
-
- if (pcipriv->ndis_adapter.busnumber ==
- tpcipriv->ndis_adapter.busnumber &&
- pcipriv->ndis_adapter.devnumber ==
- tpcipriv->ndis_adapter.devnumber &&
- pcipriv->ndis_adapter.funcnumber !=
- tpcipriv->ndis_adapter.funcnumber) {
- tpriv = iter;
- break;
- }
- }
- }
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", tpriv != NULL);
-
- if (tpriv)
- *buddy_priv = tpriv;
-
- return tpriv != NULL;
-}
-
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
@@ -1696,8 +1656,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
-
- destroy_workqueue(rtlpriv->works.rtl_wq);
}
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
@@ -2011,7 +1969,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
- list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
return true;
}
@@ -2158,7 +2115,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
- rtlpriv->glb_var = &rtl_global_var;
rtl_efuse_ops_init(hw);
/* MEM map */
@@ -2209,7 +2165,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
pr_err("Can't init_sw_vars\n");
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
rtl_init_sw_leds(hw);
@@ -2227,14 +2183,14 @@ int rtl_pci_probe(struct pci_dev *pdev,
err = rtl_pci_init(hw, pdev);
if (err) {
pr_err("Failed to init PCI\n");
- goto fail3;
+ goto fail4;
}
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
err = -ENODEV;
- goto fail3;
+ goto fail5;
}
rtlpriv->mac80211.mac80211_registered = 1;
@@ -2257,16 +2213,19 @@ int rtl_pci_probe(struct pci_dev *pdev,
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
-fail3:
- pci_set_drvdata(pdev, NULL);
+fail5:
+ rtl_pci_deinit(hw);
+fail4:
rtl_deinit_core(hw);
+fail3:
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
- complete(&rtlpriv->firmware_loading_complete);
fail1:
if (hw)
@@ -2317,7 +2276,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
if (rtlpci->using_msi)
pci_disable_msi(rtlpci->pdev);
- list_del(&rtlpriv->list);
if (rtlpriv->io.pci_mem_start != 0) {
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
@@ -2376,7 +2334,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
const struct rtl_intf_ops rtl_pci_ops = {
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
- .check_buddy_priv = rtl_pci_check_buddy_priv,
.adapter_tx = rtl_pci_tx,
.flush = rtl_pci_flush,
.reset_trx_ring = rtl_pci_reset_trx_ring,
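The probe now unwinds symmetrically: an ieee80211_register_hw() failure tears down the PCI layer, an rtl_pci_init() failure falls through core deinit, and the sw-vars teardown always waits for the asynchronous firmware load it started (every firmware callback now signals the completion, so the wait cannot hang). A condensed sketch of the ladder, with illustrative label and step names:

    static int example_probe_unwind(struct ieee80211_hw *hw, struct pci_dev *pdev)
    {
            struct rtl_priv *rtlpriv = rtl_priv(hw);
            int err;

            err = rtlpriv->cfg->ops->init_sw_vars(hw); /* starts async fw load */
            if (err)
                    goto err_unmap;
            err = rtl_pci_init(hw, pdev);
            if (err)
                    goto err_deinit_core;
            err = ieee80211_register_hw(hw);
            if (err)
                    goto err_pci_deinit;
            return 0;

    err_pci_deinit:
            rtl_pci_deinit(hw);
    err_deinit_core:
            rtl_deinit_core(hw);
            /* fw callback writes into sw vars; wait before tearing them down */
            wait_for_completion(&rtlpriv->firmware_loading_complete);
            rtlpriv->cfg->ops->deinit_sw_vars(hw);
    err_unmap:
            /* pci_iounmap() + pci_release_regions() */
            return err;
    }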
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index bbf8ff63dced..e63c67b1861b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -64,22 +64,23 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
rtlpriv->max_fw_size = 0;
release_firmware(firmware);
- return;
+ goto exit;
}
pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
pfirmware->sz_fw_tmpbufferlen = firmware->size;
release_firmware(firmware);
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
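Moving complete() from the top of the callback to a shared exit label means a waiter, such as the probe error path that now calls wait_for_completion() before deinit_sw_vars(), can no longer race with the memcpy into pfirmware. A minimal sketch of the pattern, with illustrative names:

    static void example_fw_cb(const struct firmware *fw, void *context)
    {
            struct ieee80211_hw *hw = context;
            struct rtl_priv *rtlpriv = rtl_priv(hw);

            if (fw) {
                    /* ...copy fw->data into driver-owned storage... */
                    release_firmware(fw);
            }
            /* signal only after the buffer is fully consumed */
            complete(&rtlpriv->firmware_loading_complete);
    }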
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index c269942b3f4a..af8d17b9e012 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -197,9 +197,9 @@ enum rtl8821a_h2c_cmd {
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \
- u8p_replace_bits(__cmd + 1, __value, BIT(0))
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value) \
- u8p_replace_bits(__cmd + 1, __value, BIT(1))
+ u8p_replace_bits(__cmd, __value, BIT(1))
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
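The removed "+ 1" double-applied an offset: u8p_replace_bits() read-modify-writes the byte its pointer argument addresses, and callers of these macros already pass a pointer to the parameter byte, so the old code corrupted the following byte while leaving the intended one unset. A hedged illustration:

    static void example_msrrpt_parm(u8 *h2c_parm)
    {
            /* sets bit 0 of *h2c_parm in place; no extra offset needed */
            u8p_replace_bits(h2c_parm, 1, BIT(0));
    }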
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 1be51ea3f3c8..9eddbada8af1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -2033,8 +2033,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (!_rtl8821ae_check_condition(hw, v1)) {
i += 2; /* skip the pair of expression*/
v2 = array[i+1];
- while (v2 != 0xDEAD)
+ while (v2 != 0xDEAD) {
i += 3;
+ v2 = array[i + 1];
+ }
}
}
}
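The added re-read of v2 is what lets the skip loop terminate: the parameter table is laid out as 3-word entries ending at a 0xDEAD marker in the second word, and the old loop tested a value loaded once before entering, so it could spin forever. An equivalent sketch of the fixed loop (hypothetical helper):

    static u32 example_skip_cond_entries(const u32 *array, u32 i)
    {
            /* advance over 3-word entries until the 0xDEAD terminator */
            while (array[i + 1] != 0xDEAD)
                    i += 3;

            return i;
    }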
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index d37a017b2b81..f5718e570011 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -629,11 +629,6 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
tasklet_kill(&rtlusb->rx_work_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
- if (rtlpriv->works.rtl_wq) {
- destroy_workqueue(rtlpriv->works.rtl_wq);
- rtlpriv->works.rtl_wq = NULL;
- }
-
skb_queue_purge(&rtlusb->rx_queue);
while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
@@ -1028,19 +1023,22 @@ int rtl_usb_probe(struct usb_interface *intf,
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
- goto error_out;
+ goto error_init_vars;
}
rtlpriv->mac80211.mac80211_registered = 1;
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
+error_init_vars:
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
error_out:
+ rtl_usb_deinit(hw);
rtl_deinit_core(hw);
error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
- complete(&rtlpriv->firmware_loading_complete);
kfree(rtlpriv->usb_data);
ieee80211_free_hw(hw);
return -ENODEV;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index ae6e351bc83c..f1830ddcdd8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2270,8 +2270,6 @@ struct rtl_intf_ops {
/*com */
int (*adapter_start)(struct ieee80211_hw *hw);
void (*adapter_stop)(struct ieee80211_hw *hw);
- bool (*check_buddy_priv)(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv);
int (*adapter_tx)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -2514,14 +2512,6 @@ struct dig_t {
u32 rssi_max;
};
-struct rtl_global_var {
- /* from this list we can get
- * other adapter's rtl_priv
- */
- struct list_head glb_priv_list;
- spinlock_t glb_list_lock;
-};
-
#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */
struct rtl_btc_info {
@@ -2667,9 +2657,7 @@ struct rtl_scan_list {
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
- struct list_head list;
struct rtl_priv *buddy_priv;
- struct rtl_global_var *glb_var;
struct rtl_dmsp_ctl dmsp_ctl;
struct rtl_locks locks;
struct rtl_works works;
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index 733b3e58da51..ab21b8059e0b 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -238,4 +238,9 @@ config RTW88_DEBUGFS
If unsure, say Y to simplify debug problems
+config RTW88_LEDS
+ bool
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
+ default y
+
endif
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index f0b49f5a8a5a..0cbbb366e393 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -20,6 +20,8 @@ rtw88_core-y += main.o \
rtw88_core-$(CONFIG_PM) += wow.o
+rtw88_core-$(CONFIG_RTW88_LEDS) += led.o
+
obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o
rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index e6e9946fbf44..02389b7c6876 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -332,6 +332,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_RA_RPT:
rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
break;
+ case C2H_ADAPTIVITY:
+ rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
+ break;
default:
rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
break;
@@ -367,10 +370,6 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
rtw_fw_scan_result(rtwdev, c2h->payload, len);
dev_kfree_skb_any(skb);
break;
- case C2H_ADAPTIVITY:
- rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
- dev_kfree_skb_any(skb);
- break;
default:
/* pass offset for further operation */
*((u32 *)skb->cb) = pkt_offset;
diff --git a/drivers/net/wireless/realtek/rtw88/led.c b/drivers/net/wireless/realtek/rtw88/led.c
new file mode 100644
index 000000000000..25aa6cbaa728
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/led.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include "main.h"
+#include "debug.h"
+#include "led.h"
+
+static int rtw_led_set_blocking(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+
+ rtwdev->chip->ops->led_set(led, brightness);
+
+ return 0;
+}
+
+void rtw_led_init(struct rtw_dev *rtwdev)
+{
+ static const struct ieee80211_tpt_blink rtw_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+ };
+ struct led_classdev *led = &rtwdev->led_cdev;
+ int err;
+
+ if (!rtwdev->chip->ops->led_set)
+ return;
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ led->brightness_set = rtwdev->chip->ops->led_set;
+ else
+ led->brightness_set_blocking = rtw_led_set_blocking;
+
+ snprintf(rtwdev->led_name, sizeof(rtwdev->led_name),
+ "rtw88-%s", dev_name(rtwdev->dev));
+
+ led->name = rtwdev->led_name;
+ led->max_brightness = LED_ON;
+ led->default_trigger =
+ ieee80211_create_tpt_led_trigger(rtwdev->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ rtw_tpt_blink,
+ ARRAY_SIZE(rtw_tpt_blink));
+
+ err = led_classdev_register(rtwdev->dev, led);
+ if (err) {
+ rtw_warn(rtwdev, "Failed to register the LED, error %d\n", err);
+ return;
+ }
+
+ rtwdev->led_registered = true;
+}
+
+void rtw_led_deinit(struct rtw_dev *rtwdev)
+{
+ struct led_classdev *led = &rtwdev->led_cdev;
+
+ if (!rtwdev->led_registered)
+ return;
+
+ rtwdev->chip->ops->led_set(led, LED_OFF);
+ led_classdev_unregister(led);
+}
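The split between brightness_set and brightness_set_blocking reflects the bus: PCIe LED updates are plain register writes that are safe from atomic context, while USB and SDIO register access sleeps, so those buses get the blocking variant, which the LED core invokes from process context. A chip driver opts in with a single op; a hedged sketch with illustrative names:

    static void example_led_set(struct led_classdev *led,
                                enum led_brightness brightness)
    {
            /* chip-specific register write toggling the LED pin */
    }

    static const struct rtw_chip_ops example_chip_ops = {
            /* ...the chip's other mandatory ops... */
            .led_set = example_led_set,
    };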
diff --git a/drivers/net/wireless/realtek/rtw88/led.h b/drivers/net/wireless/realtek/rtw88/led.h
new file mode 100644
index 000000000000..fa64002b0215
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/led.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#ifndef __RTW_LED_H
+#define __RTW_LED_H
+
+#ifdef CONFIG_RTW88_LEDS
+
+void rtw_led_init(struct rtw_dev *rtwdev);
+void rtw_led_deinit(struct rtw_dev *rtwdev);
+
+#else
+
+static inline void rtw_led_init(struct rtw_dev *rtwdev)
+{
+}
+
+static inline void rtw_led_deinit(struct rtw_dev *rtwdev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index e91530ed05a0..0cee0fd8c0ef 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -19,6 +19,7 @@
#include "bf.h"
#include "sar.h"
#include "sdio.h"
+#include "led.h"
bool rtw_disable_lps_deep_mode;
EXPORT_SYMBOL(rtw_disable_lps_deep_mode);
@@ -1217,7 +1218,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
u8 wireless_set;
u8 bw_mode;
u8 rate_id;
- u8 rf_type = RF_1T1R;
u8 stbc_en = 0;
u8 ldpc_en = 0;
u8 tx_num = 1;
@@ -1302,13 +1302,10 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
break;
}
- if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) {
+ if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000)
tx_num = 2;
- rf_type = RF_2T2R;
- } else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) {
+ else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000)
tx_num = 2;
- rf_type = RF_2T2R;
- }
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
@@ -1319,7 +1316,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
si->ldpc_en = ldpc_en;
- si->rf_type = rf_type;
si->sgi_enable = is_support_sgi;
si->vht_enable = is_vht_enable;
si->ra_mask = ra_mask;
@@ -2297,16 +2293,18 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
return ret;
}
+ rtw_led_init(rtwdev);
+
ret = ieee80211_register_hw(hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
- return ret;
+ goto led_deinit;
}
ret = rtw_regd_hint(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to hint regd\n");
- return ret;
+ goto led_deinit;
}
rtw_debugfs_init(rtwdev);
@@ -2315,6 +2313,10 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtwdev->bf_info.bfer_su_cnt = 0;
return 0;
+
+led_deinit:
+ rtw_led_deinit(rtwdev);
+ return ret;
}
EXPORT_SYMBOL(rtw_register_hw);
@@ -2325,6 +2327,7 @@ void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
rtw_debugfs_deinit(rtwdev);
+ rtw_led_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw_unregister_hw);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index cd09fb6f7b8b..62cd4c526301 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -510,12 +510,12 @@ struct rtw_5g_txpwr_idx {
struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff;
-};
+} __packed;
struct rtw_txpwr_idx {
struct rtw_2g_txpwr_idx pwr_idx_2g;
struct rtw_5g_txpwr_idx pwr_idx_5g;
-};
+} __packed;
struct rtw_channel_params {
u8 center_chan;
@@ -757,7 +757,6 @@ struct rtw_sta_info {
u8 mac_id;
u8 rate_id;
enum rtw_bandwidth bw_mode;
- enum rtw_rf_type rf_type;
u8 stbc_en:2;
u8 ldpc_en:2;
bool sgi_enable;
@@ -888,6 +887,7 @@ struct rtw_chip_ops {
bool is_tx2_path);
void (*config_txrx_mode)(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
+ void (*led_set)(struct led_classdev *led, enum led_brightness brightness);
/* for USB/SDIO only */
void (*fill_txdesc_checksum)(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
@@ -2098,6 +2098,10 @@ struct rtw_dev {
struct completion fw_scan_density;
bool ap_active;
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
+
/* hci related data, must be last */
u8 priv[] __aligned(sizeof(void *));
};
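Marking the efuse and txpower structs __packed matters because they are overlaid directly on bytes read from the chip: any compiler-inserted padding would shift every subsequent field. A compile-time check in the spirit of this change, as a hedged sketch (with __packed, the outer struct is exactly the sum of its members):

    static inline void example_check_txpwr_layout(void)
    {
            BUILD_BUG_ON(sizeof(struct rtw_txpwr_idx) !=
                         sizeof(struct rtw_2g_txpwr_idx) +
                         sizeof(struct rtw_5g_txpwr_idx));
    }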
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index e4d506cf9c33..e438405fba56 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -78,7 +78,19 @@
#define BIT_PAPE_SEL_EN BIT(25)
#define BIT_DPDT_WL_SEL BIT(24)
#define BIT_DPDT_SEL_EN BIT(23)
+#define BIT_GPIO13_14_WL_CTRL_EN BIT(22)
+#define BIT_LED2_SV BIT(19)
+#define BIT_LED2_CM GENMASK(18, 16)
+#define BIT_LED1_SV BIT(11)
+#define BIT_LED1_CM GENMASK(10, 8)
+#define BIT_LED0_SV BIT(3)
+#define BIT_LED0_CM GENMASK(2, 0)
+#define BIT_LED_MODE_SW_CTRL 0
+#define BIT_LED_MODE_RX 6
+#define BIT_LED_MODE_TX 4
+#define BIT_LED_MODE_TRX 2
#define REG_LEDCFG2 0x004E
+#define REG_GPIO_PIN_CTRL_2 0x0060
#define REG_PAD_CTRL1 0x0064
#define BIT_BT_BTG_SEL BIT(31)
#define BIT_PAPE_WLBT_SEL BIT(29)
@@ -871,7 +883,17 @@
#define REG_USB_MOD 0xf008
#define REG_USB3_RXITV 0xf050
+#define REG_USB2_PHY_ADR 0xfe40
+#define REG_USB2_PHY_DAT 0xfe41
+#define REG_USB2_PHY_CMD 0xfe42
+#define BIT_USB2_PHY_CMD_TRG 0x81
#define REG_USB_HRPWM 0xfe58
+#define REG_USB3_PHY_ADR 0xff0c
+#define REG_USB3_PHY_DAT_L 0xff0d
+#define REG_USB3_PHY_DAT_H 0xff0e
+#define BIT_USB3_PHY_ADR_WR BIT(7)
+#define BIT_USB3_PHY_ADR_RD BIT(6)
+#define BIT_USB3_PHY_ADR_MASK GENMASK(5, 0)
#define RF_MODE 0x00
#define RF_MODOPT 0x01
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index a19b94d022ee..1d232adbdd7e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -903,7 +903,7 @@ static void rtw8703b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
rtw_write32_mask(rtwdev, REG_OFDM0_TX_PSD_NOISE,
- GENMASK(31, 20), 0x0);
+ GENMASK(31, 30), 0x0);
rtw_write32(rtwdev, REG_BBRX_DFIR, 0x4A880000);
rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x19F60000);
break;
@@ -1198,9 +1198,9 @@ static u8 rtw8703b_iqk_rx_path(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
- rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8216000f);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8214030f);
rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
- rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x28110000);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
/* LOK setting */
@@ -1372,7 +1372,7 @@ void rtw8703b_iqk_fill_a_matrix(struct rtw_dev *rtwdev, const s32 result[])
return;
tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_X, result[IQK_S1_RX_X]);
- tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_X]);
+ tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_Y]);
rtw_write32(rtwdev, REG_A_RXIQI, tmp_rx_iqi);
rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y]));
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
index e93bfce994bf..a99af527c92c 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
@@ -47,7 +47,7 @@ struct rtw8723xe_efuse {
u8 device_id[2];
u8 sub_vendor_id[2];
u8 sub_device_id[2];
-};
+} __packed;
struct rtw8723xu_efuse {
u8 res4[48]; /* 0xd0 */
@@ -56,12 +56,12 @@ struct rtw8723xu_efuse {
u8 usb_option; /* 0x104 */
u8 res5[2]; /* 0x105 */
u8 mac_addr[ETH_ALEN]; /* 0x107 */
-};
+} __packed;
struct rtw8723xs_efuse {
u8 res4[0x4a]; /* 0xd0 */
u8 mac_addr[ETH_ALEN]; /* 0x11a */
-};
+} __packed;
struct rtw8723x_efuse {
__le16 rtl_id;
@@ -96,7 +96,7 @@ struct rtw8723x_efuse {
struct rtw8723xu_efuse u;
struct rtw8723xs_efuse s;
};
-};
+} __packed;
#define RTW8723X_IQK_ADDA_REG_NUM 16
#define RTW8723X_IQK_MAC8_REG_NUM 3
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
index 482edd31823d..f9ba2aa2928a 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
@@ -868,6 +868,22 @@ static void rtw8812a_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
+static void rtw8812a_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u8 ledcfg;
+
+ ledcfg = rtw_read8(rtwdev, REG_LED_CFG);
+ ledcfg &= BIT(6) | BIT(4);
+ ledcfg |= BIT(5);
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT(3);
+
+ rtw_write8(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8812a_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -916,6 +932,7 @@ static const struct rtw_chip_ops rtw8812a_ops = {
.config_bfee = NULL,
.set_gid_table = NULL,
.cfg_csi_rate = NULL,
+ .led_set = rtw8812a_led_set,
.fill_txdesc_checksum = rtw8812a_fill_txdesc_checksum,
.coex_set_init = rtw8812a_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -985,6 +1002,9 @@ static const struct rtw_rfe_def rtw8812a_rfe_defs[] = {
[1] = { .phy_pg_tbl = &rtw8812a_bb_pg_tbl,
.txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl,
.pwr_track_tbl = &rtw8812a_rtw_pwr_track_tbl, },
+ [2] = { .phy_pg_tbl = &rtw8812a_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl,
+ .pwr_track_tbl = &rtw8812a_rtw_pwr_track_tbl, },
[3] = { .phy_pg_tbl = &rtw8812a_bb_pg_rfe3_tbl,
.txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl,
.pwr_track_tbl = &rtw8812a_rtw_pwr_track_rfe3_tbl, },
@@ -1024,7 +1044,7 @@ const struct rtw_chip_info rtw8812a_hw_spec = {
.rx_buf_desc_sz = 8,
.phy_efuse_size = 512,
.log_efuse_size = 512,
- .ptct_efuse_size = 96 + 1, /* TODO or just 18? */
+ .ptct_efuse_size = 0,
.txff_size = 131072,
.rxff_size = 16128,
.rsvd_drv_pg_num = 9,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812au.c b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
index 4da69590a423..e18995f4cc78 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
@@ -9,8 +9,74 @@
#include "usb.h"
static const struct usb_device_id rtw_8812au_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0409, 0x0408, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* NEC */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x025d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0952, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* I-O DATA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1106, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1109, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x3426, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ZyXEL */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Logitec */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Abocom */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9051, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Netgear */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17d2, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ASUS */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0074, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Sitecom */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0022, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Hawking */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0632, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* WD */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x003f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Linksys */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x148f, 0x9097, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Amped Wireless */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1740, 0x0100, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* EnGenius */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3313, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3315, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3316, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab30, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Planex */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TRENDnet */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0101, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0103, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0122, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Tenda */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa822, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
index db242c9ad68f..f68239b07319 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
@@ -706,6 +706,31 @@ static void rtw8821a_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
+static void rtw8821a_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 gpio8_cfg;
+ u8 ledcfg;
+
+ if (brightness == LED_OFF) {
+ gpio8_cfg = rtw_read32(rtwdev, REG_GPIO_PIN_CTRL_2);
+ gpio8_cfg &= ~BIT(24);
+ gpio8_cfg |= BIT(16) | BIT(8);
+ rtw_write32(rtwdev, REG_GPIO_PIN_CTRL_2, gpio8_cfg);
+ } else {
+ ledcfg = rtw_read8(rtwdev, REG_LED_CFG + 2);
+ gpio8_cfg = rtw_read32(rtwdev, REG_GPIO_PIN_CTRL_2);
+
+ ledcfg &= BIT(7) | BIT(6);
+ rtw_write8(rtwdev, REG_LED_CFG + 2, ledcfg);
+
+ gpio8_cfg &= ~(BIT(24) | BIT(8));
+ gpio8_cfg |= BIT(16);
+ rtw_write32(rtwdev, REG_GPIO_PIN_CTRL_2, gpio8_cfg);
+ }
+}
+
static void rtw8821a_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -853,6 +878,7 @@ static const struct rtw_chip_ops rtw8821a_ops = {
.config_bfee = NULL,
.set_gid_table = NULL,
.cfg_csi_rate = NULL,
+ .led_set = rtw8821a_led_set,
.fill_txdesc_checksum = rtw8821a_fill_txdesc_checksum,
.coex_set_init = rtw8821a_coex_cfg_init,
.coex_set_ant_switch = rtw8821a_coex_cfg_ant_switch,
@@ -1118,7 +1144,7 @@ const struct rtw_chip_info rtw8821a_hw_spec = {
.rx_buf_desc_sz = 8,
.phy_efuse_size = 512,
.log_efuse_size = 512,
- .ptct_efuse_size = 96 + 1, /* TODO or just 18? */
+ .ptct_efuse_size = 0,
.txff_size = 65536,
.rxff_size = 16128,
.rsvd_drv_pg_num = 8,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821au.c b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
index 730018773e1c..a01744b64e8d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
@@ -9,8 +9,58 @@
#include "usb.h"
static const struct usb_device_id rtw_8821au_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0820, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0821, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8822, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0823, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xa811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x0242, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x029b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0953, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* I-O DATA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4007, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9052, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Netgear */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0023, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* HAWKING */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3314, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3318, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab32, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Planex */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x804b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TRENDnet */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0120, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3823, 0x6249, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Obihai */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa813, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb611, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 0270225b9c20..eb7e34c545d0 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1206,6 +1206,24 @@ static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
dm_info->cck_pd_default + new_lvl * 2);
}
+static void rtw8821c_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 ledcfg;
+
+ ledcfg = rtw_read32(rtwdev, REG_LED_CFG);
+ u32p_replace_bits(&ledcfg, BIT_LED_MODE_SW_CTRL, BIT_LED2_CM);
+ ledcfg &= ~BIT_GPIO13_14_WL_CTRL_EN;
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT_LED2_SV;
+ else
+ ledcfg &= ~BIT_LED2_SV;
+
+ rtw_write32(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8821c_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -1655,6 +1673,7 @@ static const struct rtw_chip_ops rtw8821c_ops = {
.config_bfee = rtw8821c_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .led_set = rtw8821c_led_set,
.fill_txdesc_checksum = rtw8821c_fill_txdesc_checksum,
.coex_set_init = rtw8821c_coex_cfg_init,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
index 7a33ebd612ed..954e93c8020d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
@@ -27,7 +27,7 @@ struct rtw8821cu_efuse {
u8 res11[0xcf];
u8 package_type; /* 0x1fb */
u8 res12[0x4];
-};
+} __packed;
struct rtw8821ce_efuse {
u8 mac_addr[ETH_ALEN]; /* 0xd0 */
@@ -47,7 +47,8 @@ struct rtw8821ce_efuse {
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
- u8 res2:3;
+ u8 res2_1:1;
+ u8 res2_2:2;
u8 obff_cap:2;
u8 res3:4;
u8 res4[3];
@@ -63,7 +64,7 @@ struct rtw8821ce_efuse {
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
-};
+} __packed;
struct rtw8821cs_efuse {
u8 res4[0x4a]; /* 0xd0 */
@@ -101,7 +102,7 @@ struct rtw8821c_efuse {
struct rtw8821cu_efuse u;
struct rtw8821cs_efuse s;
};
-};
+} __packed;
static inline void
_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 739809f4cab5..7f03903ddf4b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1566,6 +1566,24 @@ static void rtw8822b_adaptivity(struct rtw_dev *rtwdev)
rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
}
+static void rtw8822b_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 ledcfg;
+
+ ledcfg = rtw_read32(rtwdev, REG_LED_CFG);
+ u32p_replace_bits(&ledcfg, BIT_LED_MODE_SW_CTRL, BIT_LED2_CM);
+ ledcfg &= ~BIT_GPIO13_14_WL_CTRL_EN;
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT_LED2_SV;
+ else
+ ledcfg &= ~BIT_LED2_SV;
+
+ rtw_write32(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8822b_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -2146,6 +2164,7 @@ static const struct rtw_chip_ops rtw8822b_ops = {
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
.adaptivity_init = rtw8822b_adaptivity_init,
.adaptivity = rtw8822b_adaptivity,
+ .led_set = rtw8822b_led_set,
.fill_txdesc_checksum = rtw8822b_fill_txdesc_checksum,
.coex_set_init = rtw8822b_coex_cfg_init,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 0514958fb57c..9fca9ba67c90 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -27,7 +27,7 @@ struct rtw8822bu_efuse {
u8 res11[0xcf];
u8 package_type; /* 0x1fb */
u8 res12[0x4];
-};
+} __packed;
struct rtw8822be_efuse {
u8 mac_addr[ETH_ALEN]; /* 0xd0 */
@@ -47,7 +47,8 @@ struct rtw8822be_efuse {
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
- u8 res2:3;
+ u8 res2_1:1;
+ u8 res2_2:2;
u8 obff_cap:2;
u8 res3:4;
u8 res4[3];
@@ -63,7 +64,7 @@ struct rtw8822be_efuse {
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
-};
+} __packed;
struct rtw8822bs_efuse {
u8 res4[0x4a]; /* 0xd0 */
@@ -103,7 +104,7 @@ struct rtw8822b_efuse {
struct rtw8822bu_efuse u;
struct rtw8822bs_efuse s;
};
-};
+} __packed;
static inline void
_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
index ab620a0b1dfc..8883300fc6ad 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
@@ -67,6 +67,12 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* LiteOn */
{ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x808a, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-808UBM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-805UBH */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4011, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index af6b76937f1d..ec362a817f5f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -4537,6 +4537,24 @@ static void rtw8822c_adaptivity(struct rtw_dev *rtwdev)
rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
}
+static void rtw8822c_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 ledcfg;
+
+ ledcfg = rtw_read32(rtwdev, REG_LED_CFG);
+ u32p_replace_bits(&ledcfg, BIT_LED_MODE_SW_CTRL, BIT_LED2_CM);
+ ledcfg &= ~BIT_GPIO13_14_WL_CTRL_EN;
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT_LED2_SV;
+ else
+ ledcfg &= ~BIT_LED2_SV;
+
+ rtw_write32(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8822c_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -4964,6 +4982,7 @@ static const struct rtw_chip_ops rtw8822c_ops = {
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
.config_txrx_mode = rtw8822c_config_trx_mode,
+ .led_set = rtw8822c_led_set,
.fill_txdesc_checksum = rtw8822c_fill_txdesc_checksum,
.coex_set_init = rtw8822c_coex_cfg_init,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index e2b383d633cd..fc62b67a15f2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -14,7 +14,7 @@ struct rtw8822cu_efuse {
u8 res1[3];
u8 mac_addr[ETH_ALEN]; /* 0x157 */
u8 res2[0x3d];
-};
+} __packed;
struct rtw8822cs_efuse {
u8 res0[0x4a]; /* 0x120 */
@@ -39,7 +39,8 @@ struct rtw8822ce_efuse {
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
- u8 res2:3;
+ u8 res2_1:1;
+ u8 res2_2:2;
u8 obff_cap:2;
u8 res3:4;
u8 class_code[3];
@@ -55,7 +56,7 @@ struct rtw8822ce_efuse {
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
-};
+} __packed;
struct rtw8822c_efuse {
__le16 rtl_id;
@@ -102,7 +103,7 @@ struct rtw8822c_efuse {
struct rtw8822cu_efuse u;
struct rtw8822cs_efuse s;
};
-};
+} __packed;
enum rtw8822c_dpk_agc_phase {
RTW_DPK_GAIN_CHECK,
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 799230eb5f16..e024061bdbf7 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -1192,6 +1192,8 @@ static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hw *hw = rtwdev->hw;
+ skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
+
/* enqueue to wait for tx report */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 8d6db68246f1..c4908db4ff0e 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -7,6 +7,7 @@
#include <linux/mutex.h>
#include "main.h"
#include "debug.h"
+#include "mac.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
@@ -547,49 +548,58 @@ static void rtw_usb_rx_handler(struct work_struct *work)
{
struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work);
struct rtw_dev *rtwdev = rtwusb->rtwdev;
- const struct rtw_chip_info *chip = rtwdev->chip;
- u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
struct ieee80211_rx_status rx_status;
- u32 pkt_offset, next_pkt, urb_len;
struct rtw_rx_pkt_stat pkt_stat;
- struct sk_buff *next_skb;
+ struct sk_buff *rx_skb;
struct sk_buff *skb;
+ u32 pkt_desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u32 max_skb_len = pkt_desc_sz + PHY_STATUS_SIZE * 8 +
+ IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ u32 pkt_offset, next_pkt, skb_len;
u8 *rx_desc;
int limit;
for (limit = 0; limit < 200; limit++) {
- skb = skb_dequeue(&rtwusb->rx_queue);
- if (!skb)
+ rx_skb = skb_dequeue(&rtwusb->rx_queue);
+ if (!rx_skb)
break;
if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) {
dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n");
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(rx_skb);
continue;
}
- urb_len = skb->len;
+ rx_desc = rx_skb->data;
do {
- rx_desc = skb->data;
rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat,
&rx_status);
pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
pkt_stat.shift;
- next_pkt = round_up(pkt_stat.pkt_len + pkt_offset, 8);
+ skb_len = pkt_stat.pkt_len + pkt_offset;
+ if (skb_len > max_skb_len) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "skipping too big packet: %u\n",
+ skb_len);
+ goto skip_packet;
+ }
- if (urb_len >= next_pkt + pkt_desc_sz)
- next_skb = skb_clone(skb, GFP_KERNEL);
- else
- next_skb = NULL;
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (!skb) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "failed to allocate RX skb of size %u\n",
+ skb_len);
+ goto skip_packet;
+ }
+
+ skb_put_data(skb, rx_desc, skb_len);
if (pkt_stat.is_c2h) {
- skb_trim(skb, pkt_stat.pkt_len + pkt_offset);
rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
} else {
skb_pull(skb, pkt_offset);
- skb_trim(skb, pkt_stat.pkt_len);
rtw_update_rx_freq_for_invalid(rtwdev, skb,
&rx_status,
&pkt_stat);
@@ -598,37 +608,75 @@ static void rtw_usb_rx_handler(struct work_struct *work)
ieee80211_rx_irqsafe(rtwdev->hw, skb);
}
- skb = next_skb;
- if (skb)
- skb_pull(skb, next_pkt);
+skip_packet:
+ next_pkt = round_up(skb_len, 8);
+ rx_desc += next_pkt;
+ } while (rx_desc + pkt_desc_sz < rx_skb->data + rx_skb->len);
- urb_len -= next_pkt;
- } while (skb);
+ if (skb_queue_len(&rtwusb->rx_free_queue) >= RTW_USB_RX_SKB_NUM)
+ dev_kfree_skb_any(rx_skb);
+ else
+ skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
}
}
static void rtw_usb_read_port_complete(struct urb *urb);
-static void rtw_usb_rx_resubmit(struct rtw_usb *rtwusb, struct rx_usb_ctrl_block *rxcb)
+static void rtw_usb_rx_resubmit(struct rtw_usb *rtwusb,
+ struct rx_usb_ctrl_block *rxcb,
+ gfp_t gfp)
{
struct rtw_dev *rtwdev = rtwusb->rtwdev;
+ struct sk_buff *rx_skb;
int error;
- rxcb->rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, GFP_ATOMIC);
- if (!rxcb->rx_skb)
- return;
+ rx_skb = skb_dequeue(&rtwusb->rx_free_queue);
+ if (!rx_skb)
+ rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, gfp);
+
+ if (!rx_skb)
+ goto try_later;
+
+ skb_reset_tail_pointer(rx_skb);
+ rx_skb->len = 0;
+
+ rxcb->rx_skb = rx_skb;
usb_fill_bulk_urb(rxcb->rx_urb, rtwusb->udev,
usb_rcvbulkpipe(rtwusb->udev, rtwusb->pipe_in),
rxcb->rx_skb->data, RTW_USB_MAX_RECVBUF_SZ,
rtw_usb_read_port_complete, rxcb);
- error = usb_submit_urb(rxcb->rx_urb, GFP_ATOMIC);
+ error = usb_submit_urb(rxcb->rx_urb, gfp);
if (error) {
- kfree_skb(rxcb->rx_skb);
+ skb_queue_tail(&rtwusb->rx_free_queue, rxcb->rx_skb);
+
if (error != -ENODEV)
rtw_err(rtwdev, "Err sending rx data urb %d\n",
error);
+
+ if (error == -ENOMEM)
+ goto try_later;
+ }
+
+ return;
+
+try_later:
+ rxcb->rx_skb = NULL;
+ queue_work(rtwusb->rxwq, &rtwusb->rx_urb_work);
+}
+
+static void rtw_usb_rx_resubmit_work(struct work_struct *work)
+{
+ struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_urb_work);
+ struct rx_usb_ctrl_block *rxcb;
+ int i;
+
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+
+ if (!rxcb->rx_skb)
+ rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
}
}
@@ -644,15 +692,16 @@ static void rtw_usb_read_port_complete(struct urb *urb)
urb->actual_length < 24) {
rtw_err(rtwdev, "failed to get urb length:%d\n",
urb->actual_length);
- if (skb)
- dev_kfree_skb_any(skb);
+ skb_queue_tail(&rtwusb->rx_free_queue, skb);
} else {
skb_put(skb, urb->actual_length);
skb_queue_tail(&rtwusb->rx_queue, skb);
queue_work(rtwusb->rxwq, &rtwusb->rx_work);
}
- rtw_usb_rx_resubmit(rtwusb, rxcb);
+ rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
} else {
+ skb_queue_tail(&rtwusb->rx_free_queue, skb);
+
switch (urb->status) {
case -EINVAL:
case -EPIPE:
@@ -670,8 +719,6 @@ static void rtw_usb_read_port_complete(struct urb *urb)
rtw_err(rtwdev, "status %d\n", urb->status);
break;
}
- if (skb)
- dev_kfree_skb_any(skb);
}
}
@@ -789,6 +836,30 @@ static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
}
+static void rtw_usb_dynamic_rx_agg_v2(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ u8 size, timeout;
+ u16 val16;
+
+ if (!enable) {
+ size = 0x0;
+ timeout = 0x1;
+ } else if (rtwusb->udev->speed == USB_SPEED_SUPER) {
+ size = 0x6;
+ timeout = 0x1a;
+ } else {
+ size = 0x5;
+ timeout = 0x20;
+ }
+
+ val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
+ u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);
+
+ rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
+ rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
+}
+
static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
switch (rtwdev->chip->id) {
@@ -797,6 +868,10 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
case RTW_CHIP_TYPE_8821C:
rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
break;
+ case RTW_CHIP_TYPE_8821A:
+ case RTW_CHIP_TYPE_8812A:
+ rtw_usb_dynamic_rx_agg_v2(rtwdev, enable);
+ break;
case RTW_CHIP_TYPE_8723D:
/* Doesn't like aggregation. */
break;
@@ -831,16 +906,26 @@ static struct rtw_hci_ops rtw_usb_ops = {
static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct sk_buff *rx_skb;
+ int i;
- rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
+ rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH, 0);
if (!rtwusb->rxwq) {
rtw_err(rtwdev, "failed to create RX work queue\n");
return -ENOMEM;
}
skb_queue_head_init(&rtwusb->rx_queue);
+ skb_queue_head_init(&rtwusb->rx_free_queue);
INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
+ INIT_WORK(&rtwusb->rx_urb_work, rtw_usb_rx_resubmit_work);
+
+ for (i = 0; i < RTW_USB_RX_SKB_NUM; i++) {
+ rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, GFP_KERNEL);
+ if (rx_skb)
+ skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
+ }
return 0;
}
@@ -853,7 +938,7 @@ static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
- rtw_usb_rx_resubmit(rtwusb, rxcb);
+ rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_KERNEL);
}
}
@@ -865,6 +950,8 @@ static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
flush_workqueue(rtwusb->rxwq);
destroy_workqueue(rtwusb->rxwq);
+
+ skb_queue_purge(&rtwusb->rx_free_queue);
}
static int rtw_usb_init_tx(struct rtw_dev *rtwdev)
@@ -930,6 +1017,32 @@ static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
usb_set_intfdata(intf, NULL);
}
+static int rtw_usb_switch_mode_old(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ enum usb_device_speed cur_speed = rtwusb->udev->speed;
+ u8 hci_opt;
+
+ if (cur_speed == USB_SPEED_HIGH) {
+ hci_opt = rtw_read8(rtwdev, REG_HCI_OPT_CTRL);
+
+ if ((hci_opt & (BIT(2) | BIT(3))) != BIT(3)) {
+ rtw_write8(rtwdev, REG_HCI_OPT_CTRL, 0x8);
+ rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x2);
+ rtw_write8(rtwdev, REG_ACLK_MON, 0x1);
+ rtw_write8(rtwdev, 0x3d, 0x3);
+ /* usb disconnect */
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL + 1, 0x80);
+ return 1;
+ }
+ } else if (cur_speed == USB_SPEED_SUPER) {
+ rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT(1));
+ rtw_write8_clr(rtwdev, REG_ACLK_MON, BIT(0));
+ }
+
+ return 0;
+}
+
static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
{
enum usb_device_speed cur_speed;
@@ -979,11 +1092,22 @@ static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
return 1;
}
+static bool rtw_usb3_chip_old(u8 chip_id)
+{
+ return chip_id == RTW_CHIP_TYPE_8812A;
+}
+
+static bool rtw_usb3_chip_new(u8 chip_id)
+{
+ return chip_id == RTW_CHIP_TYPE_8822C ||
+ chip_id == RTW_CHIP_TYPE_8822B;
+}
+
static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
{
u8 id = rtwdev->chip->id;
- if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B)
+ if (!rtw_usb3_chip_new(id) && !rtw_usb3_chip_old(id))
return 0;
if (!rtwdev->efuse.usb_mode_switch) {
@@ -998,7 +1122,75 @@ static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
return 0;
}
- return rtw_usb_switch_mode_new(rtwdev);
+ if (rtw_usb3_chip_old(id))
+ return rtw_usb_switch_mode_old(rtwdev);
+ else
+ return rtw_usb_switch_mode_new(rtwdev);
+}
+
+#define USB_REG_PAGE 0xf4
+#define USB_PHY_PAGE0 0x9b
+#define USB_PHY_PAGE1 0xbb
+
+static void rtw_usb_phy_write(struct rtw_dev *rtwdev, u8 addr, u16 data,
+ enum usb_device_speed speed)
+{
+ if (speed == USB_SPEED_SUPER) {
+ rtw_write8(rtwdev, REG_USB3_PHY_DAT_L, data & 0xff);
+ rtw_write8(rtwdev, REG_USB3_PHY_DAT_H, data >> 8);
+ rtw_write8(rtwdev, REG_USB3_PHY_ADR, addr | BIT_USB3_PHY_ADR_WR);
+ } else if (speed == USB_SPEED_HIGH) {
+ rtw_write8(rtwdev, REG_USB2_PHY_DAT, data);
+ rtw_write8(rtwdev, REG_USB2_PHY_ADR, addr);
+ rtw_write8(rtwdev, REG_USB2_PHY_CMD, BIT_USB2_PHY_CMD_TRG);
+ }
+}
+
+static void rtw_usb_page_switch(struct rtw_dev *rtwdev,
+ enum usb_device_speed speed, u8 page)
+{
+ if (speed == USB_SPEED_SUPER)
+ return;
+
+ rtw_usb_phy_write(rtwdev, USB_REG_PAGE, page, speed);
+}
+
+static void rtw_usb_phy_cfg(struct rtw_dev *rtwdev,
+ enum usb_device_speed speed)
+{
+ const struct rtw_intf_phy_para *para = NULL;
+ u16 offset;
+
+ if (!rtwdev->chip->intf_table)
+ return;
+
+ if (speed == USB_SPEED_SUPER)
+ para = rtwdev->chip->intf_table->usb3_para;
+ else if (speed == USB_SPEED_HIGH)
+ para = rtwdev->chip->intf_table->usb2_para;
+
+ if (!para)
+ return;
+
+ for ( ; para->offset != 0xffff; para++) {
+ if (!(para->cut_mask & BIT(rtwdev->hal.cut_version)))
+ continue;
+
+ offset = para->offset;
+
+ if (para->ip_sel == RTW_IP_SEL_MAC) {
+ rtw_write8(rtwdev, offset, para->value);
+ } else {
+ if (offset > 0x100)
+ rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE1);
+ else
+ rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE0);
+
+ offset &= 0xff;
+
+ rtw_usb_phy_write(rtwdev, offset, para->value, speed);
+ }
+ }
}
int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -1056,6 +1248,9 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err_destroy_rxwq;
}
+ rtw_usb_phy_cfg(rtwdev, USB_SPEED_HIGH);
+ rtw_usb_phy_cfg(rtwdev, USB_SPEED_SUPER);
+
ret = rtw_usb_switch_mode(rtwdev);
if (ret) {
/* Not a fail, but we do need to skip rtw_register_hw. */
diff --git a/drivers/net/wireless/realtek/rtw88/usb.h b/drivers/net/wireless/realtek/rtw88/usb.h
index 86697a5c0103..9b695b688b24 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.h
+++ b/drivers/net/wireless/realtek/rtw88/usb.h
@@ -38,6 +38,7 @@
#define RTW_USB_RXAGG_TIMEOUT 10
#define RTW_USB_RXCB_NUM 4
+#define RTW_USB_RX_SKB_NUM 8
#define RTW_USB_EP_MAX 4
@@ -81,7 +82,9 @@ struct rtw_usb {
struct rx_usb_ctrl_block rx_cb[RTW_USB_RXCB_NUM];
struct sk_buff_head rx_queue;
+ struct sk_buff_head rx_free_queue;
struct work_struct rx_work;
+ struct work_struct rx_urb_work;
};
static inline struct rtw_usb_tx_data *rtw_usb_get_tx_data(struct sk_buff *skb)
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index d2a3361669d7..b1c86cdd9c0e 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -96,17 +96,19 @@ config RTW89_8852CE
802.11ax PCIe wireless network (Wi-Fi 6E) adapter
config RTW89_8922AE
- tristate "Realtek 8922AE PCI wireless network (Wi-Fi 7) adapter"
+ tristate "Realtek 8922AE/8922AE-VS PCI wireless network (Wi-Fi 7) adapter"
depends on PCI
select RTW89_CORE
select RTW89_PCI
select RTW89_8922A
help
- Select this option will enable support for 8922AE chipset
+ Select this option to enable support for the 8922AE/8922AE-VS chipset
802.11be PCIe wireless network (Wi-Fi 7) adapter
supporting 2x2 2GHz/5GHz/6GHz 4096-QAM 160MHz channels.
+ The 8922AE-VS variant has the same features, except it is limited to 1024-QAM.
+
config RTW89_DEBUG
bool
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index 908e980a4b72..f5dedb12c129 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -148,3 +148,50 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
ACPI_FREE(obj);
return ret;
}
+
+int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
+ struct rtw89_acpi_rtag_result *res)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_handle root, handle;
+ union acpi_object *obj;
+ acpi_status status;
+ u32 buf_len;
+ int ret = 0;
+
+ root = ACPI_HANDLE(rtwdev->dev);
+ if (!root)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root, (acpi_string)"RTAG", &handle);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = buf.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len != sizeof(*res)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index d274be1775bf..b43ab106e44d 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -63,8 +63,17 @@ struct rtw89_acpi_dsm_result {
} u;
};
+struct rtw89_acpi_rtag_result {
+ u8 tag[4];
+ u8 revision;
+ __le32 domain;
+ u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
+} __packed;
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res);
+int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
+ struct rtw89_acpi_rtag_result *res);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 8ef59994c0db..8fa1e6c1ce13 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -135,8 +135,8 @@ again:
}
static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
- struct rtw89_sec_cam_entry *sec_cam,
- struct ieee80211_key_conf *key,
+ const struct rtw89_sec_cam_entry *sec_cam,
+ const struct ieee80211_key_conf *key,
u8 *key_idx)
{
u8 idx;
@@ -246,8 +246,8 @@ static int __rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
- struct ieee80211_key_conf *key,
- struct rtw89_sec_cam_entry *sec_cam)
+ const struct ieee80211_key_conf *key,
+ const struct rtw89_sec_cam_entry *sec_cam)
{
struct rtw89_addr_cam_entry *addr_cam;
u8 key_idx = 0;
@@ -286,6 +286,22 @@ static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return 0;
}
+int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ u8 sec_cam_idx)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ const struct rtw89_sec_cam_entry *sec_cam;
+
+ sec_cam = cam_info->sec_entries[sec_cam_idx];
+ if (!sec_cam)
+ return -ENOENT;
+
+ return __rtw89_cam_attach_sec_cam(rtwdev, rtwvif_link, rtwsta_link,
+ sec_cam->key_conf, sec_cam);
+}
+
static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -306,6 +322,9 @@ static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
rtwvif = vif_to_rtwvif(vif);
+ if (rtwsta)
+ clear_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
+
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
rtwsta_link = rtwsta ? rtwsta->links[link_id] : NULL;
if (rtwsta && !rtwsta_link)
@@ -369,6 +388,8 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return ret;
}
+ set_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
+
return 0;
}
@@ -410,6 +431,9 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
sec_cam->len = RTW89_SEC_CAM_LEN;
sec_cam->ext_key = ext_key;
memcpy(sec_cam->key, key->key, key->keylen);
+
+ sec_cam->key_conf = key;
+
ret = rtw89_cam_send_sec_key_cmd(rtwdev, sec_cam);
if (ret) {
rtw89_err(rtwdev, "failed to send sec key cmd: %d\n", ret);
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 3134ebf08825..8fd2d776408e 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -578,4 +578,9 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev);
+int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ u8 sec_cam_idx);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index fb9449930c40..4df4e04c3e67 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -391,11 +391,12 @@ static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
list_del(&role->mgnt_entry);
list_add(&role->mgnt_entry, &mgnt->active_list);
- break;
+ goto fill;
}
}
}
+fill:
list_for_each_entry(role, &mgnt->active_list, mgnt_entry) {
if (unlikely(pos >= RTW89_MAX_INTERFACE_NUM)) {
rtw89_warn(rtwdev,
@@ -801,7 +802,7 @@ fill:
mcc_role->limit.max_toa = max_toa_us / 1024;
mcc_role->limit.max_tob = max_tob_us / 1024;
- mcc_role->limit.max_dur = max_dur_us / 1024;
+ mcc_role->limit.max_dur = mcc_role->limit.max_toa + mcc_role->limit.max_tob;
mcc_role->limit.enable = true;
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
@@ -2530,7 +2531,25 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
hal->entity_pause = true;
}
-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
+static void rtw89_chanctx_proceed_cb(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_cb_parm *parm)
+{
+ int ret;
+
+ if (!parm || !parm->cb)
+ return;
+
+ ret = parm->cb(rtwdev, parm->data);
+ if (ret)
+ rtw89_warn(rtwdev, "%s (%s): cb failed: %d\n", __func__,
+ parm->caller ?: "unknown", ret);
+}
+
+/* Pass @cb_parm if @cb_parm->cb needs to be invoked right after calling
+ * rtw89_set_channel() and right before proceeding with the entity mode.
+ */
+void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_cb_parm *cb_parm)
{
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
@@ -2538,14 +2557,18 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
lockdep_assert_held(&rtwdev->mutex);
- if (!hal->entity_pause)
+ if (unlikely(!hal->entity_pause)) {
+ rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
return;
+ }
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx proceed\n");
hal->entity_pause = false;
rtw89_set_channel(rtwdev);
+ rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
+
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC:
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 2eb31dff2083..092a6f676894 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -38,6 +38,12 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_chanctx_cb_parm {
+ int (*cb)(struct rtw89_dev *rtwdev, void *data);
+ void *data;
+ const char *caller;
+};
+
struct rtw89_entity_weight {
unsigned int active_chanctxs;
unsigned int active_roles;
@@ -100,7 +106,8 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_pause_reasons rsn);
-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev);
+void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_cb_parm *cb_parm);
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index e5b2968c1431..85f739f1173d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -203,6 +203,55 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
},
};
+#define RTW89_6GHZ_SPAN_HEAD 6145
+#define RTW89_6GHZ_SPAN_IDX(center_freq) \
+ ((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2)
+
+#define RTW89_DECL_6GHZ_SPAN(center_freq, subband_l, subband_h) \
+ [RTW89_6GHZ_SPAN_IDX(center_freq)] = { \
+ .sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
+ .sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
+ .ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \
+ .ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \
+ }
+
+/* Since 6GHz subbands are not edge aligned, some channels span two subbands.
+ * Each such channel is described below by an rtw89_6ghz_span entry.
+ */
+static const struct rtw89_6ghz_span rtw89_overlapping_6ghz[] = {
+ RTW89_DECL_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
+};
+
+const struct rtw89_6ghz_span *
+rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
+{
+ int idx;
+
+ if (center_freq >= RTW89_6GHZ_SPAN_HEAD) {
+ idx = RTW89_6GHZ_SPAN_IDX(center_freq);
+ /* To decrease the size of rtw89_overlapping_6ghz[],
+ * RTW89_6GHZ_SPAN_IDX() drops the leading empty entries
+ * so that the first span lands at index 0 of the table. So, if the
+ * center frequency is below the first one, the index gets negative.
+ */
+ if (idx >= 0 && idx < ARRAY_SIZE(rtw89_overlapping_6ghz))
+ return &rtw89_overlapping_6ghz[idx];
+ }
+
+ return NULL;
+}
+
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
{
struct ieee80211_rate rate;
@@ -931,6 +980,11 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
bool is_bmc;
u16 seq;
+ if (tx_req->sta)
+ desc_info->mlo = tx_req->sta->mlo;
+ else if (tx_req->vif)
+ desc_info->mlo = ieee80211_vif_is_mld(tx_req->vif);
+
seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) {
tx_type = rtw89_core_get_tx_type(rtwdev, skb);
@@ -938,7 +992,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
addr_cam = rtw89_get_addr_cam_of(tx_req->rtwvif_link,
tx_req->rtwsta_link);
- if (addr_cam->valid)
+ if (addr_cam->valid && desc_info->mlo)
upd_wlan_hdr = true;
}
is_bmc = (is_broadcast_ether_addr(hdr->addr1) ||
@@ -1078,6 +1132,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
}
tx_req.skb = skb;
+ tx_req.vif = vif;
+ tx_req.sta = sta;
tx_req.rtwvif_link = rtwvif_link;
tx_req.rtwsta_link = rtwsta_link;
@@ -2140,6 +2196,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
if (phy_ppdu)
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
+
+ pkt_stat->beacon_rate = desc_info->data_rate;
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@@ -2317,6 +2375,12 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
+{
+ if (!rx_status->signal)
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -2333,6 +2397,8 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
+ rtw89_core_validate_rx_signal(rx_status);
+
/* In low power mode, it does RX in thread context. */
local_bh_disable();
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
@@ -2468,6 +2534,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
u8 *data, u32 data_offset)
{
+ struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt;
struct rtw89_rxdesc_short_v2 *rxd_s;
struct rtw89_rxdesc_long_v2 *rxd_l;
u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;
@@ -2515,6 +2582,12 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2);
desc_info->ready = true;
+ if (phy_rtp_len == sizeof(*rxd_rpt)) {
+ rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset +
+ desc_info->rxd_len);
+ desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI);
+ }
+
if (!desc_info->long_rxdesc)
return;
@@ -2657,6 +2730,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->flag |= RX_FLAG_MACTIME_START;
rx_status->mactime = desc_info->free_run_cnt;
+ rtw89_chip_phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status);
}
@@ -2664,10 +2738,6 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- /* FIXME: Fix __rtw89_enter_ps_mode() to consider MLO cases. */
- if (rtwdev->support_mlo)
- return RTW89_PS_MODE_NONE;
-
if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
@@ -2700,6 +2770,41 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
}
}
+static
+void rtw89_core_rx_pkt_hdl(struct rtw89_dev *rtwdev, const struct sk_buff *skb,
+ const struct rtw89_rx_desc_info *desc)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rtw89_sta_link *rtwsta_link;
+ struct ieee80211_sta *sta;
+ struct rtw89_sta *rtwsta;
+ u8 macid = desc->mac_id;
+
+ if (!refcount_read(&rtwdev->refcount_ap_info))
+ return;
+
+ rcu_read_lock();
+
+ rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
+ if (!rtwsta_link)
+ goto out;
+
+ rtwsta = rtwsta_link->rtwsta;
+ if (!test_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags))
+ goto out;
+
+ sta = rtwsta_to_sta(rtwsta);
+ if (ieee80211_is_pspoll(hdr->frame_control))
+ ieee80211_sta_pspoll(sta);
+ else if (ieee80211_has_pm(hdr->frame_control) &&
+ (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)))
+ ieee80211_sta_uapsd_trigger(sta, ieee80211_get_tid(hdr));
+
+out:
+ rcu_read_unlock();
+}
+
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb)
@@ -2722,6 +2827,7 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
rx_status = IEEE80211_SKB_RXCB(skb);
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
+ rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info);
if (desc_info->long_rxdesc &&
BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
@@ -3131,6 +3237,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
@@ -3146,7 +3253,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
goto out;
}
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, qos);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos);
if (!skb) {
ret = -ENOMEM;
goto out;
@@ -3257,7 +3364,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
roc->state = RTW89_ROC_IDLE;
rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL);
- rtw89_chanctx_proceed(rtwdev);
+ rtw89_chanctx_proceed(rtwdev, NULL);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
@@ -3367,21 +3474,10 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
return tfc_changed;
}
-static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
-{
- if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION &&
- rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
- return;
-
- rtw89_enter_lps(rtwdev, rtwvif_link, true);
-}
-
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
{
- struct rtw89_vif_link *rtwvif_link;
+ struct ieee80211_vif *vif;
struct rtw89_vif *rtwvif;
- unsigned int link_id;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (rtwvif->tdls_peer)
@@ -3393,8 +3489,13 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
rtwvif->stats.rx_tfc_lv != RTW89_TFC_IDLE)
continue;
- rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- rtw89_vif_enter_lps(rtwdev, rtwvif_link);
+ vif = rtwvif_to_vif(rtwvif);
+
+ if (!(vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_P2P_CLIENT))
+ continue;
+
+ rtw89_enter_lps(rtwdev, rtwvif, true);
}
}
@@ -3699,6 +3800,8 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
{
const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ rtw89_assoc_link_clr(rtwsta_link);
+
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
@@ -3748,6 +3851,22 @@ int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
+static bool rtw89_sta_link_can_er(struct rtw89_dev *rtwdev,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ieee80211_link_sta *link_sta)
+{
+ if (!bss_conf->he_support ||
+ bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE)
+ return false;
+
+ if (rtwdev->chip->chip_id == RTL8852C &&
+ rtw89_sta_link_has_su_mu_4xhe08(link_sta) &&
+ !rtw89_sta_link_has_er_su_4xhe08(link_sta))
+ return false;
+
+ return true;
+}
+
int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link)
@@ -3758,12 +3877,11 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtwsta_link);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
+ struct ieee80211_link_sta *link_sta;
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
if (sta->tdls) {
- struct ieee80211_link_sta *link_sta;
-
rcu_read_lock();
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
@@ -3814,9 +3932,8 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
- if (bss_conf->he_support &&
- !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
- rtwsta_link->er_cap = true;
+ link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
+ rtwsta_link->er_cap = rtw89_sta_link_can_er(rtwdev, bss_conf, link_sta);
rcu_read_unlock();
@@ -3834,6 +3951,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
}
+ rtw89_assoc_link_set(rtwsta_link);
return ret;
}
@@ -4117,13 +4235,17 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_eht_mcs_nss_supp *eht_nss;
struct ieee80211_sta_eht_cap *eht_cap;
struct rtw89_hal *hal = &rtwdev->hal;
+ bool support_mcs_12_13 = true;
bool support_320mhz = false;
+ u8 val, val_mcs13;
int sts = 8;
- u8 val;
if (chip->chip_gen == RTW89_CHIP_AX)
return;
+ if (hal->no_mcs_12_13)
+ support_mcs_12_13 = false;
+
if (band == NL80211_BAND_6GHZ &&
chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320))
support_320mhz = true;
@@ -4181,16 +4303,18 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) |
u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ val_mcs13 = support_mcs_12_13 ? val : 0;
+
eht_nss->bw._80.rx_tx_mcs9_max_nss = val;
eht_nss->bw._80.rx_tx_mcs11_max_nss = val;
- eht_nss->bw._80.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs13_max_nss = val_mcs13;
eht_nss->bw._160.rx_tx_mcs9_max_nss = val;
eht_nss->bw._160.rx_tx_mcs11_max_nss = val;
- eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs13_max_nss = val_mcs13;
if (support_320mhz) {
eht_nss->bw._320.rx_tx_mcs9_max_nss = val;
eht_nss->bw._320.rx_tx_mcs11_max_nss = val;
- eht_nss->bw._320.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs13_max_nss = val_mcs13;
}
}
@@ -4433,6 +4557,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_phy_dm_init(rtwdev);
rtw89_mac_cfg_ppdu_status_bands(rtwdev, true);
+ rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
rtw89_mac_update_rts_threshold(rtwdev);
rtw89_tas_reset(rtwdev);
@@ -4755,6 +4880,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_ser_init(rtwdev);
rtw89_entity_init(rtwdev);
rtw89_tas_init(rtwdev);
+ rtw89_phy_ant_gain_init(rtwdev);
return 0;
}
@@ -5100,6 +5226,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
+ ieee80211_hw_set(hw, AP_LINK_PS);
+
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -5220,7 +5349,8 @@ EXPORT_SYMBOL(rtw89_core_unregister);
struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
u32 bus_data_size,
- const struct rtw89_chip_info *chip)
+ const struct rtw89_chip_info *chip,
+ const struct rtw89_chip_variant *variant)
{
struct rtw89_fw_info early_fw = {};
const struct firmware *firmware;
@@ -5278,6 +5408,7 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
rtwdev->dev = device;
rtwdev->ops = ops;
rtwdev->chip = chip;
+ rtwdev->variant = variant;
rtwdev->fw.req.firmware = firmware;
rtwdev->fw.fw_format = fw_format;
rtwdev->support_mlo = support_mlo;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 5ad32eacd0d5..ff4894c7fa8a 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -830,6 +830,7 @@ enum rtw89_phy_idx {
};
#define __RTW89_MLD_MAX_LINK_NUM 2
+#define RTW89_MLD_NON_STA_LINK_NUM 1
enum rtw89_chanctx_idx {
RTW89_CHANCTX_0 = 0,
@@ -1083,6 +1084,7 @@ struct rtw89_rx_desc_info {
u16 offset;
u16 rxd_len;
bool ready;
+ u16 rssi;
};
struct rtw89_rxdesc_short {
@@ -1125,6 +1127,11 @@ struct rtw89_rxdesc_long_v2 {
__le32 dword9;
} __packed;
+struct rtw89_rxdesc_phy_rpt_v2 {
+ __le32 dword0;
+ __le32 dword1;
+} __packed;
+
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
@@ -1163,12 +1170,15 @@ struct rtw89_tx_desc_info {
bool stbc;
bool ldpc;
bool upd_wlan_hdr;
+ bool mlo;
};
struct rtw89_core_tx_request {
enum rtw89_core_tx_type tx_type;
struct sk_buff *skb;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
struct rtw89_tx_desc_info desc_info;
@@ -3358,6 +3368,8 @@ struct rtw89_sec_cam_entry {
u8 spp_mode : 1;
/* 256 bits */
u8 key[32];
+
+ struct ieee80211_key_conf *key_conf;
};
struct rtw89_sta_link {
@@ -3621,6 +3633,9 @@ struct rtw89_chip_ops {
struct ieee80211_rx_status *status);
void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
+ void (*phy_rpt_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
@@ -4255,6 +4270,7 @@ struct rtw89_chip_info {
u16 support_bandwidths;
bool support_unii4;
bool support_rnr;
+ bool support_ant_gain;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
@@ -4296,6 +4312,7 @@ struct rtw89_chip_info {
const struct rtw89_rfe_parms *dflt_parms;
const struct rtw89_chanctx_listener *chanctx_listener;
+ u8 txpwr_factor_bb;
u8 txpwr_factor_rf;
u8 txpwr_factor_mac;
@@ -4350,12 +4367,18 @@ struct rtw89_chip_info {
const struct rtw89_xtal_info *xtal_info;
};
+struct rtw89_chip_variant {
+ bool no_mcs_12_13: 1;
+ u32 fw_min_ver_code;
+};
+
union rtw89_bus_info {
const struct rtw89_pci_info *pci;
};
struct rtw89_driver_info {
const struct rtw89_chip_info *chip;
+ const struct rtw89_chip_variant *variant;
const struct dmi_system_id *quirks;
union rtw89_bus_info bus;
};
@@ -4448,8 +4471,13 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0,
RTW89_FW_FEATURE_WOW_REASON_V1,
RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1,
RTW89_FW_FEATURE_RFK_RXDCK_V0,
RTW89_FW_FEATURE_NO_WOW_CPU_IO_RX,
+ RTW89_FW_FEATURE_NOTIFY_AP_INFO,
+ RTW89_FW_FEATURE_CH_INFO_BE_V0,
+ RTW89_FW_FEATURE_LPS_CH_INFO,
+ RTW89_FW_FEATURE_NO_PHYCAP_P1,
};
struct rtw89_fw_suit {
@@ -4597,6 +4625,44 @@ struct rtw89_sar_info {
};
};
+enum rtw89_ant_gain_subband {
+ RTW89_ANT_GAIN_2GHZ_SUBBAND,
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_1, /* U-NII-1 */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_2, /* U-NII-2 */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_6, /* U-NII-6 */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_8, /* U-NII-8 */
+
+ RTW89_ANT_GAIN_SUBBAND_NR,
+};
+
+enum rtw89_ant_gain_domain_type {
+ RTW89_ANT_GAIN_ETSI = 0,
+
+ RTW89_ANT_GAIN_DOMAIN_NUM,
+};
+
+#define RTW89_ANT_GAIN_CHAIN_NUM 2
+struct rtw89_ant_gain_info {
+ s8 offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
+ u32 regd_enabled;
+};
+
+struct rtw89_6ghz_span {
+ enum rtw89_sar_subband sar_subband_low;
+ enum rtw89_sar_subband sar_subband_high;
+ enum rtw89_ant_gain_subband ant_gain_subband_low;
+ enum rtw89_ant_gain_subband ant_gain_subband_high;
+};
+
+#define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high)
+#define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high)
+
enum rtw89_tas_state {
RTW89_TAS_STATE_DPR_OFF,
RTW89_TAS_STATE_DPR_ON,
@@ -4672,7 +4738,7 @@ enum rtw89_dm_type {
};
#define RTW89_THERMAL_PROT_LV_MAX 5
-#define RTW89_THERMAL_PROT_STEP 19 /* -19% for each level */
+#define RTW89_THERMAL_PROT_STEP 5 /* -5% for each level */
struct rtw89_hal {
u32 rx_fltr;
@@ -4687,6 +4753,8 @@ struct rtw89_hal {
bool ant_diversity_fixed;
bool support_cckpd;
bool support_igi;
+ bool no_mcs_12_13;
+
atomic_t roc_chanctx_idx;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
@@ -4781,6 +4849,7 @@ struct rtw89_pkt_drop_params {
struct rtw89_pkt_stat {
u16 beacon_nr;
+ u8 beacon_rate;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
};
@@ -5544,6 +5613,7 @@ struct rtw89_dev {
enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
+ const struct rtw89_chip_variant *variant;
const struct rtw89_pci_info *pci_info;
const struct rtw89_rfe_parms *rfe_parms;
struct rtw89_hal hal;
@@ -5556,6 +5626,9 @@ struct rtw89_dev {
struct rtw89_rfe_data *rfe_data;
enum rtw89_custid custid;
+ struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM];
+ refcount_t refcount_ap_info;
+
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
struct list_head rtwvifs_list;
@@ -5636,6 +5709,7 @@ struct rtw89_dev {
struct rtw89_regulatory_info regulatory;
struct rtw89_sar_info sar;
struct rtw89_tas_info tas;
+ struct rtw89_ant_gain_info ant_gain;
struct rtw89_btc btc;
enum rtw89_ps_mode ps_mode;
@@ -5654,10 +5728,17 @@ struct rtw89_dev {
u8 priv[] __aligned(sizeof(void *));
};
+struct rtw89_link_conf_container {
+ struct ieee80211_bss_conf *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+#define RTW89_VIF_IDLE_LINK_ID 0
+
struct rtw89_vif {
struct rtw89_dev *rtwdev;
struct list_head list;
struct list_head mgnt_entry;
+ struct rtw89_link_conf_container __rcu *snap_link_confs;
u8 mac_addr[ETH_ALEN];
__be32 ip_addr;
@@ -5689,10 +5770,18 @@ static inline bool rtw89_vif_assign_link_is_valid(struct rtw89_vif_link **rtwvif
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) \
if (rtw89_vif_assign_link_is_valid(&(rtwvif_link), rtwvif, link_id))
+enum rtw89_sta_flags {
+ RTW89_REMOTE_STA_IN_PS,
+
+ NUM_OF_RTW89_STA_FLAGS,
+};
+
struct rtw89_sta {
struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
+ DECLARE_BITMAP(flags, NUM_OF_RTW89_STA_FLAGS);
+
bool disassoc;
struct sk_buff_head roc_queue;
@@ -5700,6 +5789,8 @@ struct rtw89_sta {
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
+ DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -5770,6 +5861,31 @@ u8 rtw89_sta_link_inst_get_index(struct rtw89_sta_link *rtwsta_link)
return rtwsta_link - rtwsta->links_inst;
}
+static inline void rtw89_assoc_link_set(struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+
+ rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
+ rtwsta_link);
+}
+
+static inline void rtw89_assoc_link_clr(struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+
+ rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
+ NULL);
+ synchronize_rcu();
+}
+
+static inline struct rtw89_sta_link *
+rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
+{
+ return rcu_dereference(rtwdev->assoc_link_on_macid[macid]);
+}
+
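These three helpers form a publish/lookup/retract pattern around the per-macid
RCU pointer: rtw89_assoc_link_set() publishes the link, readers dereference it
under rcu_read_lock(), and rtw89_assoc_link_clr() retracts it and then blocks
in synchronize_rcu() so the object cannot be freed under a running reader. A
minimal reader sketch (hypothetical function name; the real reader is the
pwr_int_notify handler added to mac.c further below):

static bool rtw89_sta_in_ps_example(struct rtw89_dev *rtwdev, u8 macid)
{
	struct rtw89_sta_link *rtwsta_link;
	bool ps = false;

	rcu_read_lock();

	/* only valid inside this read-side critical section */
	rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
	if (rtwsta_link)
		ps = test_bit(RTW89_REMOTE_STA_IN_PS,
			      rtwsta_link->rtwsta->flags);

	rcu_read_unlock();

	return ps;
}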
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@@ -6194,9 +6310,19 @@ static inline struct ieee80211_bss_conf *
__rtw89_vif_rcu_dereference_link(struct rtw89_vif_link *rtwvif_link, bool *nolink)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_link_conf_container *snap;
struct ieee80211_bss_conf *bss_conf;
+ snap = rcu_dereference(rtwvif->snap_link_confs);
+ if (snap) {
+ bss_conf = snap->link_conf[rtwvif_link->link_id];
+ goto out;
+ }
+
bss_conf = rcu_dereference(vif->link_conf[rtwvif_link->link_id]);
+
+out:
if (unlikely(!bss_conf)) {
*nolink = true;
return &vif->bss_conf;
@@ -6605,6 +6731,16 @@ static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
}
+static inline void rtw89_chip_phy_rpt_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->phy_rpt_to_rssi)
+ chip->ops->phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
+}
+
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -6753,6 +6889,26 @@ bool rtw89_sta_has_beamformer_cap(struct ieee80211_link_sta *link_sta)
return false;
}
+static inline
+bool rtw89_sta_link_has_su_mu_4xhe08(struct ieee80211_link_sta *link_sta)
+{
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
+ return true;
+
+ return false;
+}
+
+static inline
+bool rtw89_sta_link_has_er_su_4xhe08(struct ieee80211_link_sta *link_sta)
+{
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
+ return true;
+
+ return false;
+}
+
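Each helper tests a single HE PHY capability bit of the peer, so a transmit
path would pick whichever bit matches the PPDU type it is about to send. A
hedged usage sketch (hypothetical helper name, not part of the patch):

static bool rtw89_peer_accepts_4xltf_08gi(struct ieee80211_link_sta *link_sta,
					  bool er_su)
{
	/* choose the capability bit matching the outgoing PPDU type */
	if (er_su)
		return rtw89_sta_link_has_er_su_4xhe08(link_sta);

	return rtw89_sta_link_has_su_mu_4xhe08(link_sta);
}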
static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev,
enum rtw89_fw_type type)
{
@@ -6893,7 +7049,8 @@ int rtw89_core_register(struct rtw89_dev *rtwdev);
void rtw89_core_unregister(struct rtw89_dev *rtwdev);
struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
u32 bus_data_size,
- const struct rtw89_chip_info *chip);
+ const struct rtw89_chip_info *chip,
+ const struct rtw89_chip_variant *variant);
void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev);
void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id);
@@ -6908,6 +7065,8 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
unsigned int link_id);
void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id);
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+const struct rtw89_6ghz_span *
+rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 6abd88fa80ba..09fa977a6e6d 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "mac.h"
#include "pci.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
@@ -811,6 +812,9 @@ static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev,
case_REGD(MEXICO);
case_REGD(UKRAINE);
case_REGD(CN);
+ case_REGD(QATAR);
+ case_REGD(UK);
+ case_REGD(THAILAND);
}
}
@@ -882,6 +886,9 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
seq_puts(m, "[TAS]\n");
rtw89_print_tas(m, rtwdev);
+ seq_puts(m, "[DAG]\n");
+ rtw89_print_ant_gain(m, rtwdev, chan);
+
tbl = dbgfs_txpwr_tables[chip_gen];
if (!tbl) {
ret = -EOPNOTSUPP;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 2191c037d72e..5d4ad23cc3bd 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -709,11 +709,13 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
@@ -727,7 +729,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -799,6 +806,27 @@ out:
return firmware;
}
+static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_variant *variant = rtwdev->variant;
+ const struct rtw89_fw_suit *fw_suit;
+ u32 suit_ver_code;
+
+ if (!variant)
+ return 0;
+
+ fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
+
+ if (variant->fw_min_ver_code > suit_ver_code) {
+ rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
+ variant->fw_min_ver_code);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
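For illustration, a variant definition could look like the sketch below. Both
fields are the ones this series dereferences (fw_min_ver_code here,
no_mcs_12_13 in the mac.c hunk further below); RTW89_FW_VER_CODE() is assumed
to pack major/minor/sub/idx the same way RTW89_FW_SUIT_VER_CODE() unpacks it.

/* hypothetical variant requiring at least firmware 0.35.51.0 */
static const struct rtw89_chip_variant example_variant = {
	.no_mcs_12_13 = true,
	.fw_min_ver_code = RTW89_FW_VER_CODE(0, 35, 51, 0),
};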
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -815,6 +843,10 @@ int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
return ret;
normal_done:
+ ret = rtw89_fw_validate_ver_required(rtwdev);
+ if (ret)
+ return ret;
+
/* It still works if the wowlan firmware doesn't exist. */
__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
@@ -2414,6 +2446,7 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
u8 *id)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct rtw89_pktofld_info *info;
struct sk_buff *skb;
int ret;
@@ -2430,10 +2463,10 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
skb = ieee80211_proberesp_get(rtwdev->hw, vif);
break;
case RTW89_PKT_OFLD_TYPE_NULL_DATA:
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
break;
case RTW89_PKT_OFLD_TYPE_QOS_NULL:
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
break;
case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
skb = rtw89_eapol_get(rtwdev, rtwvif_link);
@@ -2589,14 +2622,17 @@ fail:
return ret;
}
-int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif_link->chanctx_idx);
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_chan *chan;
+ struct rtw89_vif_link *rtwvif_link;
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
+ unsigned int link_id;
struct sk_buff *skb;
+ bool no_chan = true;
+ u8 phy_idx;
u32 done;
int ret;
@@ -2611,11 +2647,27 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
skb_put(skb, len);
h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
- h2c->info[0].central_ch = chan->channel;
- h2c->info[0].pri_ch = chan->primary_channel;
- h2c->info[0].band = chan->band_type;
- h2c->info[0].bw = chan->band_width;
- h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ phy_idx = rtwvif_link->phy_idx;
+ if (phy_idx >= ARRAY_SIZE(h2c->info))
+ continue;
+
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ no_chan = false;
+
+ h2c->info[phy_idx].central_ch = chan->channel;
+ h2c->info[phy_idx].pri_ch = chan->primary_channel;
+ h2c->info[phy_idx].band = chan->band_type;
+ h2c->info[phy_idx].bw = chan->band_width;
+ }
+
+ if (no_chan) {
+ rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
@@ -2640,6 +2692,87 @@ fail:
return ret;
}
+int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_h2c_lps_ml_cmn_info *h2c;
+ struct rtw89_vif_link *rtwvif_link;
+ const struct rtw89_chan *chan;
+ u8 bw_idx = RTW89_BB_BW_20_40;
+ u32 len = sizeof(*h2c);
+ unsigned int link_id;
+ struct sk_buff *skb;
+ u8 gain_band;
+ u32 done;
+ u8 path;
+ int ret;
+ int i;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
+
+ h2c->fmt_id = 0x1;
+
+ h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+
+ h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
+ h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
+ h2c->band[rtwvif_link->phy_idx] = chan->band_type;
+ h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
+ if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
+ h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
+ else
+ h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;
+
+ /* Fill BW20 RX gain table for beacon mode */
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ h2c->tia_gain[rtwvif_link->phy_idx][i] =
+ cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
+ }
+ memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
+ gain->lna_gain[gain_band][bw_idx][path],
+ LNA_GAIN_NUM);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
+
+ rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
+ true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
+ if (ret)
+ rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
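The tail of this function is a write-then-poll handshake with the firmware;
since the arguments of read_poll_timeout() are positional, the sequence is
worth spelling out:

/* Handshake sketch:
 *   1. clear B_CHK_LPS_STAT so a stale ack cannot satisfy the poll
 *   2. queue the H2C carrying the ML common info
 *   3. read_poll_timeout() re-reads R_CHK_LPS_STAT every 50us for up
 *      to 5ms (sleeping once before the first read) until the firmware
 *      sets the bit back to acknowledge consumption; a timeout only
 *      warns because the H2C itself was already sent successfully
 */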
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
@@ -4954,13 +5087,14 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_h2c_chinfo_elem_be *elem;
struct rtw89_mac_chinfo_be *ch_info;
- struct rtw89_h2c_chinfo *h2c;
+ struct rtw89_h2c_chinfo_be *h2c;
struct sk_buff *skb;
unsigned int cond;
+ u8 ver = U8_MAX;
int skb_len;
int ret;
- static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
skb_len = struct_size(h2c, elem, ch_num);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
@@ -4969,8 +5103,11 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return -ENOMEM;
}
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
skb_put(skb, sizeof(*h2c));
- h2c = (struct rtw89_h2c_chinfo *)skb->data;
+ h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
h2c->ch_num = ch_num;
h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
@@ -4980,8 +5117,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
list_for_each_entry(ch_info, chan_list, list) {
elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
- elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
- le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
+ elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
le32_encode_bits(ch_info->central_ch,
RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
@@ -5028,6 +5164,12 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
le32_encode_bits(ch_info->fw_probe0_bssids,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
+ if (ver == 0)
+ elem->w0 |=
+ le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
+ else
+ elem->w7 = le32_encode_bits(ch_info->period,
+ RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -5171,6 +5313,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 probe_id[NUM_NL80211_BANDS];
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
+ u8 ver = U8_MAX;
void *ptr;
int ret;
u32 len;
@@ -5191,6 +5334,9 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
if (!wowlan) {
list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
if (pkt_info->wildcard_6ghz) {
@@ -5286,9 +5432,7 @@ flex_member:
le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
- opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
- RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
- le32_encode_bits(op->band_type,
+ opch->w1 = le32_encode_bits(op->band_type,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
le32_encode_bits(op->band_width,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
@@ -5314,6 +5458,13 @@ flex_member:
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
+
+ if (ver == 0)
+ opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
+ else
+ opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
ptr += sizeof(*opch);
}
@@ -5416,7 +5567,9 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info_common *common;
struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
+ struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
struct rtw89_fw_h2c_rfk_pre_info *h2c;
u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
u32 len = sizeof(*h2c);
@@ -5426,7 +5579,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
u32 val32;
int ret;
- if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
+ len = sizeof(*h2c_v1);
+ ver = 1;
+ } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
len = sizeof(*h2c_v0);
ver = 0;
}
@@ -5438,17 +5594,18 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
}
skb_put(skb, len);
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
+ common = &h2c->base_v1.common;
- h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
- h2c->common.dbcc.ch[path][tbl] =
+ common->dbcc.ch[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
- h2c->common.dbcc.band[path][tbl] =
+ common->dbcc.band[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].band[tbl]);
}
}
@@ -5456,13 +5613,19 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
tbl_sel[path] = rfk_mcc->data[path].table_idx;
- h2c->common.tbl.cur_ch[path] =
+ common->tbl.cur_ch[path] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
- h2c->common.tbl.cur_band[path] =
+ common->tbl.cur_band[path] =
cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
+
+ if (ver <= 1)
+ continue;
+
+ h2c->cur_bandwidth[path] =
+ cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
}
- h2c->common.phy_idx = cpu_to_le32(phy_idx);
+ common->phy_idx = cpu_to_le32(phy_idx);
if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
@@ -5488,8 +5651,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
goto done;
}
- if (rtw89_is_mlo_1_1(rtwdev))
- h2c->mlo_1_1 = cpu_to_le32(1);
+ if (rtw89_is_mlo_1_1(rtwdev)) {
+ h2c_v1 = &h2c->base_v1;
+ h2c_v1->mlo_1_1 = cpu_to_le32(1);
+ }
done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
@@ -6496,7 +6661,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
- idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6547,7 +6712,7 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6624,7 +6789,7 @@ int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
- idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6679,7 +6844,7 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6780,22 +6945,25 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
}
-void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- bool aborted)
+struct rtw89_hw_scan_complete_cb_data {
+ struct rtw89_vif_link *rtwvif_link;
+ bool aborted;
+};
+
+static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_hw_scan_complete_cb_data *cb_data = data;
+ struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
struct cfg80211_scan_info info = {
- .aborted = aborted,
+ .aborted = cb_data->aborted,
};
struct rtw89_vif *rtwvif;
u32 reg;
if (!rtwvif_link)
- return;
-
- rtw89_chanctx_proceed(rtwdev);
+ return -EINVAL;
rtwvif = rtwvif_link->rtwvif;
@@ -6814,6 +6982,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
scan_info->last_chan_idx = 0;
scan_info->scanning_vif = NULL;
scan_info->abort = false;
+
+ return 0;
+}
+
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ bool aborted)
+{
+ struct rtw89_hw_scan_complete_cb_data cb_data = {
+ .rtwvif_link = rtwvif_link,
+ .aborted = aborted,
+ };
+ const struct rtw89_chanctx_cb_parm cb_parm = {
+ .cb = rtw89_hw_scan_complete_cb,
+ .data = &cb_data,
+ .caller = __func__,
+ };
+
+ /* These steps need to be done after setting the channel (for coex)
+ * and before proceeding with entity mode (for MCC). So, pass them as
+ * a callback to enforce the right sequence instead of doing them
+ * here directly.
+ */
+ rtw89_chanctx_proceed(rtwdev, &cb_parm);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
@@ -8164,6 +8355,71 @@ int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
return 0;
}
+static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
+{
+ struct rtw89_h2c_ap_info *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ap_info *)skb->data;
+
+ h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_AP,
+ H2C_FUNC_AP_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
+{
+ int ret;
+
+ if (en) {
+ if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
+ return 0;
+ } else {
+ if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
+ return 0;
+ }
+
+ ret = rtw89_fw_h2c_ap_info(rtwdev, en);
+ if (ret) {
+ if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ return ret;
+
+ /* During recovery (SER), neither the driver nor the stack has full
+ * error handling, so just warn and return 0 with the refcount
+ * increased as usual. This avoids a refcount underflow when this
+ * function is called with @en == false later.
+ */
+ rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
+ }
+
+ if (en)
+ refcount_set(&rtwdev->refcount_ap_info, 1);
+
+ return 0;
+}
+
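A usage sketch showing the intended pairing (the real callers are the
start_ap/stop_ap hunks in mac80211.c below): the H2C is emitted only on the
0 <-> 1 refcount transitions, so every successful enable must eventually be
matched by exactly one disable.

/* hypothetical caller following the start_ap/stop_ap pattern */
static int rtw89_ap_info_example(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
	if (ret)
		return ret;

	/* ... AP link active, PS notifications arrive via C2H ... */

	rtw89_fw_h2c_ap_info_refcount(rtwdev, false);
	return 0;
}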
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index efa63d444821..2026bc2fd2ac 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -47,6 +47,19 @@ struct rtw89_c2hreg_phycap {
#define RTW89_C2HREG_PHYCAP_W2_HW_TYPE GENMASK(7, 0)
#define RTW89_C2HREG_PHYCAP_W3_ANT_TX_NUM GENMASK(15, 8)
#define RTW89_C2HREG_PHYCAP_W3_ANT_RX_NUM GENMASK(23, 16)
+#define RTW89_C2HREG_PHYCAP_W3_BAND_SEL GENMASK(31, 24)
+
+#define RTW89_C2HREG_PHYCAP_P1_W0_B1_RX_NSS GENMASK(23, 16)
+#define RTW89_C2HREG_PHYCAP_P1_W0_B1_BW GENMASK(31, 24)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_TX_NSS GENMASK(7, 0)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_ANT_TX_NUM GENMASK(15, 8)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_ANT_RX_NUM GENMASK(23, 16)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_BAND_SEL GENMASK(31, 24)
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM GENMASK(7, 0)
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM_256 0x1
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024 0x2
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096 0x3
+#define RTW89_C2HREG_PHYCAP_P1_W2_B1_QAM GENMASK(15, 8)
#define RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX GENMASK(23, 16)
#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_0 GENMASK(7, 0)
@@ -92,6 +105,8 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN GENMASK(23, 16)
+#define RTW89_H2CREG_GET_FEATURE_PART_NUM GENMASK(23, 16)
+
#define RTW89_H2CREG_MAX 4
#define RTW89_C2HREG_MAX 4
#define RTW89_C2HREG_HDR_LEN 2
@@ -138,6 +153,7 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_PHY_CAP,
RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT,
RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK = 0xA,
+ RTW89_FWCMD_C2HREG_FUNC_PHY_CAP_PART1 = 0xC,
RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF,
};
@@ -310,9 +326,12 @@ struct rtw89_fw_macid_pause_sleep_grp {
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
+#define RTW89_MAC_CHINFO_SIZE_BE 32
#define RTW89_SCAN_LIST_GUARD 4
-#define RTW89_SCAN_LIST_LIMIT \
- ((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD)
+#define RTW89_SCAN_LIST_LIMIT(size) \
+ ((RTW89_H2C_MAX_SIZE / (size)) - RTW89_SCAN_LIST_GUARD)
+#define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE)
+#define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE)
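Worked numbers for the two limits, assuming RTW89_H2C_MAX_SIZE is 2048 as
defined elsewhere in fw.h (an assumption, since that definition is outside
this hunk):

/* AX: 2048 / 28 = 73, minus the guard of 4 => 69 entries per H2C
 * BE: 2048 / 32 = 64, minus the guard of 4 => 60 entries per H2C
 */
static_assert(RTW89_SCAN_LIST_LIMIT_AX == 69);
static_assert(RTW89_SCAN_LIST_LIMIT_BE == 60);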
#define RTW89_BCN_LOSS_CNT 10
@@ -1780,6 +1799,21 @@ struct rtw89_h2c_lps_ch_info {
__le32 mlo_dbcc_mode_lps;
} __packed;
+struct rtw89_h2c_lps_ml_cmn_info {
+ u8 fmt_id;
+ u8 rsvd0[3];
+ __le32 mlo_dbcc_mode;
+ u8 central_ch[RTW89_PHY_MAX];
+ u8 pri_ch[RTW89_PHY_MAX];
+ u8 bw[RTW89_PHY_MAX];
+ u8 band[RTW89_PHY_MAX];
+ u8 bcn_rate_type[RTW89_PHY_MAX];
+ u8 rsvd1[2];
+ __le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM];
+ u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM];
+ u8 rsvd2[2];
+} __packed;
+
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@@ -2647,6 +2681,7 @@ struct rtw89_h2c_chinfo_elem_be {
__le32 w4;
__le32 w5;
__le32 w6;
+ __le32 w7;
} __packed;
#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
@@ -2678,6 +2713,7 @@ struct rtw89_h2c_chinfo_elem_be {
#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W7_PERIOD_V1 GENMASK(15, 0)
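The new define widens the scan period field; a layout note for the two
firmware generations handled by rtw89_fw_h2c_scan_list_offload_be() above:

/* v0 (CH_INFO_BE_V0 set):   period in w0[7:0],  capped at 255
 * v1 (feature bit not set): period in w7[15:0], capped at 65535,
 *                           leaving w0[7:0] unused for longer dwells
 */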
struct rtw89_h2c_chinfo {
u8 ch_num;
@@ -2687,6 +2723,14 @@ struct rtw89_h2c_chinfo {
struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
} __packed;
+struct rtw89_h2c_chinfo_be {
+ u8 ch_num;
+ u8 elem_size;
+ u8 arg;
+ u8 rsvd0;
+ struct rtw89_h2c_chinfo_elem_be elem[] __counted_by(ch_num);
+} __packed;
+
#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
@@ -2733,6 +2777,7 @@ struct rtw89_h2c_scanofld_be_opch {
__le32 w1;
__le32 w2;
__le32 w3;
+ __le32 w4;
} __packed;
#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
@@ -2754,6 +2799,7 @@ struct rtw89_h2c_scanofld_be_opch {
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1 GENMASK(15, 0)
struct rtw89_h2c_scanofld_be {
__le32 w0;
@@ -3466,6 +3512,12 @@ struct rtw89_h2c_wow_aoac {
__le32 w0;
} __packed;
+struct rtw89_h2c_ap_info {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_AP_INFO_W0_PWR_INT_EN BIT(0)
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3590,6 +3642,7 @@ struct rtw89_c2h_scanofld {
__le32 w5;
__le32 w6;
__le32 w7;
+ __le32 w8;
} __packed;
#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
@@ -3604,6 +3657,8 @@ struct rtw89_c2h_scanofld {
#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
+#define RTW89_C2H_SCANOFLD_W8_PERIOD_V1 GENMASK(15, 0)
+#define RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1 GENMASK(31, 16)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3725,6 +3780,14 @@ struct rtw89_c2h_wow_aoac_report {
#define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0)
+struct rtw89_c2h_pwr_int_notify {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+} __packed;
+
+#define RTW89_C2H_PWR_INT_NOTIFY_W2_MACID GENMASK(15, 0)
+#define RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS BIT(16)
+
struct rtw89_h2c_tx_duty {
__le32 w0;
__le32 w1;
@@ -4168,6 +4231,10 @@ enum rtw89_mrc_h2c_func {
#define RTW89_MRC_WAIT_COND_REQ_TSF \
RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
+/* CLASS 36 - AP */
+#define H2C_CL_AP 0x24
+#define H2C_FUNC_AP_INFO 0x0
+
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
@@ -4175,6 +4242,7 @@ enum rtw89_mrc_h2c_func {
#define H2C_CL_OUTSRC_DM 0x2
#define H2C_FUNC_FW_LPS_CH_INFO 0xb
+#define H2C_FUNC_FW_LPS_ML_CMN_INFO 0xe
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
@@ -4241,11 +4309,16 @@ struct rtw89_fw_h2c_rfk_pre_info_v0 {
} __packed mlo;
} __packed;
-struct rtw89_fw_h2c_rfk_pre_info {
+struct rtw89_fw_h2c_rfk_pre_info_v1 {
struct rtw89_fw_h2c_rfk_pre_info_common common;
__le32 mlo_1_1;
} __packed;
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct rtw89_fw_h2c_rfk_pre_info_v1 base_v1;
+ __le32 cur_bandwidth[NUM_OF_RTW89_FW_RFK_PATH];
+} __packed;
+
struct rtw89_h2c_rf_tssi {
__le16 len;
u8 phy;
@@ -4602,8 +4675,9 @@ int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
-int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link);
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
bool enable);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
@@ -4697,6 +4771,7 @@ int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_sync_arg *arg);
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_upd_duration_arg *arg);
+int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 7907b84d204b..a37c6d525d6f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -2898,22 +2898,42 @@ static int cmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
}
static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
- struct rtw89_mac_c2h_info *c2h_info)
+ struct rtw89_mac_c2h_info *c2h_info, u8 part_num)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_mac_h2c_info h2c_info = {0};
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_h2c_info h2c_info = {};
+ enum rtw89_mac_c2h_type c2h_type;
+ u8 content_len;
u32 ret;
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ content_len = 0;
+ else
+ content_len = 2;
+
+ switch (part_num) {
+ case 0:
+ c2h_type = RTW89_FWCMD_C2HREG_FUNC_PHY_CAP;
+ break;
+ case 1:
+ c2h_type = RTW89_FWCMD_C2HREG_FUNC_PHY_CAP_PART1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
mac->cnv_efuse_state(rtwdev, false);
h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE;
- h2c_info.content_len = 0;
+ h2c_info.content_len = content_len;
+ h2c_info.u.hdr.w0 = u32_encode_bits(part_num, RTW89_H2CREG_GET_FEATURE_PART_NUM);
ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info);
if (ret)
goto out;
- if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP)
+ if (c2h_info->id != c2h_type)
ret = -EINVAL;
out:
@@ -2922,20 +2942,20 @@ out:
return ret;
}
-int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
+static int rtw89_mac_setup_phycap_part0(struct rtw89_dev *rtwdev)
{
- struct rtw89_efuse *efuse = &rtwdev->efuse;
- struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_mac_c2h_info c2h_info = {0};
const struct rtw89_c2hreg_phycap *phycap;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw89_mac_c2h_info c2h_info = {};
+ struct rtw89_hal *hal = &rtwdev->hal;
u8 tx_nss;
u8 rx_nss;
u8 tx_ant;
u8 rx_ant;
- u32 ret;
+ int ret;
- ret = rtw89_mac_read_phycap(rtwdev, &c2h_info);
+ ret = rtw89_mac_read_phycap(rtwdev, &c2h_info, 0);
if (ret)
return ret;
@@ -2979,6 +2999,60 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mac_setup_phycap_part1(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_variant *variant = rtwdev->variant;
+ const struct rtw89_c2hreg_phycap *phycap;
+ struct rtw89_mac_c2h_info c2h_info = {};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 qam_raw, qam;
+ int ret;
+
+ ret = rtw89_mac_read_phycap(rtwdev, &c2h_info, 1);
+ if (ret)
+ return ret;
+
+ phycap = &c2h_info.u.phycap;
+
+ qam_raw = u32_get_bits(phycap->w2, RTW89_C2HREG_PHYCAP_P1_W2_QAM);
+
+ switch (qam_raw) {
+ case RTW89_C2HREG_PHYCAP_P1_W2_QAM_256:
+ case RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024:
+ case RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096:
+ qam = qam_raw;
+ break;
+ default:
+ qam = RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096;
+ break;
+ }
+
+ if ((variant && variant->no_mcs_12_13) ||
+ qam <= RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024)
+ hal->no_mcs_12_13 = true;
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "phycap qam=%d/%d no_mcs_12_13=%d\n",
+ qam_raw, qam, hal->no_mcs_12_13);
+
+ return 0;
+}
+
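An editor's note on the clamp above, hedged since the patch itself gives no
rationale: in EHT, MCS 12 and 13 are the 4096-QAM rates, which would explain
why a reported peak of 256- or 1024-QAM, or a variant that forbids them
outright, ends up setting hal->no_mcs_12_13.

/* reported QAM cap  -> no_mcs_12_13
 *   256-QAM  (0x1)     true  (cannot reach 4096-QAM MCS 12/13)
 *   1024-QAM (0x2)     true
 *   4096-QAM (0x3)     false, unless variant->no_mcs_12_13 forces it
 */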
+int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ int ret;
+
+ ret = rtw89_mac_setup_phycap_part0(rtwdev);
+ if (ret)
+ return ret;
+
+ if (chip->chip_gen == RTW89_CHIP_AX ||
+ RTW89_CHK_FW_FEATURE(NO_PHYCAP_P1, &rtwdev->fw))
+ return 0;
+
+ return rtw89_mac_setup_phycap_part1(rtwdev);
+}
+
static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band,
u16 tx_en_u16, u16 mask_u16)
{
@@ -4788,9 +4862,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif;
struct rtw89_chan new;
- u8 reason, status, tx_fail, band, actual_period, expect_period;
u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+ u16 actual_period, expect_period;
+ u8 reason, status, tx_fail, band;
u8 mac_idx, sw_def, fw_def;
+ u8 ver = U8_MAX;
u16 chan;
int ret;
@@ -4799,6 +4875,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
rtwvif = rtwvif_link->rtwvif;
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
@@ -4811,21 +4890,28 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
- rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
- mac_idx, band, chan, reason, status, tx_fail, actual_period);
-
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
- expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
+ if (ver == 0) {
+ expect_period =
+ le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
+ } else {
+ actual_period = le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_PERIOD_V1);
+ expect_period =
+ le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1);
+ }
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
"sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
sw_def, fw_def, report_tsf, expect_period);
}
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ mac_idx, band, chan, reason, status, tx_fail, actual_period);
+
switch (reason) {
case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
@@ -5364,6 +5450,39 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(wait, cond, &data);
}
+static void
+rtw89_mac_c2h_pwr_int_notify(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len)
+{
+ const struct rtw89_c2h_pwr_int_notify *c2h;
+ struct rtw89_sta_link *rtwsta_link;
+ struct ieee80211_sta *sta;
+ struct rtw89_sta *rtwsta;
+ u16 macid;
+ bool ps;
+
+ c2h = (const struct rtw89_c2h_pwr_int_notify *)skb->data;
+ macid = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_MACID);
+ ps = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS);
+
+ rcu_read_lock();
+
+ rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
+ if (unlikely(!rtwsta_link))
+ goto out;
+
+ rtwsta = rtwsta_link->rtwsta;
+ if (ps)
+ set_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
+ else
+ clear_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
+
+ sta = rtwsta_to_sta(rtwsta);
+ ieee80211_sta_ps_transition(sta, ps);
+
+out:
+ rcu_read_unlock();
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -5409,6 +5528,12 @@ void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt,
};
+static
+void (* const rtw89_mac_c2h_ap_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY] = rtw89_mac_c2h_pwr_int_notify,
+};
+
static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
@@ -5463,6 +5588,13 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
return true;
case RTW89_MAC_C2H_CLASS_WOW:
return true;
+ case RTW89_MAC_C2H_CLASS_AP:
+ switch (func) {
+ default:
+ return false;
+ case RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY:
+ return true;
+ }
}
}
@@ -5493,14 +5625,18 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW)
handler = rtw89_mac_c2h_wow_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_AP:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_AP)
+ handler = rtw89_mac_c2h_ap_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
- rtw89_info(rtwdev, "c2h class %d not support\n", class);
+ rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
return;
}
if (!handler) {
- rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
+ rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class,
func);
return;
}
@@ -6674,6 +6810,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
+ .cfg_phy_rpt = NULL,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 18579c020548..8edea96d037f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -169,6 +169,20 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
+enum rtw89_mac_phy_rpt_size {
+ MAC_AX_PHY_RPT_SIZE_0 = 0,
+ MAC_AX_PHY_RPT_SIZE_8 = 1,
+ MAC_AX_PHY_RPT_SIZE_16 = 2,
+ MAC_AX_PHY_RPT_SIZE_24 = 3,
+};
+
+enum rtw89_mac_hdr_cnv_size {
+ MAC_AX_HDR_CNV_SIZE_0 = 0,
+ MAC_AX_HDR_CNV_SIZE_32 = 1,
+ MAC_AX_HDR_CNV_SIZE_64 = 2,
+ MAC_AX_HDR_CNV_SIZE_96 = 3,
+};
+
enum rtw89_mac_wow_fw_status {
WOWLAN_NOT_READY = 0x00,
WOWLAN_SLEEP_READY = 0x01,
@@ -426,6 +440,12 @@ enum rtw89_mac_c2h_wow_func {
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
+enum rtw89_mac_c2h_ap_func {
+ RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY = 0,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_AP,
+};
+
enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_INFO = 0x0,
RTW89_MAC_C2H_CLASS_OFLD = 0x1,
@@ -434,6 +454,7 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_MCC = 0x4,
RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
RTW89_MAC_C2H_CLASS_MRC = 0xe,
+ RTW89_MAC_C2H_CLASS_AP = 0x18,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -961,6 +982,7 @@ struct rtw89_mac_gen_def {
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
+ void (*cfg_phy_rpt)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -1216,6 +1238,27 @@ int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
+
+static inline
+void rtw89_mac_cfg_phy_rpt(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ if (mac->cfg_phy_rpt)
+ mac->cfg_phy_rpt(rtwdev, mac_idx, enable);
+}
+
+static inline
+void rtw89_mac_cfg_phy_rpt_bands(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_0, enable);
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_1, enable);
+}
static inline
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 619d2d3771d5..b3669e0074df 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -189,10 +189,10 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_core_txq_init(rtwdev, vif->txq);
- if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
+ if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) {
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
-
- INIT_LIST_HEAD(&rtwvif->mgnt_entry);
+ INIT_LIST_HEAD(&rtwvif->mgnt_entry);
+ }
ether_addr_copy(rtwvif->mac_addr, vif->addr);
@@ -202,7 +202,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
- rtwvif_link = rtw89_vif_set_link(rtwvif, 0);
+ rtwvif_link = rtw89_vif_set_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
if (!rtwvif_link) {
ret = -EINVAL;
goto release_port;
@@ -218,7 +218,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
return 0;
unset_link:
- rtw89_vif_unset_link(rtwvif, 0);
+ rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
release_port:
list_del_init(&rtwvif->list);
rtw89_core_release_bit_map(rtwdev->hw_port, port);
@@ -246,17 +246,17 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- rtwvif_link = rtwvif->links[0];
+ rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
- __func__, 0);
+ __func__, RTW89_VIF_IDLE_LINK_ID);
goto bottom;
}
__rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
- rtw89_vif_unset_link(rtwvif, 0);
+ rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
bottom:
list_del_init(&rtwvif->list);
@@ -509,6 +509,7 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
rtw89_core_txq_init(rtwdev, sta->txq[i]);
skb_queue_head_init(&rtwsta->roc_queue);
+ bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
rtwsta_link = rtw89_sta_set_link(rtwsta, sta->deflink.link_id);
if (!rtwsta_link) {
@@ -775,6 +776,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
const struct rtw89_chan *chan;
+ int ret = 0;
mutex_lock(&rtwdev->mutex);
@@ -783,6 +785,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
+ ret = -ENOLINK;
goto out;
}
@@ -804,12 +807,18 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) {
+ ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
+ if (ret)
+ goto out;
+ }
+
rtw89_queue_chanctx_work(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
- return 0;
+ return ret;
}
static
@@ -830,6 +839,9 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
+ rtw89_fw_h2c_ap_info_refcount(rtwdev, false);
+
rtw89_mac_stop_ap(rtwdev, rtwvif_link);
rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
@@ -1273,11 +1285,11 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return;
- if (!rtwdev->scanning)
- return;
-
mutex_lock(&rtwdev->mutex);
+ if (!rtwdev->scanning)
+ goto out;
+
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
@@ -1295,10 +1307,15 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
- struct ieee80211_sta *sta = link_sta->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta(link_sta->sta);
struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta_link *rtwsta_link;
+
+ rtwsta_link = rtwsta->links[link_sta->link_id];
+ if (unlikely(!rtwsta_link))
+ return;
- rtw89_phy_ra_update_sta(rtwdev, sta, changed);
+ rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
@@ -1473,6 +1490,259 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
return 0;
}
+static bool rtw89_can_work_on_links(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, u16 links)
+{
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ u8 w = hweight16(links);
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ w > RTW89_MLD_NON_STA_LINK_NUM)
+ return false;
+
+ return w <= rtwvif->links_inst_valid_num;
+}
+
+static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ return rtw89_can_work_on_links(rtwdev, vif, active_links);
+}
+
+static void __rtw89_ops_clr_vif_links(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ unsigned long clr_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwvif_link = rtwvif->links[link_id];
+ if (unlikely(!rtwvif_link))
+ continue;
+
+ __rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
+
+ rtw89_vif_unset_link(rtwvif, link_id);
+ }
+}
+
+static int __rtw89_ops_set_vif_links(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ unsigned long set_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ unsigned int link_id;
+ int ret;
+
+ for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwvif_link = rtw89_vif_set_link(rtwvif, link_id);
+ if (!rtwvif_link)
+ return -EINVAL;
+
+ ret = __rtw89_ops_add_iface_link(rtwdev, rtwvif_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to add iface (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static
+int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ unsigned long clr_links = old_links & ~new_links;
+ unsigned long set_links = new_links & ~old_links;
+ bool removing_links = !old_links || clr_links;
+ struct rtw89_link_conf_container *snap;
+ int ret = 0;
+ int i;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE,
+ "%s: old_links (0x%08x) -> new_links (0x%08x)\n",
+ __func__, old_links, new_links);
+
+ if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
+ return -EOPNOTSUPP;
+
+ if (removing_links) {
+ snap = kzalloc(sizeof(*snap), GFP_KERNEL);
+ if (!snap)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(snap->link_conf); i++)
+ snap->link_conf[i] = old[i];
+
+ rcu_assign_pointer(rtwvif->snap_link_confs, snap);
+ }
+
+ /* might depend on @snap; don't change order */
+ rtw89_leave_ips_by_hwflags(rtwdev);
+
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
+ if (!old_links)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ else if (clr_links)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif, clr_links);
+
+ if (removing_links) {
+ /* @snap is only required while links are being removed. That work
+ * is done at this point, so clean up @snap immediately.
+ */
+ rcu_assign_pointer(rtwvif->snap_link_confs, NULL);
+
+ /* The pointers in @old will be freed after this function returns,
+ * so synchronously wait for all readers of @snap to finish.
+ */
+ synchronize_rcu();
+ kfree(snap);
+ }
+
+ if (set_links) {
+ ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif, set_links);
+ if (ret)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif, set_links);
+ } else if (!new_links) {
+ ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ if (ret)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ }
+
+ rtw89_enter_ips_by_hwflags(rtwdev);
+ return ret;
+}
+
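A worked example of the bitmask split at the top of rtw89_ops_change_vif_links():

/* old_links = 0x3 (links 0,1), new_links = 0x6 (links 1,2):
 *   clr_links = 0x3 & ~0x6 = 0x1  -> tear down link 0
 *   set_links = 0x6 & ~0x3 = 0x4  -> bring up link 2
 * link 1 stays untouched; an all-zero old_links or new_links is mapped
 * onto the idle link BIT(RTW89_VIF_IDLE_LINK_ID) instead.
 */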
+static void __rtw89_ops_clr_sta_links(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ unsigned long clr_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_sta_link *rtwsta_link;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwsta_link = rtwsta->links[link_id];
+ if (unlikely(!rtwsta_link))
+ continue;
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+
+ rtw89_core_sta_link_disassoc(rtwdev, rtwvif_link, rtwsta_link);
+ rtw89_core_sta_link_disconnect(rtwdev, rtwvif_link, rtwsta_link);
+ rtw89_core_sta_link_remove(rtwdev, rtwvif_link, rtwsta_link);
+
+ rtw89_sta_unset_link(rtwsta, link_id);
+ }
+}
+
+static int __rtw89_ops_set_sta_links(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ unsigned long set_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_sta_link *rtwsta_link;
+ unsigned int link_id;
+ u8 sec_cam_idx;
+ int ret;
+
+ for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwsta_link = rtw89_sta_set_link(rtwsta, link_id);
+ if (!rtwsta_link)
+ return -EINVAL;
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+
+ ret = rtw89_core_sta_link_add(rtwdev, rtwvif_link, rtwsta_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to add sta (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+
+ rtw89_vif_type_mapping(rtwvif_link, true);
+
+ ret = rtw89_core_sta_link_assoc(rtwdev, rtwvif_link, rtwsta_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to assoc sta (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+
+ __rtw89_ops_bss_link_assoc(rtwdev, rtwvif_link);
+
+ for_each_set_bit(sec_cam_idx, rtwsta->pairwise_sec_cam_map,
+ RTW89_MAX_SEC_CAM_NUM) {
+ ret = rtw89_cam_attach_link_sec_cam(rtwdev,
+ rtwvif_link,
+ rtwsta_link,
+ sec_cam_idx);
+ if (ret) {
+ rtw89_err(rtwdev,
+ "%s: failed to apply pairwise key (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static
+int rtw89_ops_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ unsigned long clr_links = old_links & ~new_links;
+ unsigned long set_links = new_links & ~old_links;
+ int ret = 0;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE,
+ "%s: old_links (0x%08x) -> new_links (0x%08x)\n",
+ __func__, old_links, new_links);
+
+ if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
+ return -EOPNOTSUPP;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ if (clr_links)
+ __rtw89_ops_clr_sta_links(rtwdev, rtwsta, clr_links);
+
+ if (set_links) {
+ ret = __rtw89_ops_set_sta_links(rtwdev, rtwsta, set_links);
+ if (ret)
+ __rtw89_ops_clr_sta_links(rtwdev, rtwsta, set_links);
+ }
+
+ return ret;
+}
+
#ifdef CONFIG_PM
static int rtw89_ops_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
@@ -1600,6 +1870,9 @@ const struct ieee80211_ops rtw89_ops = {
.set_sar_specs = rtw89_ops_set_sar_specs,
.link_sta_rc_update = rtw89_ops_sta_rc_update,
.set_tid_config = rtw89_ops_set_tid_config,
+ .can_activate_links = rtw89_ops_can_activate_links,
+ .change_vif_links = rtw89_ops_change_vif_links,
+ .change_sta_links = rtw89_ops_change_sta_links,
#ifdef CONFIG_PM
.suspend = rtw89_ops_suspend,
.resume = rtw89_ops_resume,
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index f7a396c8a3cd..2dbdeae904ad 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -1988,6 +1988,20 @@ int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
}
EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
+void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg, val;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RCR, mac_idx);
+ val = enable ? MAC_AX_PHY_RPT_SIZE_8 : MAC_AX_PHY_RPT_SIZE_0;
+ rtw89_write32_mask(rtwdev, reg, B_BE_PHY_RPT_SZ_MASK, val);
+ rtw89_write32_mask(rtwdev, reg, B_BE_HDR_CNV_SZ_MASK, MAC_AX_HDR_CNV_SIZE_0);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_DRV_INFO_OPTION, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_DRV_INFO_PHYRPT_EN, enable);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_phy_rpt_be);
+
static
int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
@@ -2583,6 +2597,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
+ .cfg_phy_rpt = rtw89_mac_cfg_phy_rpt_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index f923bec03d41..c2fe5a898dc7 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -321,10 +321,11 @@ static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
- struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
- struct rtw89_pci_rx_info *rx_info;
struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct sk_buff *new = rx_ring->diliver_skb;
+ struct rtw89_pci_rx_info *rx_info;
struct sk_buff *skb;
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
u32 skb_idx;
@@ -344,9 +345,14 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
}
rx_info = RTW89_PCI_RX_SKB_CB(skb);
- fs = rx_info->fs;
+ fs = info->no_rxbd_fs ? !new : rx_info->fs;
ls = rx_info->ls;
+ if (unlikely(!fs || !ls))
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n",
+ fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);
+
if (fs) {
if (new) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
@@ -2516,7 +2522,7 @@ static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
}
-static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
{
if (pwr_up)
rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
@@ -2825,6 +2831,8 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
+ rtw89_pci_power_wake(rtwdev, false);
+
if (rtwdev->chip->chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
@@ -2867,7 +2875,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return ret;
}
- rtw89_pci_power_wake(rtwdev, true);
+ rtw89_pci_power_wake_ax(rtwdev, true);
rtw89_pci_autoload_hang(rtwdev);
rtw89_pci_l12_vmain(rtwdev);
rtw89_pci_gen2_force_ib(rtwdev);
@@ -2912,6 +2920,13 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
+{
+ rtw89_pci_power_wake_ax(rtwdev, false);
+
+ return 0;
+}
+
int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
{
u32 val;
@@ -4069,6 +4084,15 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
rtw89_pci_l1ss_set(rtwdev, true);
}
+static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
+}
+
static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
{
int ret = 0;
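[Editor's note] rtw89_pci_cpl_timeout_cfg() above sets the Completion Timeout Disable bit in PCIe Device Control 2 on every (re)initialization path via rtw89_pci_basic_cfg(); presumably this keeps the root port from escalating a completion timeout into a fatal error while the chip lingers in a low-power state. A hedged read-modify-verify sketch using only core PCI helpers:

	#include <linux/pci.h>

	/* Sketch: set and verify PCI_EXP_DEVCTL2_COMP_TMOUT_DIS. */
	static void cpl_timeout_disable(struct pci_dev *pdev)
	{
		u16 ctl2;

		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &ctl2);
		WARN_ON(!(ctl2 & PCI_EXP_DEVCTL2_COMP_TMOUT_DIS));
	}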
@@ -4282,6 +4306,7 @@ void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
rtw89_pci_disable_eq(rtwdev);
rtw89_pci_filter_out(rtwdev);
+ rtw89_pci_cpl_timeout_cfg(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
}
@@ -4325,7 +4350,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
B_AX_RDU_INT},
.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
- .mac_pre_deinit = NULL,
+ .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
.clr_idx_all = rtw89_pci_clr_idx_all_ax,
@@ -4343,6 +4368,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.l1ss_set = rtw89_pci_l1ss_set_ax,
.disable_eq = rtw89_pci_disable_eq_ax,
+ .power_wake = rtw89_pci_power_wake_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);
@@ -4400,7 +4426,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
sizeof(struct rtw89_pci),
- info->chip);
+ info->chip, info->variant);
if (!rtwdev) {
dev_err(&pdev->dev, "failed to allocate hw\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index b68e2d82eea9..4d11c3dd60a5 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -1051,7 +1051,8 @@
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
+/* +40 for rtw89_rxdesc_long_v2; +4 for rtw89_pci_rxbd_info */
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40 + 4)
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
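[Editor's note] The RX buffer grows by the 4-byte rtw89_pci_rxbd_info trailer on top of the 11454-byte maximum MPDU and the 40-byte long v2 RX descriptor. A compile-time restatement of the sizing rule (a sketch; it assumes the two structs are exactly 40 and 4 bytes, per the comment):

	/* Sketch: keep the constant honest about what it accounts for. */
	BUILD_BUG_ON(RTW89_PCI_RX_BUF_SIZE !=
		     11454 + sizeof(struct rtw89_rxdesc_long_v2) +
		     sizeof(struct rtw89_pci_rxbd_info));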
@@ -1290,6 +1291,7 @@ struct rtw89_pci_gen_def {
void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
void (*disable_eq)(struct rtw89_dev *rtwdev);
+ void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up);
};
#define RTW89_PCI_SSID(v, d, ssv, ssd, cust) \
@@ -1323,6 +1325,7 @@ struct rtw89_pci_info {
enum mac_ax_io_rcy_tmr io_rcy_tmr;
bool rx_ring_eq_is_full;
bool check_rx_tag;
+ bool no_rxbd_fs;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1805,4 +1808,12 @@ static inline void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
gen_def->disable_eq(rtwdev);
}
+static inline void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ gen_def->power_wake(rtwdev, pwr_up);
+}
+
#endif
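[Editor's note] power_wake slots into the same per-generation dispatch the other rtw89_pci_gen_def ops use: the inline wrapper forwards through pci_info->gen_def, so rtw89_pci_ops_deinit() can call rtw89_pci_power_wake() without knowing whether the chip binds rtw89_pci_gen_ax or rtw89_pci_gen_be. A stripped-down sketch of the pattern (get_gen_ops() is a hypothetical lookup standing in for rtwdev->pci_info->gen_def):

	/* Sketch: one call site, per-generation behavior via an ops table. */
	struct gen_ops {
		void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up);
	};

	static void power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
	{
		const struct gen_ops *ops = get_gen_ops(rtwdev); /* hypothetical */

		ops->power_wake(rtwdev, pwr_up);
	}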
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 34154506f5d4..cd39eebe8186 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -691,5 +691,6 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.l1ss_set = rtw89_pci_l1ss_set_be,
.disable_eq = rtw89_pci_disable_eq_be,
+ .power_wake = _patch_pcie_power_wake_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index f24aca663cf0..c7c05f7fda1d 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include "acpi.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
@@ -260,19 +261,32 @@ rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
+static const u64
+rtw89_ra_mask_eht_mcs0_11[4] = {RA_MASK_EHT_1SS_MCS0_11, RA_MASK_EHT_2SS_MCS0_11,
+ RA_MASK_EHT_3SS_MCS0_11, RA_MASK_EHT_4SS_MCS0_11};
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link,
+ struct ieee80211_link_sta *link_sta,
const struct rtw89_chan *chan,
bool *fix_giltf_en, u8 *fix_giltf)
{
struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
- u8 he_gi = mask->control[nl_band].he_gi;
u8 he_ltf = mask->control[nl_band].he_ltf;
+ u8 he_gi = mask->control[nl_band].he_gi;
- if (!rtwsta_link->use_cfg_mask)
+ *fix_giltf_en = true;
+
+ if (rtwdev->chip->chip_id == RTL8852C &&
+ chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ rtw89_sta_link_has_su_mu_4xhe08(link_sta))
+ *fix_giltf = RTW89_GILTF_SGI_4XHE08;
+ else
+ *fix_giltf = RTW89_GILTF_2XHE08;
+
+ if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
return;
if (he_ltf == 2 && he_gi == 2) {
@@ -287,12 +301,7 @@ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
*fix_giltf = RTW89_GILTF_1XHE16;
} else if (he_ltf == 0 && he_gi == 0) {
*fix_giltf = RTW89_GILTF_1XHE08;
- } else {
- *fix_giltf_en = false;
- return;
}
-
- *fix_giltf_en = true;
}
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
@@ -324,7 +333,14 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (link_sta->eht_cap.has_eht) {
mode |= RTW89_RA_MODE_EHT;
ra_mask |= get_eht_ra_mask(link_sta);
- high_rate_masks = rtw89_ra_mask_eht_rates;
+
+ if (rtwdev->hal.no_mcs_12_13)
+ high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
+ else
+ high_rate_masks = rtw89_ra_mask_eht_rates;
+
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
+ chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->he_cap.has_he) {
mode |= RTW89_RA_MODE_HE;
csi_mode = RTW89_RA_RPT_MODE_HE;
@@ -336,7 +352,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
- rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, chan, &fix_giltf_en, &fix_giltf);
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
+ chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->vht_cap.vht_supported) {
u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
@@ -466,11 +483,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
-static void __rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct rtw89_sta_link *rtwsta_link,
- u32 changed)
+void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link,
+ u32 changed)
{
+ struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_ra_info *ra = &rtwsta_link->ra;
struct ieee80211_link_sta *link_sta;
@@ -503,14 +520,11 @@ void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta
u32 changed)
{
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
- struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
unsigned int link_id;
- rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
- rtwvif_link = rtwsta_link->rtwvif_link;
- __rtw89_phy_ra_update_sta(rtwdev, rtwvif_link, rtwsta_link, changed);
- }
+ rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
+ rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
@@ -1854,6 +1868,228 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
+static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
+{
+ switch (ant_gain_regd) {
+ case RTW89_ANT_GAIN_ETSI:
+ return RTW89_ETSI;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "unknown antenna gain domain: %d\n",
+ ant_gain_regd);
+ return RTW89_REGD_NUM;
+ }
+}
+
+/* antenna gain in unit of 0.25 dBm */
+#define RTW89_ANT_GAIN_2GHZ_MIN -8
+#define RTW89_ANT_GAIN_2GHZ_MAX 14
+#define RTW89_ANT_GAIN_5GHZ_MIN -8
+#define RTW89_ANT_GAIN_5GHZ_MAX 20
+#define RTW89_ANT_GAIN_6GHZ_MIN -8
+#define RTW89_ANT_GAIN_6GHZ_MAX 20
+
+#define RTW89_ANT_GAIN_REF_2GHZ 14
+#define RTW89_ANT_GAIN_REF_5GHZ 20
+#define RTW89_ANT_GAIN_REF_6GHZ 20
+
+void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_acpi_rtag_result res = {};
+ u32 domain;
+ int ret;
+ u8 i, j;
+ u8 regd;
+ u8 val;
+
+ if (!chip->support_ant_gain)
+ return;
+
+ ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "acpi: cannot eval rtag: %d\n", ret);
+ return;
+ }
+
+ if (res.revision != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "unknown rtag revision: %d\n", res.revision);
+ return;
+ }
+
+ domain = get_unaligned_le32(&res.domain);
+
+ for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
+ if (!(domain & BIT(i)))
+ continue;
+
+ regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
+ if (regd >= RTW89_REGD_NUM)
+ continue;
+ ant_gain->regd_enabled |= BIT(regd);
+ }
+
+ for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
+ for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
+ val = res.ant_gain_table[i][j];
+ switch (j) {
+ default:
+ case RTW89_ANT_GAIN_2GHZ_SUBBAND:
+ val = RTW89_ANT_GAIN_REF_2GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_2GHZ_MIN,
+ RTW89_ANT_GAIN_2GHZ_MAX);
+ break;
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
+ val = RTW89_ANT_GAIN_REF_5GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_5GHZ_MIN,
+ RTW89_ANT_GAIN_5GHZ_MAX);
+ break;
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
+ val = RTW89_ANT_GAIN_REF_6GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_6GHZ_MIN,
+ RTW89_ANT_GAIN_6GHZ_MAX);
+ }
+ ant_gain->offset[i][j] = val;
+ }
+ }
+}
+
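[Editor's note] The normalization above turns each ACPI RTAG antenna gain (0.25 dBm steps) into a power offset relative to a per-band reference: offset = REF - clamp(gain, MIN, MAX). A reported 2 GHz gain of 10 (2.5 dBm), for instance, yields 14 - 10 = 4, i.e. 1 dB of headroom. A sketch of the per-entry rule for the 2 GHz case:

	/* Sketch: one table entry; all values in 0.25 dBm steps. */
	static s8 ant_gain_to_offset_2ghz(s8 gain)
	{
		gain = clamp_t(s8, gain, RTW89_ANT_GAIN_2GHZ_MIN,
			       RTW89_ANT_GAIN_2GHZ_MAX);
		return RTW89_ANT_GAIN_REF_2GHZ - gain; /* lower gain => more headroom */
	}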
+static
+enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq)
+{
+ switch (center_freq) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "center freq: %u to antenna gain subband is unhandled\n",
+ center_freq);
+ fallthrough;
+ case 2412 ... 2484:
+ return RTW89_ANT_GAIN_2GHZ_SUBBAND;
+ case 5180 ... 5240:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
+ case 5250 ... 5320:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
+ case 5500 ... 5720:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
+ case 5745 ... 5885:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
+ case 5955 ... 6155:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
+ case 6175 ... 6415:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
+ case 6435 ... 6515:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
+ case 6535 ... 6695:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
+ case 6715 ... 6855:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;
+
+ /* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
+ * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
+ * struct rtw89_6ghz_span.
+ */
+
+ case 6895 ... 7115:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
+ }
+}
+
+static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u32 center_freq)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ enum rtw89_ant_gain_subband subband_l, subband_h;
+ const struct rtw89_6ghz_span *span;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
+
+ if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
+ subband_l = span->ant_gain_subband_low;
+ subband_h = span->ant_gain_subband_high;
+ } else {
+ subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
+ subband_h = subband_l;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "center_freq %u: antenna gain subband {%u, %u}\n",
+ center_freq, subband_l, subband_h);
+
+ return min(ant_gain->offset[path][subband_l],
+ ant_gain->offset[path][subband_h]);
+}
+
+static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ s8 offset_patha, offset_pathb;
+
+ if (!chip->support_ant_gain)
+ return 0;
+
+ if (!(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
+
+ return max(offset_patha, offset_pathb);
+}
+
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ s8 offset_patha, offset_pathb;
+
+ if (!(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
+
+ return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
+}
+EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
+
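[Editor's note] The two consumers combine the per-path offsets differently, and the asymmetry is deliberate: rtw89_phy_ant_gain_offset() feeds max(A, B) into the regulatory limit (the larger headroom raises the shared limit), while rtw89_phy_ant_gain_pwr_offset() exports the A-minus-B difference in BB units so the chip-specific txpwr-diff code can pull the higher-gain path back down. A numeric sketch with assumed values:

	/* Sketch (assumed offsets in 0.25 dB RF units; factors bb=3, rf=2):
	 *   offset A = 4 (gain 2.5 dBm), offset B = 2 (gain 3.0 dBm)
	 */
	s8 a = 4, b = 2;
	s8 limit_bump = max(a, b);	/* 4 -> both paths +1.00 dB conducted */
	s16 diff_bb = (a - b) << 1;	/* rf_to_bb: 4 -> path B ref -0.50 dB */
	/* Net: A radiates +1.00 dB, B +0.50 dB conducted; B's extra 0.5 dBm
	 * of antenna gain makes the radiated increase match on both paths.
	 */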
+void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ s8 offset_patha, offset_pathb;
+
+ if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
+ seq_puts(m, "no DAG is applied\n");
+ return;
+ }
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
+
+ seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
+ seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
+}
+
static const u8 rtw89_rs_idx_num_ax[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
@@ -1917,20 +2153,6 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
-static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
-}
-
-static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
-}
-
static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
const u8 tssi_deviation_point = 0;
@@ -2027,7 +2249,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt = 0, sar;
+ s8 lmt = 0, sar, offset;
s8 cstr;
switch (band) {
@@ -2059,7 +2281,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
+ offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
+ lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@@ -2286,7 +2509,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt_ru = 0, sar;
+ s8 lmt_ru = 0, sar, offset;
s8 cstr;
switch (band) {
@@ -2318,7 +2541,8 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
+ offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
+ lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@@ -3228,10 +3452,16 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
(int)(len - sizeof(report->hdr)), &report->state);
}
+static void
+rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
+ [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
@@ -3285,11 +3515,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
fallthrough;
default:
- rtw89_info(rtwdev, "c2h class %d not support\n", class);
+ rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
return;
}
if (!handler) {
- rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
+ rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
func);
return;
}
@@ -4058,7 +4288,6 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
if (!force && cfo->crystal_cap == crystal_cap)
return;
- crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
@@ -4181,7 +4410,7 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
s32 curr_cfo)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
- s8 crystal_cap = cfo->crystal_cap;
+ int crystal_cap = cfo->crystal_cap;
s32 cfo_abs = abs(curr_cfo);
int sign;
@@ -4202,15 +4431,17 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
}
sign = curr_cfo > 0 ? 1 : -1;
if (cfo_abs > CFO_TRK_STOP_TH_4)
- crystal_cap += 7 * sign;
+ crystal_cap += 3 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_3)
- crystal_cap += 5 * sign;
- else if (cfo_abs > CFO_TRK_STOP_TH_2)
crystal_cap += 3 * sign;
+ else if (cfo_abs > CFO_TRK_STOP_TH_2)
+ crystal_cap += 1 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_1)
crystal_cap += 1 * sign;
else
return;
+
+ crystal_cap = clamp(crystal_cap, 0, 127);
rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
rtw89_debug(rtwdev, RTW89_DBG_CFO,
"X_cap{Curr,Default}={0x%x,0x%x}\n",
@@ -6310,6 +6541,12 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_cfg_txrx_path(rtwdev);
}
+void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_env_monitor_init(rtwdev);
+ rtw89_physts_parsing_init(rtwdev);
+}
+
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index c683f4d7d29b..08b635c93ac3 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -51,13 +51,17 @@
#define RA_MASK_EHT_2SS_RATES GENMASK_ULL(43, 28)
#define RA_MASK_EHT_3SS_RATES GENMASK_ULL(59, 44)
#define RA_MASK_EHT_4SS_RATES GENMASK_ULL(62, 60)
+#define RA_MASK_EHT_1SS_MCS0_11 GENMASK_ULL(23, 12)
+#define RA_MASK_EHT_2SS_MCS0_11 GENMASK_ULL(39, 28)
+#define RA_MASK_EHT_3SS_MCS0_11 GENMASK_ULL(55, 44)
+#define RA_MASK_EHT_4SS_MCS0_11 GENMASK_ULL(62, 60)
#define RA_MASK_EHT_RATES GENMASK_ULL(62, 12)
#define CFO_TRK_ENABLE_TH (2 << 2)
#define CFO_TRK_STOP_TH_4 (30 << 2)
#define CFO_TRK_STOP_TH_3 (20 << 2)
#define CFO_TRK_STOP_TH_2 (10 << 2)
-#define CFO_TRK_STOP_TH_1 (00 << 2)
+#define CFO_TRK_STOP_TH_1 (03 << 2)
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
@@ -151,6 +155,7 @@ enum rtw89_phy_c2h_rfk_log_func {
enum rtw89_phy_c2h_rfk_report_func {
RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
+ RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
};
enum rtw89_phy_c2h_dm_func {
@@ -813,6 +818,7 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
+void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
@@ -826,6 +832,11 @@ s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
const struct rtw89_rate_desc *desc);
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
const struct rtw89_rate_desc *rate_desc);
+void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev);
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan);
+void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
@@ -896,10 +907,34 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
+static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
+}
+
+static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
+}
+
+static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
+}
+
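[Editor's note] With txpwr_factor_bb = 3, _rf = 2 and _mac = 1 (the values the chip infos below set), one LSB is 0.125 dB in BB units, 0.25 dB in RF units and 0.5 dB in MAC units; rf_to_bb is therefore a left shift by one and rf_to_mac a right shift by one. A worked sketch:

	/* Sketch: 3 dB expressed in each domain (factors 3/2/1). */
	s8 rf  = 12;			/* 3 dB / 0.25 dB  */
	s8 bb  = rf << (3 - 2);		/* 24 = 3 dB / 0.125 dB */
	s8 mac = rf >> (2 - 1);		/* 6  = 3 dB / 0.5 dB   */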
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed);
+void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link,
+ u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index c1c12abc2ea9..96ea04d90cd3 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -8,6 +8,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@@ -62,11 +63,8 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_power_mode_change(rtwdev, enter);
}
-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
{
- if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
- return;
-
if (!rtwdev->ps_mode)
return;
@@ -85,8 +83,8 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_lps_parm lps_param = {
.macid = rtwvif_link->mac_id,
@@ -96,7 +94,6 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
- rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif_link);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
@@ -121,17 +118,32 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
}
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode)
{
+ struct rtw89_vif_link *rtwvif_link;
+ bool can_ps_mode = true;
+ unsigned int link_id;
+
lockdep_assert_held(&rtwdev->mutex);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, rtwvif_link);
- if (ps_mode)
- __rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ __rtw89_enter_lps_link(rtwdev, rtwvif_link);
+
+ if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ can_ps_mode = false;
+ }
+
+ if (RTW89_CHK_FW_FEATURE(LPS_CH_INFO, &rtwdev->fw))
+ rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
+ else
+ rtw89_fw_h2c_lps_ml_cmn_info(rtwdev, rtwvif);
+
+ if (ps_mode && can_ps_mode)
+ __rtw89_enter_ps_mode(rtwdev);
}
static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev,
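[Editor's note] rtw89_enter_lps() now takes the whole vif and is MLO-aware: the LPS H2C is issued per active link, deep PS is allowed only when no link acts as a P2P client (the old per-link early return becomes the can_ps_mode gate), and the channel-info H2C is picked by firmware capability — LPS_CH_INFO when the firmware supports it, the multi-link common info otherwise. This is what lets rtw89_recalc_lps() drop its blanket support_mlo bail-out. The gate reduces to this sketch (for_each_link stands in for rtw89_vif_for_each_link, send_lps_h2c for the per-link H2C):

	bool can_ps_mode = true;

	for_each_link(link) {
		send_lps_h2c(link);			/* per-link LPS parm */
		if (link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
			can_ps_mode = false;		/* P2P GC must stay awake */
	}

	if (ps_mode && can_ps_mode)
		__rtw89_enter_ps_mode(rtwdev);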
@@ -157,6 +169,8 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
+ rtw89_phy_dm_reinit(rtwdev);
+
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
rtw89_leave_lps_vif(rtwdev, rtwvif_link);
@@ -282,12 +296,6 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
enum rtw89_entity_mode mode;
int count = 0;
- /* FIXME: Fix rtw89_enter_lps() and __rtw89_enter_ps_mode()
- * to take MLO cases into account before doing the following.
- */
- if (rtwdev->support_mlo)
- goto disable_lps;
-
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
goto disable_lps;
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index cdd712966b09..2b88f254a32d 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -5,11 +5,11 @@
#ifndef __RTW89_PS_H_
#define __RTW89_PS_H_
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 18ec7c0252fb..10d0efa7a58e 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -7447,6 +7447,10 @@
#define B_BE_CSIPRT_HESU_AID_EN BIT(25)
#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24)
+#define R_BE_DRV_INFO_OPTION 0x11470
+#define R_BE_DRV_INFO_OPTION_C1 0x15470
+#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
+
#define R_BE_RX_ERR_ISR 0x114F4
#define R_BE_RX_ERR_ISR_C1 0x154F4
#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index cad5189708e7..80b2f74589eb 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -17,7 +17,7 @@ static const struct rtw89_regd rtw89_ww_regd =
static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC),
- COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE),
COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
@@ -35,7 +35,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -72,7 +72,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -82,13 +82,13 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR),
COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -101,7 +101,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE),
COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN),
@@ -110,12 +110,12 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC),
COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI),
- COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_THAILAND),
+ COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND),
COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
@@ -158,9 +158,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -176,12 +176,12 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -194,19 +194,19 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -216,15 +216,15 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA),
@@ -237,9 +237,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -247,13 +247,16 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA),
COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA),
+ COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
};
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 68c67a763f4d..c56f70267882 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2298,7 +2298,8 @@ static void rtw8851b_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]);
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]);
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
@@ -2391,6 +2392,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
@@ -2464,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.nctl_post_table = &rtw8851b_nctl_post_defs_tbl,
.dflt_parms = &rtw89_8851b_dflt_parms,
.rfe_parms_conf = rtw89_8851b_rfe_parms_conf,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2479,6 +2482,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index 364e36354225..f72b3ac6f149 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -2199,7 +2199,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (dgain > 0x5fc || dgain < 0x556) {
_dpk_one_shot(rtwdev, phy, path, D_SYNC);
- dgain = _dpk_dgain_read(rtwdev);
+ _dpk_dgain_read(rtwdev);
}
if (agc_cnt == 0) {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index 651cbce1dd7e..5810af825242 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -27,6 +27,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -66,6 +67,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
static const struct rtw89_driver_info rtw89_8851be_info = {
.chip = &rtw8851b_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8851b_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index e647759ebd69..9bd2842c27d5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2068,7 +2068,9 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2116,6 +2118,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
@@ -2181,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = &rtw89_8852a_dflt_parms,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
@@ -2196,6 +2200,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 701187d69e14..2037713e3952 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -27,6 +27,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -64,6 +65,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
static const struct rtw89_driver_info rtw89_8852ae_info = {
.chip = &rtw8852a_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8852a_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 49a319128316..dfb2bf61b0b8 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -745,6 +745,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
@@ -819,6 +820,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = &rtw89_8852b_dflt_parms,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -834,6 +836,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index f4aa4437fb75..0e094ce9c9b0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -1206,24 +1206,25 @@ void __rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_cha
}
static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
+ enum rtw89_phy_idx phy_idx,
+ s16 ref, u16 pwr_ofst_decrease)
{
const u16 tssi_16dbm_cw = 0x12c;
const u8 base_cw_0db = 0x27;
- const s8 ofst_int = 0;
s16 pwr_s10_3;
s16 rf_pwr_cw;
u16 bb_pwr_cw;
u32 pwr_cw;
u32 tssi_ofst_cw;
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease;
bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0));
rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3));
rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) -
+ pwr_ofst_decrease;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
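[Editor's note] The reference codeword is computed in S10.3 fixed point (three fractional bits, 0.125 dB per LSB): the 0.25 dB ref is shifted up one, the 0 dB base codeword up three, and the new pwr_ofst_decrease is subtracted in the same domain before the field split. A worked sketch with ref = 0 and no decrease:

	/* Sketch: ref = 0, pwr_ofst_decrease = 0, base_cw_0db = 0x27. */
	s16 pwr_s10_3 = (0 << 1) + (0x27 << 3);		/* 0x138 */
	u16 bb_cw = pwr_s10_3 & 0x7;			/* 0x0 */
	s16 rf_cw = (pwr_s10_3 >> 3) & 0x3f;		/* 0x27, inside [15, 63] */
	u32 pwr_cw = (rf_cw << 3) | bb_cw;		/* 0x138 */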
@@ -1234,10 +1235,11 @@ static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800};
const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
+ u16 ofst_dec[RF_PATH_NUM_8852BX];
const u8 ofst_ofdm = 0x4;
const u8 ofst_cck = 0x8;
const s16 ref_ofdm = 0;
@@ -1250,19 +1252,20 @@ static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
B_AX_PWR_REF, 0x0);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 0 : abs(pwr_ofst);
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ofst : 0;
- for (i = 0; i < RF_PATH_NUM_8852BX; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx);
+ }
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852BX; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx);
+ }
}
static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
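[Editor's note] The sign convention of the new pwr_ofst argument (the A-minus-B antenna-gain offset from rtw89_phy_ant_gain_pwr_offset()): positive means path A has the larger headroom, so path B's reference is decreased; negative decreases path A instead. Only decreases are applied here — the shared increase already went in through the limit path. A sketch with values:

	/* Sketch: pwr_ofst = +4 (BB units) => path B lowered 0.5 dB;
	 * pwr_ofst = -4 => path A lowered instead; 0 => neither touched.
	 */
	u16 dec_a = pwr_ofst > 0 ? 0 : abs(pwr_ofst);
	u16 dec_b = pwr_ofst > 0 ? pwr_ofst : 0;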
@@ -1333,6 +1336,16 @@ static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev,
tx_shape_ofdm);
}
+static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst;
+
+ pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
+}
+
static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -1342,12 +1355,13 @@ static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8852bx_set_txpwr_diff(rtwdev, chan, phy_idx);
}
static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
- rtw8852bx_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, 0);
}
static
@@ -1936,7 +1950,9 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index a13ea1cce4a7..abdeafc14b0b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -27,6 +27,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -66,6 +67,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
static const struct rtw89_driver_info rtw89_8852be_info = {
.chip = &rtw8852b_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8852b_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index 876725133228..bde3e1fb7ca6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -679,6 +679,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
@@ -752,6 +753,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -767,6 +769,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
index e4f40c2e287d..b69fa17beb33 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
@@ -9,6 +9,12 @@
#include "reg.h"
#include "rtw8852bt.h"
+static const struct rtw89_pci_ssid_quirk rtw8852bt_pci_ssid_quirks[] = {
+ {RTW89_PCI_SSID(PCI_VENDOR_ID_REALTEK, 0xB520, 0x103C, 0x88E9, HP),
+ .bitmap = BIT(RTW89_QUIRK_THERMAL_PROT_110C)},
+ {},
+};
+
static const struct rtw89_pci_info rtw8852bt_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
@@ -27,6 +33,7 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -61,11 +68,12 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.disable_intr = rtw89_pci_disable_intr,
.recognize_intrs = rtw89_pci_recognize_intrs,
- .ssid_quirks = NULL,
+ .ssid_quirks = rtw8852bt_pci_ssid_quirks,
};
static const struct rtw89_driver_info rtw89_8852bte_info = {
.chip = &rtw8852bt_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8852bt_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index cde34f8e1e67..bc84b15e7826 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1882,9 +1882,9 @@ static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
}
static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
+ enum rtw89_phy_idx phy_idx,
+ s16 ref, u16 pwr_ofst_decrease)
{
- s8 ofst_int = 0;
u8 base_cw_0db = 0x27;
u16 tssi_16dbm_cw = 0x12c;
s16 pwr_s10_3 = 0;
@@ -1893,13 +1893,14 @@ static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
u32 pwr_cw = 0;
u32 tssi_ofst_cw = 0;
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease;
bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) -
+ pwr_ofst_decrease;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
@@ -1943,9 +1944,10 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
static const u32 addr[RF_PATH_NUM_8852C] = {0x5800, 0x7800};
+ u16 ofst_dec[RF_PATH_NUM_8852C];
const u32 mask = 0x7FFFFFF;
const u8 ofst_ofdm = 0x4;
const u8 ofst_cck = 0x8;
@@ -1959,19 +1961,20 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
GENMASK(27, 10), 0x0);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 0 : abs(pwr_ofst);
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ofst : 0;
- for (i = 0; i < RF_PATH_NUM_8852C; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx);
+ }
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852C; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx);
+ }
}
static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
@@ -2052,6 +2055,16 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
B_P1_DAC_COMP_POST_DPD_EN);
}
+static void rtw8852c_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst;
+
+ pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
+}
+
static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -2061,12 +2074,13 @@ static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
rtw8852c_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_diff(rtwdev, chan, phy_idx);
}
static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx, 0);
}
static void
@@ -2793,7 +2807,10 @@ static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
+
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2893,6 +2910,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
@@ -2959,6 +2977,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dflt_parms = &rtw89_8852c_dflt_parms,
.rfe_parms_conf = NULL,
.chanctx_listener = &rtw8852c_chanctx_listener,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2976,6 +2995,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.hw_sec_hdr = true,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index bd17c0a1c684..b92e2ce4f4ad 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -1769,10 +1769,10 @@ u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
target_ch = chan->channel - 33;
}
} else if (chan->band_type == RTW89_BAND_6G) {
- if (chan->channel >= 1 && chan->channel <= 125)
- target_ch = chan->channel + 32;
- else
+ if (chan->channel > 125)
target_ch = chan->channel - 32;
+ else
+ target_ch = chan->channel + 32;
} else {
target_ch = chan->channel;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 1a46878be96b..5d864fd5974e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -36,6 +36,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
@@ -95,6 +96,7 @@ static const struct dmi_system_id rtw8852c_pci_quirks[] = {
static const struct rtw89_driver_info rtw89_8852ce_info = {
.chip = &rtw8852c_chip_info,
+ .variant = NULL,
.quirks = rtw8852c_pci_quirks,
.bus = {
.pci = &rtw8852c_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 9a4db04a1967..11d66bfceb15 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -14,7 +14,7 @@
#include "rtw8922a_rfk.h"
#include "util.h"
-#define RTW8922A_FW_FORMAT_MAX 2
+#define RTW8922A_FW_FORMAT_MAX 3
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin"
@@ -2565,8 +2565,10 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal =
- RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
+
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2607,6 +2609,16 @@ static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
}
+static void rtw8922a_phy_rpt_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (desc_info->rssi <= 0x1 || (desc_info->rssi >> 2) > MAX_RSSI)
+ return;
+
+ rx_status->signal = (desc_info->rssi >> 2) - MAX_RSSI;
+}
+
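
The new phy_rpt_to_rssi hook rebases the descriptor's raw RSSI report into dBm: the low two bits of the 12-bit field are dropped and the result is offset by MAX_RSSI, and zero or out-of-range reports are ignored so any earlier estimate is kept. A standalone sketch, assuming the MAX_RSSI value of 110 from the driver's core header:

#include <stdio.h>

#define MAX_RSSI 110	/* assumed, matching the rtw89 core header */

/* Sketch of rtw8922a_phy_rpt_to_rssi(): drop the two low-order bits of
 * the raw report, rebase against MAX_RSSI to get dBm, and ignore
 * zero/overflow reports so a previous estimate is kept.
 */
static int phy_rpt_to_signal(unsigned int raw, int *signal)
{
	if (raw <= 0x1 || (raw >> 2) > MAX_RSSI)
		return -1;
	*signal = (int)(raw >> 2) - MAX_RSSI;
	return 0;
}

int main(void)
{
	int sig;

	if (!phy_rpt_to_signal(0x0a0, &sig))	/* 0x0a0 >> 2 = 40 -> -70 */
		printf("signal = %d dBm\n", sig);
	return 0;
}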
static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
@@ -2665,6 +2677,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
.convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = rtw8922a_phy_rpt_to_rssi,
.ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
@@ -2729,6 +2742,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL, /* load parm from fw */
.rfe_parms_conf = NULL, /* load parm from fw */
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2746,6 +2760,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = true,
@@ -2823,6 +2838,12 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
};
EXPORT_SYMBOL(rtw8922a_chip_info);
+const struct rtw89_chip_variant rtw8922ae_vs_variant = {
+ .no_mcs_12_13 = true,
+ .fw_min_ver_code = RTW89_FW_VER_CODE(0, 35, 54, 0),
+};
+EXPORT_SYMBOL(rtw8922ae_vs_variant);
+
MODULE_FIRMWARE(RTW8922A_MODULE_FIRMWARE);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11be wireless 8922A driver");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.h b/drivers/net/wireless/realtek/rtw89/rtw8922a.h
index 597317ab6af7..a29cfa5b4291 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.h
@@ -69,5 +69,6 @@ struct rtw8922a_efuse {
} __packed;
extern const struct rtw89_chip_info rtw8922a_chip_info;
+extern const struct rtw89_chip_variant rtw8922ae_vs_variant;
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index edfb1f220af0..0ea8d5281c10 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -33,6 +33,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_DEF,
.rx_ring_eq_is_full = true,
.check_rx_tag = true,
+ .no_rxbd_fs = true,
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@@ -70,6 +71,16 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
static const struct rtw89_driver_info rtw89_8922ae_info = {
.chip = &rtw8922a_chip_info,
+ .variant = NULL,
+ .quirks = NULL,
+ .bus = {
+ .pci = &rtw8922a_pci_info,
+ },
+};
+
+static const struct rtw89_driver_info rtw89_8922ae_vs_info = {
+ .chip = &rtw8922a_chip_info,
+ .variant = &rtw8922ae_vs_variant,
.quirks = NULL,
.bus = {
.pci = &rtw8922a_pci_info,
@@ -81,6 +92,10 @@ static const struct pci_device_id rtw89_8922ae_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8922),
.driver_data = (kernel_ulong_t)&rtw89_8922ae_info,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x892B),
+ .driver_data = (kernel_ulong_t)&rtw89_8922ae_vs_info,
+ },
{},
};
MODULE_DEVICE_TABLE(pci, rtw89_8922ae_id_table);
@@ -95,5 +110,5 @@ static struct pci_driver rtw89_8922ae_driver = {
module_pci_driver(rtw89_8922ae_driver);
MODULE_AUTHOR("Realtek Corporation");
-MODULE_DESCRIPTION("Realtek 802.11be wireless 8922AE driver");
+MODULE_DESCRIPTION("Realtek 802.11be wireless 8922AE/8922AE-VS driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index bcc287771b2a..871f45a6508c 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -42,7 +42,7 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
/* freq 6875 (ch 185, 20MHz) spans RTW89_SAR_6GHZ_SUBBAND_7_H
* and RTW89_SAR_6GHZ_SUBBAND_8, so directly describe it with
- * struct rtw89_sar_span in the following.
+ * struct rtw89_6ghz_span.
*/
case 6895 ... 7115:
@@ -50,63 +50,18 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
}
}
-struct rtw89_sar_span {
- enum rtw89_sar_subband subband_low;
- enum rtw89_sar_subband subband_high;
-};
-
-#define RTW89_SAR_SPAN_VALID(span) ((span)->subband_high)
-
-#define RTW89_SAR_6GHZ_SPAN_HEAD 6145
-#define RTW89_SAR_6GHZ_SPAN_IDX(center_freq) \
- ((((int)(center_freq) - RTW89_SAR_6GHZ_SPAN_HEAD) / 5) / 2)
-
-#define RTW89_DECL_SAR_6GHZ_SPAN(center_freq, subband_l, subband_h) \
- [RTW89_SAR_6GHZ_SPAN_IDX(center_freq)] = { \
- .subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
- .subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
- }
-
-/* Since 6GHz SAR subbands are not edge aligned, some cases span two SAR
- * subbands. In the following, we describe each of them with rtw89_sar_span.
- */
-static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = {
- RTW89_DECL_SAR_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
-};
-
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev,
u32 center_freq, s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
- const struct rtw89_sar_span *span = NULL;
enum rtw89_sar_subband subband_l, subband_h;
- int idx;
-
- if (center_freq >= RTW89_SAR_6GHZ_SPAN_HEAD) {
- idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq);
-	/* To decrease the size of rtw89_sar_overlapping_6ghz[],
-	 * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs
-	 * so that the first span is index 0 of the table. So, if the center
-	 * frequency is less than the first one, the index will be negative.
-	 */
- if (idx >= 0 && idx < ARRAY_SIZE(rtw89_sar_overlapping_6ghz))
- span = &rtw89_sar_overlapping_6ghz[idx];
- }
+ const struct rtw89_6ghz_span *span;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
if (span && RTW89_SAR_SPAN_VALID(span)) {
- subband_l = span->subband_low;
- subband_h = span->subband_high;
+ subband_l = span->sar_subband_low;
+ subband_h = span->sar_subband_high;
} else {
subband_l = rtw89_sar_get_subband(rtwdev, center_freq);
subband_h = subband_l;
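
The removed rtw89_sar_overlapping_6ghz[] table was indexed by RTW89_SAR_6GHZ_SPAN_IDX(), which counts 10 MHz pairs above the 6145 MHz head frequency; the same spans now come from the shared rtw89_get_6ghz_span() helper. A worked sketch of the old index math, using the macro exactly as removed above:

#include <stdio.h>

#define RTW89_SAR_6GHZ_SPAN_HEAD 6145
#define RTW89_SAR_6GHZ_SPAN_IDX(center_freq) \
	((((int)(center_freq) - RTW89_SAR_6GHZ_SPAN_HEAD) / 5) / 2)

int main(void)
{
	/* 6145 -> 0, 6505 -> 36, 6885 -> 74; anything below 6145 goes
	 * negative, which is why the old lookup checked idx >= 0.
	 */
	printf("%d %d %d %d\n",
	       RTW89_SAR_6GHZ_SPAN_IDX(6145),
	       RTW89_SAR_6GHZ_SPAN_IDX(6505),
	       RTW89_SAR_6GHZ_SPAN_IDX(6885),
	       RTW89_SAR_6GHZ_SPAN_IDX(6135));
	return 0;
}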
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 7b203bb7f151..26a944d3b672 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -365,6 +365,7 @@ static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
ser_reset_vif(rtwdev, rtwvif);
rtwdev->total_sta_assoc = 0;
+ refcount_set(&rtwdev->refcount_ap_info, 0);
}
/* hal function */
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index b2e47829983f..70fe7cebc9d5 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -560,6 +560,9 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16)
#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21)
+/* BE RXD - PHY RPT dword0 */
+#define BE_RXD_PHY_RSSI GENMASK(11, 0)
+
struct rtw89_phy_sts_ie00 {
__le32 w0;
__le32 w1;
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 3e81fd974ec1..01754d031bb4 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -620,7 +620,10 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
* need to unlock mutex
*/
mutex_unlock(&rtwdev->mutex);
- key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
+ if (ieee80211_vif_is_mld(wow_vif))
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id);
+ else
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
mutex_lock(&rtwdev->mutex);
kfree(rekey_conf);
@@ -691,9 +694,7 @@ static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev)
{
- struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
-
- __rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ __rtw89_enter_ps_mode(rtwdev);
}
static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
@@ -701,7 +702,7 @@ static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
if (rtw89_wow_mgd_linked(rtwdev))
- rtw89_enter_lps(rtwdev, rtwvif_link, false);
+ rtw89_enter_lps(rtwdev, rtwvif_link->rtwvif, false);
else if (rtw89_wow_no_link(rtwdev))
rtw89_fw_h2c_fwips(rtwdev, rtwvif_link, true);
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 986b07bfa0ee..8fb58a5d911c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2612,24 +2612,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
ret = -EBUSY;
- goto out;
+ goto out_unlock;
}
ret = wl12xx_init_vif_data(wl, vif);
if (ret < 0)
- goto out;
+ goto out_unlock;
wlvif->wl = wl;
role_type = wl12xx_get_role_type(wl, wlvif);
if (role_type == WL12XX_INVALID_ROLE_TYPE) {
ret = -EINVAL;
- goto out;
+ goto out_unlock;
}
ret = wlcore_allocate_hw_queue_base(wl, wlvif);
if (ret < 0)
- goto out;
+ goto out_unlock;
/*
* TODO: after the nvs issue will be solved, move this block
@@ -2644,7 +2644,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
ret = wl12xx_init_fw(wl);
if (ret < 0)
- goto out;
+ goto out_unlock;
}
/*
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index c07acfcbbd9c..7c57d4c8744a 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -88,7 +88,7 @@ static ssize_t hw_pg_ver_show(struct device *dev,
static DEVICE_ATTR_RO(hw_pg_ver);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -121,7 +121,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
static const struct bin_attribute fwlog_attr = {
.attr = { .name = "fwlog", .mode = 0400 },
- .read = wl1271_sysfs_read_fwlog,
+ .read_new = wl1271_sysfs_read_fwlog,
};
int wlcore_sysfs_init(struct wl1271 *wl)
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 3f338b8096c7..fc8ea58bc165 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -45,7 +45,7 @@ enum wl1271_tm_attrs {
};
#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)
-static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
+static const struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
[WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 },
[WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 },
[WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 347a15544afe..cf6a331d4042 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -5048,6 +5048,45 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ,
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /* As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field and 320MHz in
+ * 6GHz is supported, include all the following
+ * MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#endif
};
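
Each rx_tx_mcs*_max_nss byte in the block above packs two values: following mac80211's EHT MCS/NSS encoding, the low nibble carries the maximum Rx NSS and the high nibble the maximum Tx NSS, so 0x88 advertises eight spatial streams in both directions. A small decoding sketch (mask values mirror IEEE80211_EHT_MCS_NSS_RX/TX from include/linux/ieee80211.h):

#include <stdio.h>

#define EHT_MCS_NSS_RX 0x0f	/* low nibble: max Rx NSS */
#define EHT_MCS_NSS_TX 0xf0	/* high nibble: max Tx NSS */

int main(void)
{
	unsigned int v = 0x88;	/* value used for every bandwidth above */

	printf("rx nss = %u, tx nss = %u\n",
	       v & EHT_MCS_NSS_RX, (v & EHT_MCS_NSS_TX) >> 4);
	return 0;
}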
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 04517bd3325a..a066977af0be 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
+#include <linux/suspend.h>
#include <net/rtnetlink.h>
#include "iosm_ipc_imem.h"
@@ -18,6 +19,7 @@ MODULE_LICENSE("GPL v2");
/* WWAN GUID */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
+static bool pci_registered;
static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
{
@@ -448,7 +450,6 @@ static struct pci_driver iosm_ipc_driver = {
},
.id_table = iosm_ipc_ids,
};
-module_pci_driver(iosm_ipc_driver);
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
size_t size, dma_addr_t *mapping, int direction)
@@ -530,3 +531,56 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
IPC_CB(skb)->mapping = 0;
dev_kfree_skb(skb);
}
+
+static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) {
+ if (pci_registered) {
+ pci_unregister_driver(&iosm_ipc_driver);
+ pci_registered = false;
+ }
+ } else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) {
+ if (!pci_registered) {
+ int ret;
+
+ ret = pci_register_driver(&iosm_ipc_driver);
+ if (ret) {
+ pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n",
+ ret);
+ } else {
+ pci_registered = true;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block pm_notifier = {
+ .notifier_call = pm_notify,
+};
+
+static int __init iosm_ipc_driver_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&iosm_ipc_driver);
+ if (ret)
+ return ret;
+
+ pci_registered = true;
+
+ register_pm_notifier(&pm_notifier);
+
+ return 0;
+}
+module_init(iosm_ipc_driver_init);
+
+static void __exit iosm_ipc_driver_exit(void)
+{
+ unregister_pm_notifier(&pm_notifier);
+
+ if (pci_registered)
+ pci_unregister_driver(&iosm_ipc_driver);
+}
+module_exit(iosm_ipc_driver_exit);
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 8381b0dc7acb..02f2ec7cf4ce 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -43,6 +43,8 @@
#include "t7xx_state_monitor.h"
#include "t7xx_port_proxy.h"
+#define DRIVER_NAME "mtk_t7xx"
+
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
@@ -833,6 +835,7 @@ static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct t7xx_pci_dev *t7xx_dev;
+ void __iomem *iomem;
int ret;
t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
@@ -848,12 +851,21 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
- pci_name(pdev));
+ iomem = pcim_iomap_region(pdev, T7XX_PCI_IREG_BASE, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(iomem);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IREG BAR: %d\n", ret);
+ return -ENOMEM;
+ }
+ IREG_BASE(t7xx_dev) = iomem;
+
+ iomem = pcim_iomap_region(pdev, T7XX_PCI_EREG_BASE, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(iomem);
if (ret) {
- dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
+ dev_err(&pdev->dev, "Could not request EREG BAR: %d\n", ret);
return -ENOMEM;
}
+ t7xx_dev->base_addr.pcie_ext_reg_base = iomem;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
@@ -867,9 +879,6 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
- t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
-
ret = t7xx_pci_pm_init(t7xx_dev);
if (ret)
return ret;
@@ -937,7 +946,7 @@ static const struct pci_device_id t7xx_pci_table[] = {
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);
static struct pci_driver t7xx_pci_driver = {
- .name = "mtk_t7xx",
+ .name = DRIVER_NAME,
.id_table = t7xx_pci_table,
.probe = t7xx_pci_probe,
.remove = t7xx_pci_remove,
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 956ae92f7573..2037cd6d4f4f 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -5,11 +5,16 @@
* Copyright (C) 2015, Marvell International Ltd.
*/
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/of_gpio.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
+
#include "nfcmrvl.h"
static unsigned int hci_muxed;
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 1ec651e31064..3425b68f0ddc 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -116,18 +116,16 @@ static void st21nfca_tx_work(struct work_struct *work)
struct nfc_dev *dev;
struct sk_buff *skb;
- if (info) {
- dev = info->hdev->ndev;
- skb = info->dep_info.tx_pending;
+ dev = info->hdev->ndev;
+ skb = info->dep_info.tx_pending;
- device_lock(&dev->dev);
+ device_lock(&dev->dev);
- nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE,
- ST21NFCA_WR_XCHG_DATA, skb->data, skb->len,
- info->async_cb, info);
- device_unlock(&dev->dev);
- kfree_skb(skb);
- }
+ nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_WR_XCHG_DATA, skb->data, skb->len,
+ info->async_cb, info);
+ device_unlock(&dev->dev);
+ kfree_skb(skb);
}
static void st21nfca_im_send_pdu(struct st21nfca_hci_info *info,
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 02c3d11a19c4..6d7861383806 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -11,7 +11,6 @@
#include <linux/i2c.h>
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 4319ab50c10d..1de11b722f04 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1251,7 +1251,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
anv->admin_tagset.numa_node = NUMA_NO_NODE;
anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
anv->admin_tagset.driver_data = &anv->adminq;
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
@@ -1275,7 +1274,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->tagset.timeout = NVME_IO_TIMEOUT;
anv->tagset.numa_node = NUMA_NO_NODE;
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
anv->tagset.driver_data = &anv->ioq;
ret = blk_mq_alloc_tag_set(&anv->tagset);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a970168a3014..76b615d4d5b9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -885,6 +885,12 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_OK;
}
+static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd)
+{
+ cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
+ cmnd->rw.lbatm = cpu_to_le16(0xffff);
+}
+
static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
struct request *req)
{
@@ -1017,18 +1023,17 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
control |= NVME_RW_PRINFO_PRACT;
}
- switch (ns->head->pi_type) {
- case NVME_NS_DPS_PI_TYPE3:
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
control |= NVME_RW_PRINFO_PRCHK_GUARD;
- break;
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- control |= NVME_RW_PRINFO_PRCHK_GUARD |
- NVME_RW_PRINFO_PRCHK_REF;
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_REF;
if (op == nvme_cmd_zone_append)
control |= NVME_RW_APPEND_PIREMAP;
nvme_set_ref_tag(ns, cmnd, req);
- break;
+ }
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_APP;
+ nvme_set_app_tag(req, cmnd);
}
}
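
With this hunk the PRINFO check bits are derived from the bio's integrity flags instead of the namespace PI type, so guard, reference-tag and application-tag checking can be enabled independently per I/O. A standalone sketch of that mapping; the flag and control-bit constants below are stand-ins, not the kernel's values:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins: the kernel's BIP_* and NVME_RW_PRINFO_* constants differ. */
#define CHECK_GUARD	(1u << 0)
#define CHECK_REFTAG	(1u << 1)
#define CHECK_APPTAG	(1u << 2)

#define PRCHK_REF	(1u << 10)
#define PRCHK_APP	(1u << 11)
#define PRCHK_GUARD	(1u << 12)

/* Mirror of the new nvme_setup_rw() logic: each integrity-check flag on
 * the bio independently selects the matching PRINFO check bit.
 */
static uint16_t prinfo_from_flags(unsigned int flags)
{
	uint16_t control = 0;

	if (flags & CHECK_GUARD)
		control |= PRCHK_GUARD;
	if (flags & CHECK_REFTAG)
		control |= PRCHK_REF;
	if (flags & CHECK_APPTAG)
		control |= PRCHK_APP;
	return control;
}

int main(void)
{
	/* guard + reftag checking, no apptag: prints 0x1400 */
	printf("0x%x\n", prinfo_from_flags(CHECK_GUARD | CHECK_REFTAG));
	return 0;
}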
@@ -2002,6 +2007,7 @@ static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
lim->atomic_write_hw_boundary = boundary;
lim->atomic_write_hw_unit_min = bs;
lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
@@ -2128,9 +2134,10 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct queue_limits lim;
int ret;
- blk_mq_freeze_queue(ns->disk->queue);
lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
+
+ blk_mq_freeze_queue(ns->disk->queue);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
blk_mq_unfreeze_queue(ns->disk->queue);
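
The reordering in this and the following hunks moves queue_limits_start_update() ahead of blk_mq_freeze_queue(), so the limits lock is consistently taken outside the queue freeze; presumably this keeps the two from being acquired in opposite orders on different update paths. A kernel-style sketch of the resulting order (not standalone code):

	lim = queue_limits_start_update(q);	/* takes q->limits_lock */
	/* ... adjust lim ... */
	blk_mq_freeze_queue(q);			/* freeze inside the lock */
	ret = queue_limits_commit_update(q, &lim); /* releases q->limits_lock */
	blk_mq_unfreeze_queue(q);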
@@ -2177,12 +2184,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
+ lim = queue_limits_start_update(ns->disk->queue);
+
blk_mq_freeze_queue(ns->disk->queue);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
-
- lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
@@ -2285,6 +2292,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
+ lim = queue_limits_start_update(ns->head->disk->queue);
blk_mq_freeze_queue(ns->head->disk->queue);
/*
* queue_limits mixes values that are the hardware limitations
@@ -2301,7 +2309,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
* the splitting limits in to make sure we still obey possibly
* lower limitations of other controllers.
*/
- lim = queue_limits_start_update(ns->head->disk->queue);
lim.logical_block_size = ns_lim->logical_block_size;
lim.physical_block_size = ns_lim->physical_block_size;
lim.io_min = ns_lim->io_min;
@@ -3092,7 +3099,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
struct nvme_effects_log **log)
{
- struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
+ struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
int ret;
if (cel)
@@ -3109,7 +3116,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
return ret;
}
- xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(cel);
+ return xa_err(old);
+ }
out:
*log = cel;
return 0;
@@ -3171,6 +3182,25 @@ free_data:
return ret;
}
+static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
+ u8 csi, struct nvme_effects_log **log)
+{
+ struct nvme_effects_log *effects, *old;
+
+ effects = kzalloc(sizeof(*effects), GFP_KERNEL);
+ if (!effects)
+ return -ENOMEM;
+
+ old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(effects);
+ return xa_err(old);
+ }
+
+ *log = effects;
+ return 0;
+}
+
static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{
struct nvme_effects_log *log = ctrl->effects;
@@ -3217,10 +3247,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
if (!ctrl->effects) {
- ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
- if (!ctrl->effects)
- return -ENOMEM;
- xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
+ ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
+ if (ret < 0)
+ return ret;
}
nvme_init_known_nvm_effects(ctrl);
@@ -4564,7 +4593,6 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect and keep alive */
set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
@@ -4639,7 +4667,6 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect */
set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b81af7919e94..094be164ffdc 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,7 +16,6 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
-#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c4bb8dfe1a45..7be92d07430e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1187,43 +1187,4 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const char *nvme_get_error_status_str(u16 status);
-const char *nvme_get_opcode_str(u8 opcode);
-const char *nvme_get_admin_opcode_str(u8 opcode);
-const char *nvme_get_fabrics_opcode_str(u8 opcode);
-#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const char *nvme_get_error_status_str(u16 status)
-{
- return "I/O Error";
-}
-static inline const char *nvme_get_opcode_str(u8 opcode)
-{
- return "I/O Cmd";
-}
-static inline const char *nvme_get_admin_opcode_str(u8 opcode)
-{
- return "Admin Cmd";
-}
-
-static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
-{
- return "Fabrics Cmd";
-}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-
-static inline const char *nvme_opcode_str(int qid, u8 opcode)
-{
- return qid ? nvme_get_opcode_str(opcode) :
- nvme_get_admin_opcode_str(opcode);
-}
-
-static inline const char *nvme_fabrics_opcode_str(
- int qid, const struct nvme_command *cmd)
-{
- if (nvme_is_fabrics(cmd))
- return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
-
- return nvme_opcode_str(qid, cmd->common.opcode);
-}
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e2634f437f33..278bed4e35bb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -8,7 +8,6 @@
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
@@ -373,7 +372,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
/*
* Ensure that the doorbell is updated before reading the event
* index from memory. The controller needs to provide similar
- * ordering to ensure the envent index is updated before reading
+ * ordering to ensure the event index is updated before reading
* the doorbell.
*/
mb();
@@ -463,7 +462,7 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL && offset)
- blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+ blk_mq_map_hw_queues(map, dev->dev, offset);
else
blk_mq_map_queues(map);
qoff += map->nr_queues;
@@ -1148,13 +1147,13 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
}
}
-static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
- struct io_comp_batch *iob)
+static inline bool nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
{
- int found = 0;
+ bool found = false;
while (nvme_cqe_pending(nvmeq)) {
- found++;
+ found = true;
/*
* load-load control dependency between phase and the rest of
* the cqe requires a full read memory barrier
@@ -2086,8 +2085,8 @@ static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
GFP_KERNEL);
if (!dev->host_mem_descs) {
- dma_free_noncontiguous(dev->dev, dev->host_mem_size,
- dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt,
+ DMA_BIDIRECTIONAL);
dev->hmb_sgt = NULL;
return -ENOMEM;
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b127d41dbbfe..841238f38fdd 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
#endif
+static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
@@ -127,6 +129,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
+ NVME_TCP_Q_IO_CPU_SET = 3,
};
enum nvme_tcp_recv_state {
@@ -1562,23 +1565,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
+/*
+ * Track the number of queues assigned to each cpu using a global per-cpu
+ * counter and select the least used cpu from the mq_map. Our goal is to spread
+ * different controllers' I/O threads across different cpu cores.
+ *
+ * Note that the accounting is not 100% perfect, but it doesn't need to be; we
+ * simply make a best effort to select the best candidate cpu core we find at
+ * any given point.
+ */
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- int n = 0;
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int qid = nvme_tcp_queue_id(queue) - 1;
+ unsigned int *mq_map = NULL;
+ int cpu, min_queues = INT_MAX, io_cpu;
+
+ if (wq_unbound)
+ goto out;
if (nvme_tcp_default_queue(queue))
- n = qid - 1;
+ mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
else if (nvme_tcp_read_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ mq_map = set->map[HCTX_TYPE_READ].mq_map;
else if (nvme_tcp_poll_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
- ctrl->io_queues[HCTX_TYPE_READ] - 1;
- if (wq_unbound)
- queue->io_cpu = WORK_CPU_UNBOUND;
- else
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ mq_map = set->map[HCTX_TYPE_POLL].mq_map;
+
+ if (WARN_ON(!mq_map))
+ goto out;
+
+ /* Search for the least used cpu from the mq_map */
+ io_cpu = WORK_CPU_UNBOUND;
+ for_each_online_cpu(cpu) {
+ int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
+
+ if (mq_map[cpu] != qid)
+ continue;
+ if (num_queues < min_queues) {
+ io_cpu = cpu;
+ min_queues = num_queues;
+ }
+ }
+ if (io_cpu != WORK_CPU_UNBOUND) {
+ queue->io_cpu = io_cpu;
+ atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
+ set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
+ }
+out:
+ dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
+ qid, queue->io_cpu);
}
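
The selection loop above is a straightforward arg-min over the cpus that the tag set maps to this queue's hardware context. A user-space model of the same policy, with a hypothetical eight-cpu map and per-cpu queue counts:

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 8
#define CPU_UNBOUND (-1)

/* Model of the loop in nvme_tcp_set_queue_io_cpu(): among the cpus whose
 * mq_map entry points at hardware context 'qid', pick the one currently
 * serving the fewest queues.
 */
static int pick_io_cpu(const int mq_map[NR_CPUS],
		       const int cpu_queues[NR_CPUS], int qid)
{
	int cpu, io_cpu = CPU_UNBOUND, min_queues = INT_MAX;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (mq_map[cpu] != qid)
			continue;
		if (cpu_queues[cpu] < min_queues) {
			io_cpu = cpu;
			min_queues = cpu_queues[cpu];
		}
	}
	return io_cpu;
}

int main(void)
{
	int mq_map[NR_CPUS]     = { 0, 0, 1, 1, 2, 2, 3, 3 };
	int cpu_queues[NR_CPUS] = { 2, 1, 0, 3, 1, 1, 0, 0 };

	/* cpus 0 and 1 both map to qid 0; cpu 1 wins with 1 queue vs 2 */
	printf("io_cpu = %d\n", pick_io_cpu(mq_map, cpu_queues, 0));
	return 0;
}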
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -1722,7 +1758,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->sock->sk->sk_allocation = GFP_ATOMIC;
queue->sock->sk->sk_use_task_frag = false;
- nvme_tcp_set_queue_io_cpu(queue);
+ queue->io_cpu = WORK_CPU_UNBOUND;
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
@@ -1844,6 +1880,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
+ if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
+ atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
@@ -1878,9 +1917,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
nvme_tcp_init_recv_ctx(queue);
nvme_tcp_setup_sock_ops(queue);
- if (idx)
+ if (idx) {
+ nvme_tcp_set_queue_io_cpu(queue);
ret = nvmf_connect_io_queue(nctrl, idx);
- else
+ } else
ret = nvmf_connect_admin_queue(nctrl);
if (!ret) {
@@ -2845,6 +2885,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+ int cpu;
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
@@ -2862,6 +2903,9 @@ static int __init nvme_tcp_init_module(void)
if (!nvme_tcp_wq)
return -ENOMEM;
+ for_each_possible_cpu(cpu)
+ atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
+
nvmf_register_transport(&nvme_tcp_transport);
return 0;
}
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 46be031f91b4..fb7446d6d682 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -115,3 +115,14 @@ config NVME_TARGET_AUTH
target side.
If unsure, say N.
+
+config NVME_TARGET_PCI_EPF
+ tristate "NVMe PCI Endpoint Function target support"
+ depends on NVME_TARGET && PCI_ENDPOINT
+ depends on NVME_CORE=y || NVME_CORE=NVME_TARGET
+ help
+ This enables the NVMe PCI Endpoint Function target driver support,
+	  which allows creating an NVMe PCI controller using an endpoint mode
+ capable PCI controller.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index f2b025bbe10c..ed8522911d1f 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
+obj-$(CONFIG_NVME_TARGET_PCI_EPF) += nvmet-pci-epf.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o pr.o
@@ -20,4 +21,5 @@ nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
nvme-fcloop-y += fcloop.o
nvmet-tcp-y += tcp.o
+nvmet-pci-epf-y += pci-epf.o
nvmet-$(CONFIG_TRACING) += trace.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index fa89b0549c36..e670dc185a96 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -12,6 +12,142 @@
#include <linux/unaligned.h>
#include "nvmet.h"
+static void nvmet_execute_delete_sq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!sqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_sqid(ctrl, sqid, false);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ status = ctrl->ops->delete_sq(ctrl, sqid);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_create_sq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_command *cmd = req->cmd;
+ u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
+ u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
+ u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
+ u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
+ u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!sqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_sqid(ctrl, sqid, true);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ /*
+ * Note: The NVMe specification allows multiple SQs to use the same CQ.
+ * However, the target code does not really support that. So for now,
+ * prevent this and fail the command if sqid and cqid are different.
+ */
+ if (!cqid || cqid != sqid) {
+ pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid);
+ status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
+ status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_delete_cq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!cqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_cqid(ctrl, cqid);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ status = ctrl->ops->delete_cq(ctrl, cqid);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_create_cq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_command *cmd = req->cmd;
+ u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
+ u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
+ u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
+ u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
+ u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!cqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_cqid(ctrl, cqid);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
+ status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
+ prp1, irq_vector);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
@@ -230,8 +366,18 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
+ struct nvme_effects_log *log)
{
+	/*
+	 * For a PCI target controller, advertise support for the queue
+	 * creation and deletion admin commands.
+	 */
+ if (nvmet_is_pci_ctrl(ctrl)) {
+ log->acs[nvme_admin_delete_sq] =
+ log->acs[nvme_admin_create_sq] =
+ log->acs[nvme_admin_delete_cq] =
+ log->acs[nvme_admin_create_cq] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ }
+
log->acs[nvme_admin_get_log_page] =
log->acs[nvme_admin_identify] =
log->acs[nvme_admin_abort_cmd] =
@@ -240,7 +386,10 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
log->acs[nvme_admin_async_event] =
log->acs[nvme_admin_keep_alive] =
cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+}
+static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+{
log->iocs[nvme_cmd_read] =
log->iocs[nvme_cmd_flush] =
log->iocs[nvme_cmd_dsm] =
@@ -265,6 +414,7 @@ static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_effects_log *log;
u16 status = NVME_SC_SUCCESS;
@@ -276,6 +426,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
switch (req->cmd->get_log_page.csi) {
case NVME_CSI_NVM:
+ nvmet_get_cmd_effects_admin(ctrl, log);
nvmet_get_cmd_effects_nvm(log);
break;
case NVME_CSI_ZNS:
@@ -283,6 +434,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
status = NVME_SC_INVALID_IO_CMD_SET;
goto free;
}
+ nvmet_get_cmd_effects_admin(ctrl, log);
nvmet_get_cmd_effects_nvm(log);
nvmet_get_cmd_effects_zns(log);
break;
@@ -508,7 +660,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys *subsys = ctrl->subsys;
struct nvme_id_ctrl *id;
- u32 cmd_capsule_size;
+ u32 cmd_capsule_size, ctratt;
u16 status = 0;
if (!subsys->subsys_discovered) {
@@ -523,9 +675,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
goto out;
}
- /* XXX: figure out how to assign real vendors IDs. */
- id->vid = 0;
- id->ssvid = 0;
+ id->vid = cpu_to_le16(subsys->vendor_id);
+ id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);
memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
@@ -557,8 +708,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* XXX: figure out what to do about RTD3R/RTD3 */
id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
- id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
- NVME_CTRL_ATTR_TBKAS);
+ ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
+ if (nvmet_is_pci_ctrl(ctrl))
+ ctratt |= NVME_CTRL_ATTR_RHII;
+ id->ctratt = cpu_to_le32(ctratt);
id->oacs = 0;
@@ -1105,6 +1258,92 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
return 0;
}
+static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ if (!nvmet_is_pci_ctrl(ctrl))
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
+
+ /*
+	 * The NVMe base specification v2.1 recommends supporting 128-bit host
+	 * IDs (section 5.1.25.1.28.1). However, that same section also says
+	 * that "The controller may support a 64-bit Host Identifier and/or an
+	 * extended 128-bit Host Identifier". So simplify this support and do
+	 * not support 64-bit host IDs to avoid needing to check that all
+ * controllers associated with the same subsystem all use the same host
+ * ID size.
+ */
+ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
+ sizeof(req->sq->ctrl->hostid));
+}
+
+static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_irq_coalesce irqc = {
+ .time = (cdw11 >> 8) & 0xff,
+ .thr = cdw11 & 0xff,
+ };
+
+ /*
+	 * This feature is not supported for fabrics controllers but is
+	 * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+}
+
+static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_irq_config irqcfg = {
+ .iv = cdw11 & 0xffff,
+ .cd = (cdw11 >> 16) & 0x1,
+ };
+
+ /*
+	 * This feature is not supported for fabrics controllers but is
+	 * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
+}
+
+static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_arbitration arb = {
+ .hpw = (cdw11 >> 24) & 0xff,
+ .mpw = (cdw11 >> 16) & 0xff,
+ .lpw = (cdw11 >> 8) & 0xff,
+ .ab = cdw11 & 0x3,
+ };
+
+ if (!ctrl->ops->set_feature) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
+}
+
void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
@@ -1118,6 +1357,9 @@ void nvmet_execute_set_features(struct nvmet_req *req)
return;
switch (cdw10 & 0xff) {
+ case NVME_FEAT_ARBITRATION:
+ status = nvmet_set_feat_arbitration(req);
+ break;
case NVME_FEAT_NUM_QUEUES:
ncqr = (cdw11 >> 16) & 0xffff;
nsqr = cdw11 & 0xffff;
@@ -1128,6 +1370,12 @@ void nvmet_execute_set_features(struct nvmet_req *req)
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
+ case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_set_feat_irq_coalesce(req);
+ break;
+ case NVME_FEAT_IRQ_CONFIG:
+ status = nvmet_set_feat_irq_config(req);
+ break;
case NVME_FEAT_KATO:
status = nvmet_set_feat_kato(req);
break;
@@ -1135,7 +1383,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
break;
case NVME_FEAT_HOST_ID:
- status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
+ status = nvmet_set_feat_host_id(req);
break;
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_set_feat_write_protect(req);
@@ -1172,6 +1420,79 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
return 0;
}
+static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_feat_irq_coalesce irqc = { };
+ u16 status;
+
+ /*
+	 * This feature is not supported for fabrics controllers but is
+	 * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
+ struct nvmet_feat_irq_config irqcfg = { .iv = iv };
+ u16 status;
+
+ /*
+	 * This feature is not supported for fabrics controllers but is
+	 * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_feat_arbitration arb = { };
+ u16 status;
+
+ if (!ctrl->ops->get_feature) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req,
+ ((u32)arb.hpw << 24) |
+ ((u32)arb.mpw << 16) |
+ ((u32)arb.lpw << 8) |
+ (arb.ab & 0x3));
+
+ return NVME_SC_SUCCESS;
+}
+
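
The Arbitration feature round-trips through a single dword in both handlers: the HPW, MPW and LPW burst weights occupy bytes 3 to 1 and the arbitration burst sits in the low bits (masked with 0x3 here, as in the code above). A standalone pack/unpack sketch of that layout:

#include <stdint.h>
#include <stdio.h>

struct arb {
	uint8_t hpw, mpw, lpw, ab;
};

/* Pack/unpack of the Arbitration feature dword, mirroring the handlers
 * above: weights in bytes 3..1, arbitration burst in the low bits.
 */
static uint32_t arb_pack(struct arb a)
{
	return ((uint32_t)a.hpw << 24) | ((uint32_t)a.mpw << 16) |
	       ((uint32_t)a.lpw << 8) | (a.ab & 0x3);
}

static struct arb arb_unpack(uint32_t cdw11)
{
	struct arb a = {
		.hpw = (cdw11 >> 24) & 0xff,
		.mpw = (cdw11 >> 16) & 0xff,
		.lpw = (cdw11 >> 8) & 0xff,
		.ab  = cdw11 & 0x3,
	};
	return a;
}

int main(void)
{
	struct arb a = { .hpw = 32, .mpw = 16, .lpw = 8, .ab = 2 };
	uint32_t dw = arb_pack(a);

	printf("0x%08x -> hpw=%u ab=%u\n", (unsigned int)dw,
	       arb_unpack(dw).hpw, arb_unpack(dw).ab);
	return 0;
}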
void nvmet_get_feat_kato(struct nvmet_req *req)
{
nvmet_set_result(req, req->sq->ctrl->kato * 1000);
@@ -1198,21 +1519,24 @@ void nvmet_execute_get_features(struct nvmet_req *req)
* need to come up with some fake values for these.
*/
#if 0
- case NVME_FEAT_ARBITRATION:
- break;
case NVME_FEAT_POWER_MGMT:
break;
case NVME_FEAT_TEMP_THRESH:
break;
case NVME_FEAT_ERR_RECOVERY:
break;
+ case NVME_FEAT_WRITE_ATOMIC:
+ break;
+#endif
+ case NVME_FEAT_ARBITRATION:
+ status = nvmet_get_feat_arbitration(req);
+ break;
case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_get_feat_irq_coalesce(req);
break;
case NVME_FEAT_IRQ_CONFIG:
+ status = nvmet_get_feat_irq_config(req);
break;
- case NVME_FEAT_WRITE_ATOMIC:
- break;
-#endif
case NVME_FEAT_ASYNC_EVENT:
nvmet_get_feat_async_event(req);
break;
@@ -1293,6 +1617,27 @@ out:
nvmet_req_complete(req, status);
}
+u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_fabrics_admin_cmd_data_len(req);
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
+ return nvmet_discovery_cmd_data_len(req);
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ return nvmet_get_log_page_len(cmd);
+ case nvme_admin_identify:
+ return NVME_IDENTIFY_DATA_SIZE;
+ case nvme_admin_get_features:
+ return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -1307,13 +1652,30 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
+ /* For PCI controllers, admin commands shall not use SGL. */
+ if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
+ cmd->common.flags & NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+
if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) {
+ case nvme_admin_delete_sq:
+ req->execute = nvmet_execute_delete_sq;
+ return 0;
+ case nvme_admin_create_sq:
+ req->execute = nvmet_execute_create_sq;
+ return 0;
case nvme_admin_get_log_page:
req->execute = nvmet_execute_get_log_page;
return 0;
+ case nvme_admin_delete_cq:
+ req->execute = nvmet_execute_delete_cq;
+ return 0;
+ case nvme_admin_create_cq:
+ req->execute = nvmet_execute_create_cq;
+ return 0;
case nvme_admin_identify:
req->execute = nvmet_execute_identify;
return 0;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 2b030f0efc38..e44ef69dffc2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -37,6 +37,7 @@ static struct nvmet_type_name_map nvmet_transport[] = {
{ NVMF_TRTYPE_RDMA, "rdma" },
{ NVMF_TRTYPE_FC, "fc" },
{ NVMF_TRTYPE_TCP, "tcp" },
+ { NVMF_TRTYPE_PCI, "pci" },
{ NVMF_TRTYPE_LOOP, "loop" },
};
@@ -46,6 +47,7 @@ static const struct nvmet_type_name_map nvmet_addr_family[] = {
{ NVMF_ADDR_FAMILY_IP6, "ipv6" },
{ NVMF_ADDR_FAMILY_IB, "ib" },
{ NVMF_ADDR_FAMILY_FC, "fc" },
+ { NVMF_ADDR_FAMILY_PCI, "pci" },
{ NVMF_ADDR_FAMILY_LOOP, "loop" },
};
@@ -1400,6 +1402,49 @@ out_unlock:
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+static ssize_t nvmet_subsys_attr_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0x%x\n", to_subsys(item)->vendor_id);
+}
+
+static ssize_t nvmet_subsys_attr_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ u16 vid;
+
+ if (kstrtou16(page, 0, &vid))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->vendor_id = vid;
+ up_write(&nvmet_config_sem);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_vendor_id);
+
+static ssize_t nvmet_subsys_attr_subsys_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0x%x\n",
+ to_subsys(item)->subsys_vendor_id);
+}
+
+static ssize_t nvmet_subsys_attr_subsys_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ u16 ssvid;
+
+ if (kstrtou16(page, 0, &ssvid))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->subsys_vendor_id = ssvid;
+ up_write(&nvmet_config_sem);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_subsys_vendor_id);
+
static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
char *page)
{
@@ -1628,6 +1673,8 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_serial,
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
+ &nvmet_subsys_attr_attr_vendor_id,
+ &nvmet_subsys_attr_attr_subsys_vendor_id,
&nvmet_subsys_attr_attr_model,
&nvmet_subsys_attr_attr_qid_max,
&nvmet_subsys_attr_attr_ieee_oui,
@@ -1782,6 +1829,7 @@ static struct config_group *nvmet_referral_make(
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&port->entry);
+ port->disc_addr.trtype = NVMF_TRTYPE_MAX;
config_group_init_type_name(&port->group, name, &nvmet_referral_type);
return &port->group;
@@ -2007,6 +2055,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->max_queue_size = -1; /* < 0 == let the transport choose */
+ port->disc_addr.trtype = NVMF_TRTYPE_MAX;
port->disc_addr.portid = cpu_to_le16(portid);
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index fde6c555af61..cdc4a09a6e8a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -836,6 +836,89 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
complete(&sq->confirm_done);
}
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid)
+{
+ if (!ctrl->sqs)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ if (cqid > ctrl->subsys->max_qid)
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ /*
+	 * Note: For PCI controllers, the NVMe specification allows multiple
+	 * SQs to share a single CQ. However, we do not support this yet, so
+	 * check that there is no SQ defined for a CQ. If one exists, then the
+ * CQ ID is invalid for creation as well as when the CQ is being
+ * deleted (as that would mean that the SQ was not deleted before the
+ * CQ).
+ */
+ if (ctrl->sqs[cqid])
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
+ u16 qid, u16 size)
+{
+ u16 status;
+
+ status = nvmet_check_cqid(ctrl, qid);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_cq_setup(ctrl, cq, qid, size);
+
+ return NVME_SC_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_create);
+
+u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid,
+ bool create)
+{
+ if (!ctrl->sqs)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ if (sqid > ctrl->subsys->max_qid)
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ if ((create && ctrl->sqs[sqid]) ||
+ (!create && !ctrl->sqs[sqid]))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ return NVME_SC_SUCCESS;
+}
+
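
nvmet_check_sqid() encodes a simple invariant: an SQ ID is only valid for creation when the slot is free, and only valid for deletion when it is occupied. A user-space model of the check, with stand-in status codes and a fixed queue-ID range:

#include <stdio.h>

#define MAX_QID 16
#define SC_SUCCESS     0x0
#define SC_QID_INVALID 0x1	/* stand-in status codes */

/* Model of nvmet_check_sqid(): in range, and free for create / in use
 * for delete.
 */
static int check_sqid(const void *sqs[MAX_QID + 1], unsigned int sqid,
		      int create)
{
	if (sqid > MAX_QID)
		return SC_QID_INVALID;
	if ((create && sqs[sqid]) || (!create && !sqs[sqid]))
		return SC_QID_INVALID;
	return SC_SUCCESS;
}

int main(void)
{
	const void *sqs[MAX_QID + 1] = { 0 };
	int dummy;

	sqs[1] = &dummy;
	printf("%d %d\n", check_sqid(sqs, 1, 1),	/* invalid: in use */
	       check_sqid(sqs, 2, 1));			/* ok: free slot */
	return 0;
}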
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+ u16 sqid, u16 size)
+{
+ u16 status;
+ int ret;
+
+ if (!kref_get_unless_zero(&ctrl->ref))
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ status = nvmet_check_sqid(ctrl, sqid, true);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ ret = nvmet_sq_init(sq);
+ if (ret) {
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto ctrl_put;
+ }
+
+ nvmet_sq_setup(ctrl, sq, sqid, size);
+ sq->ctrl = ctrl;
+
+ return NVME_SC_SUCCESS;
+
+ctrl_put:
+ nvmet_ctrl_put(ctrl);
+ return status;
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_create);
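+
+/*
+ * A minimal sketch of how a PCI transport backend is expected to use the
+ * helper above from its create_sq operation (the foo_* names are
+ * hypothetical; see the pci-epf.c driver below for the real user):
+ *
+ *	static u16 foo_create_sq(struct nvmet_ctrl *ctrl, u16 sqid,
+ *				 u16 flags, u16 qsize, u64 prp1)
+ *	{
+ *		struct foo_queue *sq = foo_get_queue(ctrl, sqid);
+ *
+ *		sq->pci_addr = prp1;
+ *		return nvmet_sq_create(ctrl, &sq->nvme_sq, sqid, qsize + 1);
+ *	}
+ */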
+
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
struct nvmet_ctrl *ctrl = sq->ctrl;
@@ -929,6 +1012,33 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
return 0;
}
+static u32 nvmet_io_cmd_transfer_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+ u32 metadata_len = 0;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_fabrics_io_cmd_data_len(req);
+
+ if (!req->ns)
+ return 0;
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_zone_append:
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+ metadata_len = nvmet_rw_metadata_len(req);
+ return nvmet_rw_data_len(req) + metadata_len;
+ case nvme_cmd_dsm:
+ return nvmet_dsm_len(req);
+ case nvme_cmd_zone_mgmt_recv:
+ return (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+ default:
+ return 0;
+ }
+}
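+
+/*
+ * For example, for a Zone Management Receive command with numd == 1023,
+ * the length above evaluates to (1023 + 1) << 2 == 4096 B: numd is a
+ * 0's based dword count.
+ */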
+
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -1030,12 +1140,15 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
/*
* For fabrics, PSDT field shall describe metadata pointer (MPTR) that
* contains an address of a single contiguous physical buffer that is
- * byte aligned.
+ * byte aligned. For PCI controllers, this is optional and not enforced.
*/
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
- req->error_loc = offsetof(struct nvme_common_command, flags);
- status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
- goto fail;
+ if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) {
+ req->error_loc =
+ offsetof(struct nvme_common_command, flags);
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto fail;
+ }
}
if (unlikely(!req->sq->ctrl))
@@ -1077,11 +1190,27 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+size_t nvmet_req_transfer_len(struct nvmet_req *req)
+{
+ if (likely(req->sq->qid != 0))
+ return nvmet_io_cmd_transfer_len(req);
+ if (unlikely(!req->sq->ctrl))
+ return nvmet_connect_cmd_data_len(req);
+ return nvmet_admin_cmd_data_len(req);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_transfer_len);
+
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
if (unlikely(len != req->transfer_len)) {
+ u16 status;
+
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
+ if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
+ status = NVME_SC_SGL_INVALID_DATA;
+ else
+ status = NVME_SC_INVALID_FIELD;
+ nvmet_req_complete(req, status | NVME_STATUS_DNR);
return false;
}
@@ -1092,8 +1221,14 @@ EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
if (unlikely(data_len > req->transfer_len)) {
+ u16 status;
+
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
+ if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
+ status = NVME_SC_SGL_INVALID_DATA;
+ else
+ status = NVME_SC_INVALID_FIELD;
+ nvmet_req_complete(req, status | NVME_STATUS_DNR);
return false;
}
@@ -1184,41 +1319,6 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
-static inline bool nvmet_cc_en(u32 cc)
-{
- return (cc >> NVME_CC_EN_SHIFT) & 0x1;
-}
-
-static inline u8 nvmet_cc_css(u32 cc)
-{
- return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
-}
-
-static inline u8 nvmet_cc_mps(u32 cc)
-{
- return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
-}
-
-static inline u8 nvmet_cc_ams(u32 cc)
-{
- return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
-}
-
-static inline u8 nvmet_cc_shn(u32 cc)
-{
- return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
-}
-
-static inline u8 nvmet_cc_iosqes(u32 cc)
-{
- return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
-}
-
-static inline u8 nvmet_cc_iocqes(u32 cc)
-{
- return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
-}
-
static inline bool nvmet_css_supported(u8 cc_css)
{
switch (cc_css << NVME_CC_CSS_SHIFT) {
@@ -1295,6 +1395,7 @@ void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
mutex_unlock(&ctrl->lock);
}
+EXPORT_SYMBOL_GPL(nvmet_update_cc);
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
@@ -1402,15 +1503,15 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
* Note: ctrl->subsys->lock should be held when calling this function
*/
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
- struct nvmet_req *req)
+ struct device *p2p_client)
{
struct nvmet_ns *ns;
unsigned long idx;
- if (!req->p2p_client)
+ if (!p2p_client)
return;
- ctrl->p2p_client = get_device(req->p2p_client);
+ ctrl->p2p_client = get_device(p2p_client);
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -1439,45 +1540,44 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
ctrl->ops->delete_ctrl(ctrl);
}
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
- uuid_t *hostid)
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
{
struct nvmet_subsys *subsys;
struct nvmet_ctrl *ctrl;
+ u32 kato = args->kato;
+ u8 dhchap_status;
int ret;
- u16 status;
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
- subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+ subsys = nvmet_find_get_subsys(args->port, args->subsysnqn);
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
- subsysnqn);
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
- req->error_loc = offsetof(struct nvme_common_command, dptr);
- goto out;
+ args->subsysnqn);
+ args->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ args->error_loc = offsetof(struct nvme_common_command, dptr);
+ return NULL;
}
down_read(&nvmet_config_sem);
- if (!nvmet_host_allowed(subsys, hostnqn)) {
+ if (!nvmet_host_allowed(subsys, args->hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
- hostnqn, subsysnqn);
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+ args->hostnqn, args->subsysnqn);
+ args->result = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
- status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
- req->error_loc = offsetof(struct nvme_common_command, dptr);
+ args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+ args->error_loc = offsetof(struct nvme_common_command, dptr);
goto out_put_subsystem;
}
up_read(&nvmet_config_sem);
- status = NVME_SC_INTERNAL;
+ args->status = NVME_SC_INTERNAL;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- ctrl->port = req->port;
- ctrl->ops = req->ops;
+ ctrl->port = args->port;
+ ctrl->ops = args->ops;
#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ /* By default, set loop targets to clear IDS */
@@ -1491,8 +1591,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
- memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
- memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
@@ -1515,12 +1615,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
- status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
+ args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
goto out_free_sqs;
}
ctrl->cntlid = ret;
- uuid_copy(&ctrl->hostid, hostid);
+ uuid_copy(&ctrl->hostid, args->hostid);
/*
* Discovery controllers may use some arbitrary high value
@@ -1542,12 +1642,35 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (ret)
goto init_pr_fail;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
- nvmet_setup_p2p_ns_map(ctrl, req);
+ nvmet_setup_p2p_ns_map(ctrl, args->p2p_client);
nvmet_debugfs_ctrl_setup(ctrl);
mutex_unlock(&subsys->lock);
- *ctrlp = ctrl;
- return 0;
+ if (args->hostid)
+ uuid_copy(&ctrl->hostid, args->hostid);
+
+ dhchap_status = nvmet_setup_auth(ctrl);
+ if (dhchap_status) {
+ pr_err("Failed to setup authentication, dhchap status %u\n",
+ dhchap_status);
+ nvmet_ctrl_put(ctrl);
+ if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
+ args->status =
+ NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+ else
+ args->status = NVME_SC_INTERNAL;
+ return NULL;
+ }
+
+ args->status = NVME_SC_SUCCESS;
+
+ pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+
+ return ctrl;
init_pr_fail:
mutex_unlock(&subsys->lock);
@@ -1561,9 +1684,9 @@ out_free_ctrl:
kfree(ctrl);
out_put_subsystem:
nvmet_subsys_put(subsys);
-out:
- return status;
+ return NULL;
}
+EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl);
static void nvmet_ctrl_free(struct kref *ref)
{
@@ -1599,6 +1722,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
kref_put(&ctrl->ref, nvmet_ctrl_free);
}
+EXPORT_SYMBOL_GPL(nvmet_ctrl_put);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 28843df5fa7c..df7207640506 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -224,6 +224,9 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
}
list_for_each_entry(r, &req->port->referrals, entry) {
+ if (r->disc_addr.trtype == NVMF_TRTYPE_PCI)
+ continue;
+
nvmet_format_discovery_entry(hdr, r,
NVME_DISC_SUBSYS_NAME,
r->disc_addr.traddr,
@@ -352,6 +355,20 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
nvmet_req_complete(req, stat);
}
+u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ return nvmet_get_log_page_len(req->cmd);
+ case nvme_admin_identify:
+ return NVME_IDENTIFY_DATA_SIZE;
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 3f2857c17d95..2022757f08dc 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -179,6 +179,11 @@ static u8 nvmet_auth_failure2(void *d)
return data->rescode_exp;
}
+u32 nvmet_auth_send_data_len(struct nvmet_req *req)
+{
+ return le32_to_cpu(req->cmd->auth_send.tl);
+}
+
void nvmet_execute_auth_send(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -206,7 +211,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
offsetof(struct nvmf_auth_send_command, spsp1);
goto done;
}
- tl = le32_to_cpu(req->cmd->auth_send.tl);
+ tl = nvmet_auth_send_data_len(req);
if (!tl) {
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
@@ -429,6 +434,11 @@ static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
data->rescode_exp = req->sq->dhchap_status;
}
+u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
+{
+ return le32_to_cpu(req->cmd->auth_receive.al);
+}
+
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -454,7 +464,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
offsetof(struct nvmf_auth_receive_command, spsp1);
goto done;
}
- al = le32_to_cpu(req->cmd->auth_receive.al);
+ al = nvmet_auth_receive_data_len(req);
if (!al) {
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index c49904ebb6c2..a7ff05b3be29 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -85,6 +85,22 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
+u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ return nvmet_auth_send_data_len(req);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_auth_receive_data_len(req);
+#endif
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -114,6 +130,22 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
return 0;
}
+u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ return nvmet_auth_send_data_len(req);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_auth_receive_data_len(req);
+#endif
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -213,73 +245,67 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status;
- u8 dhchap_status;
+ struct nvmet_alloc_ctrl_args args = {
+ .port = req->port,
+ .ops = req->ops,
+ .p2p_client = req->p2p_client,
+ .kato = le32_to_cpu(c->kato),
+ };
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
- status = NVME_SC_INTERNAL;
+ args.status = NVME_SC_INTERNAL;
goto complete;
}
- status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
- if (status)
+ args.status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (args.status)
goto out;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
- req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
- status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
+ args.error_loc = offsetof(struct nvmf_connect_command, recfmt);
+ args.status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
goto out;
}
if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ args.status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+ args.result = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
- status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
- le32_to_cpu(c->kato), &ctrl, &d->hostid);
- if (status)
- goto out;
- dhchap_status = nvmet_setup_auth(ctrl);
- if (dhchap_status) {
- pr_err("Failed to setup authentication, dhchap status %u\n",
- dhchap_status);
- nvmet_ctrl_put(ctrl);
- if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
- status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
- else
- status = NVME_SC_INTERNAL;
+ args.subsysnqn = d->subsysnqn;
+ args.hostnqn = d->hostnqn;
+ args.hostid = &d->hostid;
+ args.kato = c->kato;
+
+ ctrl = nvmet_alloc_ctrl(&args);
+ if (!ctrl)
goto out;
- }
- status = nvmet_install_queue(ctrl, req);
- if (status) {
+ args.status = nvmet_install_queue(ctrl, req);
+ if (args.status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
- nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
- ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
- ctrl->pi_support ? " T10-PI is enabled" : "",
- nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
- req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+ args.result = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
- nvmet_req_complete(req, status);
+ req->error_loc = args.error_loc;
+ req->cqe->result.u32 = args.result;
+ nvmet_req_complete(req, args.status);
}
static void nvmet_execute_io_connect(struct nvmet_req *req)
@@ -343,6 +369,17 @@ out_ctrl_put:
goto out;
}
+u32 nvmet_connect_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (!nvme_is_fabrics(cmd) ||
+ cmd->fabrics.fctype != nvme_fabrics_type_connect)
+ return 0;
+
+ return sizeof(struct nvmf_connect_data);
+}
+
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index eaf31c823cbe..c1f574fe3280 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -272,6 +272,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
iter_flags = SG_MITER_FROM_SG;
}
+ if (req->cmd->rw.control & NVME_RW_LR)
+ opf |= REQ_FAILFAST_DEV;
+
if (is_pci_p2pdma_page(sg_page(req->sg)))
opf |= REQ_NOMERGE;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7233549f7c8a..b540216c0c9a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -245,6 +245,8 @@ struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
+ void *drvdata;
+
bool reset_tbkas;
struct mutex lock;
@@ -331,6 +333,8 @@ struct nvmet_subsys {
struct config_group namespaces_group;
struct config_group allowed_hosts_group;
+ u16 vendor_id;
+ u16 subsys_vendor_id;
char *model_number;
u32 ieee_oui;
char *firmware_rev;
@@ -411,6 +415,18 @@ struct nvmet_fabrics_ops {
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
+
+ /* Operations mandatory for PCI target controllers */
+ u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags,
+ u16 qsize, u64 prp1);
+ u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
+ u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
+ u16 qsize, u64 prp1, u16 irq_vector);
+ u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid);
+ u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+ void *feat_data);
+ u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+ void *feat_data);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -520,18 +536,24 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
+u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
+u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
+size_t nvmet_req_transfer_len(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
@@ -542,19 +564,37 @@ void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
+u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+ u16 size);
+u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
u16 size);
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+ u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
- uuid_t *hostid);
+
+struct nvmet_alloc_ctrl_args {
+ struct nvmet_port *port;
+ char *subsysnqn;
+ char *hostnqn;
+ uuid_t *hostid;
+ const struct nvmet_fabrics_ops *ops;
+ struct device *p2p_client;
+ u32 kato;
+ u32 result;
+ u16 error_loc;
+ u16 status;
+};
+
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
const char *hostnqn, u16 cntlid,
struct nvmet_req *req);
@@ -696,6 +736,11 @@ static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
return subsys->type != NVME_NQN_NVME;
}
+static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -737,6 +782,41 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
+static inline bool nvmet_cc_en(u32 cc)
+{
+ return (cc >> NVME_CC_EN_SHIFT) & 0x1;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+ return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+ return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+ return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+ return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+ return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+ return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
+}
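+
+/*
+ * Example decode: a host typically enables a controller by writing
+ * CC = 0x00460001; with the helpers above this yields nvmet_cc_en() == 1,
+ * nvmet_cc_iosqes() == 6 (64 B SQEs) and nvmet_cc_iocqes() == 4
+ * (16 B CQEs).
+ */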
+
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
@@ -773,7 +853,9 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
}
#ifdef CONFIG_NVME_TARGET_AUTH
+u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
+u32 nvmet_auth_receive_data_len(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
@@ -831,4 +913,26 @@ static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
percpu_ref_put(&pc_ref->ref);
}
+
+/*
+ * Data for the get_feature() and set_feature() operations of PCI target
+ * controllers.
+ */
+struct nvmet_feat_irq_coalesce {
+ u8 thr;
+ u8 time;
+};
+
+struct nvmet_feat_irq_config {
+ u16 iv;
+ bool cd;
+};
+
+struct nvmet_feat_arbitration {
+ u8 hpw;
+ u8 mpw;
+ u8 lpw;
+ u8 ab;
+};
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 30b21936b0c6..26e2907ce8bb 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -261,6 +261,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
struct scatterlist *sg;
struct bio *bio;
+ int ret = -EINVAL;
int i;
if (req->sg_cnt > BIO_MAX_VECS)
@@ -277,16 +278,19 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
}
for_each_sg(req->sg, sg, req->sg_cnt, i) {
- if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
- sg->offset) < sg->length) {
- nvmet_req_bio_put(req, bio);
- return -EINVAL;
- }
+ if (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) <
+ sg->length)
+ goto out_bio_put;
}
- blk_rq_bio_prep(rq, bio, req->sg_cnt);
-
+ ret = blk_rq_append_bio(rq, bio);
+ if (ret)
+ goto out_bio_put;
return 0;
+
+out_bio_put:
+ nvmet_req_bio_put(req, bio);
+ return ret;
}
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
new file mode 100644
index 000000000000..ac30b42cc622
--- /dev/null
+++ b/drivers/nvme/target/pci-epf.c
@@ -0,0 +1,2591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe PCI Endpoint Function target driver.
+ *
+ * Copyright (c) 2024, Western Digital Corporation or its affiliates.
+ * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com>
+ * REDS Institute, HEIG-VD, HES-SO, Switzerland
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvme.h>
+#include <linux/pci_ids.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci_regs.h>
+#include <linux/slab.h>
+
+#include "nvmet.h"
+
+static LIST_HEAD(nvmet_pci_epf_ports);
+static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
+
+/*
+ * Default and maximum allowed data transfer size. For the default,
+ * allow up to 128 page-sized segments. For the maximum allowed,
+ * use 4 times the default (which is completely arbitrary).
+ */
+#define NVMET_PCI_EPF_MAX_SEGS 128
+#define NVMET_PCI_EPF_MDTS_KB \
+ (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
+#define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4)
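+
+/*
+ * For example, with 4 KB pages (PAGE_SHIFT == 12), this gives a 512 KB
+ * default MDTS (128 << 2 KB) and a 2048 KB maximum.
+ */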
+
+/*
+ * IRQ vector coalescing threshold: by default, post 8 CQEs before raising
+ * an interrupt to the host. The default of 8 is completely arbitrary and
+ * can be changed by the host with a Set Features command.
+ */
+#define NVMET_PCI_EPF_IV_THRESHOLD 8
+
+/*
+ * BAR CC register and SQ polling intervals.
+ */
+#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(5)
+#define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
+#define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
+
+/*
+ * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
+ */
+#define NVMET_PCI_EPF_SQ_AB 8
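+
+/*
+ * Note: this burst is what get_feature(NVME_FEAT_ARBITRATION) reports
+ * back as ab = ilog2(sq_ab), i.e. ab == 3 for the default of 8.
+ */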
+
+/*
+ * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
+ * is full, in which case we retry the CQ processing after this interval.
+ */
+#define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1)
+
+enum nvmet_pci_epf_queue_flags {
+ NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */
+ NVMET_PCI_EPF_Q_LIVE, /* The queue is live */
+ NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */
+};
+
+/*
+ * IRQ vector descriptor.
+ */
+struct nvmet_pci_epf_irq_vector {
+ unsigned int vector;
+ unsigned int ref;
+ bool cd;
+ int nr_irqs;
+};
+
+struct nvmet_pci_epf_queue {
+ union {
+ struct nvmet_sq nvme_sq;
+ struct nvmet_cq nvme_cq;
+ };
+ struct nvmet_pci_epf_ctrl *ctrl;
+ unsigned long flags;
+
+ u64 pci_addr;
+ size_t pci_size;
+ struct pci_epc_map pci_map;
+
+ u16 qid;
+ u16 depth;
+ u16 vector;
+ u16 head;
+ u16 tail;
+ u16 phase;
+ u32 db;
+
+ size_t qes;
+
+ struct nvmet_pci_epf_irq_vector *iv;
+ struct workqueue_struct *iod_wq;
+ struct delayed_work work;
+ spinlock_t lock;
+ struct list_head list;
+};
+
+/*
+ * PCI Root Complex (RC) address data segment for mapping an admin or
+ * I/O command buffer @buf of @length bytes to the PCI address @pci_addr.
+ */
+struct nvmet_pci_epf_segment {
+ void *buf;
+ u64 pci_addr;
+ u32 length;
+};
+
+/*
+ * Command descriptors.
+ */
+struct nvmet_pci_epf_iod {
+ struct list_head link;
+
+ struct nvmet_req req;
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ unsigned int status;
+
+ struct nvmet_pci_epf_ctrl *ctrl;
+
+ struct nvmet_pci_epf_queue *sq;
+ struct nvmet_pci_epf_queue *cq;
+
+ /* Data transfer size and direction for the command. */
+ size_t data_len;
+ enum dma_data_direction dma_dir;
+
+ /*
+ * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we
+ * use only @data_seg. Otherwise, the array of segments @data_segs is
+ * allocated to manage multiple PCI address data segments. @data_sgl and
+ * @data_sgt are used to set up the command request for execution by the
+ * target core.
+ */
+ unsigned int nr_data_segs;
+ struct nvmet_pci_epf_segment data_seg;
+ struct nvmet_pci_epf_segment *data_segs;
+ struct scatterlist data_sgl;
+ struct sg_table data_sgt;
+
+ struct work_struct work;
+ struct completion done;
+};
+
+/*
+ * PCI target controller private data.
+ */
+struct nvmet_pci_epf_ctrl {
+ struct nvmet_pci_epf *nvme_epf;
+ struct nvmet_port *port;
+ struct nvmet_ctrl *tctrl;
+ struct device *dev;
+
+ unsigned int nr_queues;
+ struct nvmet_pci_epf_queue *sq;
+ struct nvmet_pci_epf_queue *cq;
+ unsigned int sq_ab;
+
+ mempool_t iod_pool;
+ void *bar;
+ u64 cap;
+ u32 cc;
+ u32 csts;
+
+ size_t io_sqes;
+ size_t io_cqes;
+
+ size_t mps_shift;
+ size_t mps;
+ size_t mps_mask;
+
+ unsigned int mdts;
+
+ struct delayed_work poll_cc;
+ struct delayed_work poll_sqs;
+
+ struct mutex irq_lock;
+ struct nvmet_pci_epf_irq_vector *irq_vectors;
+ unsigned int irq_vector_threshold;
+
+ bool link_up;
+ bool enabled;
+};
+
+/*
+ * PCI EPF driver private data.
+ */
+struct nvmet_pci_epf {
+ struct pci_epf *epf;
+
+ const struct pci_epc_features *epc_features;
+
+ void *reg_bar;
+ size_t msix_table_offset;
+
+ unsigned int irq_type;
+ unsigned int nr_vectors;
+
+ struct nvmet_pci_epf_ctrl ctrl;
+
+ bool dma_enabled;
+ struct dma_chan *dma_tx_chan;
+ struct mutex dma_tx_lock;
+ struct dma_chan *dma_rx_chan;
+ struct mutex dma_rx_lock;
+
+ struct mutex mmio_lock;
+
+ /* PCI endpoint function configfs attributes. */
+ struct config_group group;
+ __le16 portid;
+ char subsysnqn[NVMF_NQN_SIZE];
+ unsigned int mdts_kb;
+};
+
+static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off)
+{
+ __le32 *bar_reg = ctrl->bar + off;
+
+ return le32_to_cpu(READ_ONCE(*bar_reg));
+}
+
+static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off, u32 val)
+{
+ __le32 *bar_reg = ctrl->bar + off;
+
+ WRITE_ONCE(*bar_reg, cpu_to_le32(val));
+}
+
+static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off)
+{
+ return (u64)nvmet_pci_epf_bar_read32(ctrl, off) |
+ ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32);
+}
+
+static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off, u64 val)
+{
+ nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF);
+ nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF);
+}
+
+static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf,
+ u64 pci_addr, size_t size, struct pci_epc_map *map)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
+ pci_addr, size, map);
+}
+
+static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf,
+ struct pci_epc_map *map)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
+}
+
+struct nvmet_pci_epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg)
+{
+ struct nvmet_pci_epf_dma_filter *filter = arg;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev &&
+ (filter->dma_mask & caps.directions);
+}
+
+static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ struct device *dev = &epf->dev;
+ struct nvmet_pci_epf_dma_filter filter;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ mutex_init(&nvme_epf->dma_rx_lock);
+ mutex_init(&nvme_epf->dma_tx_lock);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ filter.dev = epf->epc->dev.parent;
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+ chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
+ if (!chan)
+ goto out_dma_no_rx;
+
+ nvme_epf->dma_rx_chan = chan;
+
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
+ if (!chan)
+ goto out_dma_no_tx;
+
+ nvme_epf->dma_tx_chan = chan;
+
+ nvme_epf->dma_enabled = true;
+
+ dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
+ dma_chan_name(chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+
+ dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
+ dma_chan_name(chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+
+ return;
+
+out_dma_no_tx:
+ dma_release_channel(nvme_epf->dma_rx_chan);
+ nvme_epf->dma_rx_chan = NULL;
+
+out_dma_no_rx:
+ mutex_destroy(&nvme_epf->dma_rx_lock);
+ mutex_destroy(&nvme_epf->dma_tx_lock);
+ nvme_epf->dma_enabled = false;
+
+ dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n");
+}
+
+static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf)
+{
+ if (!nvme_epf->dma_enabled)
+ return;
+
+ dma_release_channel(nvme_epf->dma_tx_chan);
+ nvme_epf->dma_tx_chan = NULL;
+ dma_release_channel(nvme_epf->dma_rx_chan);
+ nvme_epf->dma_rx_chan = NULL;
+ mutex_destroy(&nvme_epf->dma_rx_lock);
+ mutex_destroy(&nvme_epf->dma_tx_lock);
+ nvme_epf->dma_enabled = false;
+}
+
+static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config sconf = {};
+ struct device *dev = &epf->dev;
+ struct device *dma_dev;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+ dma_addr_t dma_addr;
+ struct mutex *lock;
+ int ret;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ lock = &nvme_epf->dma_rx_lock;
+ chan = nvme_epf->dma_rx_chan;
+ sconf.direction = DMA_DEV_TO_MEM;
+ sconf.src_addr = seg->pci_addr;
+ break;
+ case DMA_TO_DEVICE:
+ lock = &nvme_epf->dma_tx_lock;
+ chan = nvme_epf->dma_tx_chan;
+ sconf.direction = DMA_MEM_TO_DEV;
+ sconf.dst_addr = seg->pci_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(lock);
+
+ dma_dev = dmaengine_get_dma_device(chan);
+ dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir);
+ ret = dma_mapping_error(dma_dev, dma_addr);
+ if (ret)
+ goto unlock;
+
+ ret = dmaengine_slave_config(chan, &sconf);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto unmap;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length,
+ sconf.direction, DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto unmap;
+ }
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret);
+ goto unmap;
+ }
+
+ if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
+ dev_err(dev, "DMA transfer failed\n");
+ ret = -EIO;
+ }
+
+ dmaengine_terminate_sync(chan);
+
+unmap:
+ dma_unmap_single(dma_dev, dma_addr, seg->length, dir);
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ u64 pci_addr = seg->pci_addr;
+ u32 length = seg->length;
+ void *buf = seg->buf;
+ struct pci_epc_map map;
+ int ret = -EINVAL;
+
+ /*
+ * Note: MMIO transfers do not need serialization, but this is a
+ * simple way to avoid using too many mapping windows.
+ */
+ mutex_lock(&nvme_epf->mmio_lock);
+
+ while (length) {
+ ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
+ if (ret)
+ break;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ memcpy_fromio(buf, map.virt_addr, map.pci_size);
+ break;
+ case DMA_TO_DEVICE:
+ memcpy_toio(map.virt_addr, buf, map.pci_size);
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ pci_addr += map.pci_size;
+ buf += map.pci_size;
+ length -= map.pci_size;
+
+ nvmet_pci_epf_mem_unmap(nvme_epf, &map);
+ }
+
+unlock:
+ mutex_unlock(&nvme_epf->mmio_lock);
+
+ return ret;
+}
+
+static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ if (nvme_epf->dma_enabled)
+ return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir);
+
+ return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir);
+}
+
+static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl,
+ void *buf, u64 pci_addr, u32 length,
+ enum dma_data_direction dir)
+{
+ struct nvmet_pci_epf_segment seg = {
+ .buf = buf,
+ .pci_addr = pci_addr,
+ .length = length,
+ };
+
+ return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir);
+}
+
+static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ ctrl->irq_vectors = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_irq_vector),
+ GFP_KERNEL);
+ if (!ctrl->irq_vectors)
+ return -ENOMEM;
+
+ mutex_init(&ctrl->irq_lock);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ if (ctrl->irq_vectors) {
+ mutex_destroy(&ctrl->irq_lock);
+ kfree(ctrl->irq_vectors);
+ ctrl->irq_vectors = NULL;
+ }
+}
+
+static struct nvmet_pci_epf_irq_vector *
+nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+ int i;
+
+ lockdep_assert_held(&ctrl->irq_lock);
+
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ iv = &ctrl->irq_vectors[i];
+ if (iv->ref && iv->vector == vector)
+ return iv;
+ }
+
+ return NULL;
+}
+
+static struct nvmet_pci_epf_irq_vector *
+nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+ int i;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
+ if (iv) {
+ iv->ref++;
+ goto unlock;
+ }
+
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ iv = &ctrl->irq_vectors[i];
+ if (!iv->ref)
+ break;
+ }
+
+ if (WARN_ON_ONCE(!iv))
+ goto unlock;
+
+ iv->ref = 1;
+ iv->vector = vector;
+ iv->nr_irqs = 0;
+
+unlock:
+ mutex_unlock(&ctrl->irq_lock);
+
+ return iv;
+}
+
+static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl,
+ u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
+ if (iv) {
+ iv->ref--;
+ if (!iv->ref) {
+ iv->vector = 0;
+ iv->nr_irqs = 0;
+ }
+ }
+
+ mutex_unlock(&ctrl->irq_lock);
+}
+
+static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *cq, bool force)
+{
+ struct nvmet_pci_epf_irq_vector *iv = cq->iv;
+ bool ret;
+
+ if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ return false;
+
+ /* IRQ coalescing for the admin queue is not allowed. */
+ if (!cq->qid)
+ return true;
+
+ if (iv->cd)
+ return true;
+
+ if (force) {
+ ret = iv->nr_irqs > 0;
+ } else {
+ iv->nr_irqs++;
+ ret = iv->nr_irqs >= ctrl->irq_vector_threshold;
+ }
+ if (ret)
+ iv->nr_irqs = 0;
+
+ return ret;
+}
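+
+/*
+ * For example, with the default threshold of 8, a burst of 20 CQEs on an
+ * I/O CQ raises an interrupt after the 8th and 16th entries; the last 4
+ * are signaled later by a forced raise (force == true), which fires only
+ * if interrupts are still pending.
+ */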
+
+static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *cq, bool force)
+{
+ struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
+ struct pci_epf *epf = nvme_epf->epf;
+ int ret = 0;
+
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force))
+ goto unlock;
+
+ switch (nvme_epf->irq_type) {
+ case PCI_IRQ_MSIX:
+ case PCI_IRQ_MSI:
+ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
+ nvme_epf->irq_type, cq->vector + 1);
+ if (!ret)
+ break;
+ /*
+ * If we got an error, it is likely because the host is using
+ * legacy IRQs (e.g. the BIOS or GRUB).
+ */
+ fallthrough;
+ case PCI_IRQ_INTX:
+ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
+ PCI_IRQ_INTX, 0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret);
+
+unlock:
+ mutex_unlock(&ctrl->irq_lock);
+}
+
+static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)
+{
+ return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
+}
+
+static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
+
+static struct nvmet_pci_epf_iod *
+nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl;
+ struct nvmet_pci_epf_iod *iod;
+
+ iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);
+ if (unlikely(!iod))
+ return NULL;
+
+ memset(iod, 0, sizeof(*iod));
+ iod->req.cmd = &iod->cmd;
+ iod->req.cqe = &iod->cqe;
+ iod->req.port = ctrl->port;
+ iod->ctrl = ctrl;
+ iod->sq = sq;
+ iod->cq = &ctrl->cq[sq->qid];
+ INIT_LIST_HEAD(&iod->link);
+ iod->dma_dir = DMA_NONE;
+ INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
+ init_completion(&iod->done);
+
+ return iod;
+}
+
+/*
+ * Allocate or grow a command table of PCI segments.
+ */
+static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod,
+ int nsegs)
+{
+ struct nvmet_pci_epf_segment *segs;
+ int nr_segs = iod->nr_data_segs + nsegs;
+
+ segs = krealloc(iod->data_segs,
+ nr_segs * sizeof(struct nvmet_pci_epf_segment),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!segs)
+ return -ENOMEM;
+
+ iod->nr_data_segs = nr_segs;
+ iod->data_segs = segs;
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
+{
+ int i;
+
+ if (iod->data_segs) {
+ for (i = 0; i < iod->nr_data_segs; i++)
+ kfree(iod->data_segs[i].buf);
+ if (iod->data_segs != &iod->data_seg)
+ kfree(iod->data_segs);
+ }
+ if (iod->data_sgt.nents > 1)
+ sg_free_table(&iod->data_sgt);
+ mempool_free(iod, &iod->ctrl->iod_pool);
+}
+
+static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
+ struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
+ int i, ret;
+
+ /* Split the data transfer according to the PCI segments. */
+ for (i = 0; i < iod->nr_data_segs; i++, seg++) {
+ ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
+ if (ret) {
+ iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl,
+ u64 prp)
+{
+ return prp & ctrl->mps_mask;
+}
+
+static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl,
+ u64 prp)
+{
+ return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp);
+}
+
+/*
+ * Transfer a PRP list from the host and return the number of prps.
+ */
+static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
+ size_t xfer_len, __le64 *prps)
+{
+ size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
+ u32 length;
+ int ret;
+
+ /*
+ * Compute the number of PRPs required for the number of bytes to
+ * transfer (xfer_len). If that many entries do not fit in the memory
+ * page pointed to by the PRP list pointer, only return the entries that
+ * fit in that page: the last PRP in it will itself be a PRP list
+ * pointer to the remaining PRPs.
+ */
+ length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
+ ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ return length >> 3;
+}
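+
+/*
+ * E.g. with a 4 KB memory page size, a page-aligned PRP list holds up to
+ * 512 entries of 8 B each; length >> 3 converts the bytes transferred
+ * back into an entry count.
+ */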
+
+static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ struct nvmet_pci_epf_segment *seg;
+ size_t size = 0, ofst, prp_size, xfer_len;
+ size_t transfer_len = iod->data_len;
+ int nr_segs, nr_prps = 0;
+ u64 pci_addr, prp;
+ int i = 0, ret;
+ __le64 *prps;
+
+ prps = kzalloc(ctrl->mps, GFP_KERNEL);
+ if (!prps)
+ goto err_internal;
+
+ /*
+ * Allocate PCI segments for the command: this considers the worst case
+ * scenario where all PRPs are discontiguous, so get as many segments as
+ * there can be PRPs. In practice, we will most often need far fewer PCI
+ * segments than PRPs.
+ */
+ prp = le64_to_cpu(cmd->common.dptr.prp1);
+ if (!prp)
+ goto err_invalid_field;
+
+ ofst = nvmet_pci_epf_prp_ofst(ctrl, prp);
+ nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift;
+
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
+ if (ret)
+ goto err_internal;
+
+ /* Set the first segment using prp1. */
+ seg = &iod->data_segs[0];
+ seg->pci_addr = prp;
+ seg->length = nvmet_pci_epf_prp_size(ctrl, prp);
+
+ size = seg->length;
+ pci_addr = prp + size;
+ nr_segs = 1;
+
+ /*
+ * Now build the PCI address segments using the PRP lists, starting
+ * from prp2.
+ */
+ prp = le64_to_cpu(cmd->common.dptr.prp2);
+ if (!prp)
+ goto err_invalid_field;
+
+ while (size < transfer_len) {
+ xfer_len = transfer_len - size;
+
+ if (!nr_prps) {
+ nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
+ xfer_len, prps);
+ if (nr_prps < 0)
+ goto err_internal;
+
+ i = 0;
+ ofst = 0;
+ }
+
+ /* Current entry */
+ prp = le64_to_cpu(prps[i]);
+ if (!prp)
+ goto err_invalid_field;
+
+ /* Did we reach the last PRP entry of the list? */
+ if (xfer_len > ctrl->mps && i == nr_prps - 1) {
+ /* We need more PRPs: PRP is a list pointer. */
+ nr_prps = 0;
+ continue;
+ }
+
+ /* Only the first PRP is allowed to have an offset. */
+ if (nvmet_pci_epf_prp_ofst(ctrl, prp))
+ goto err_invalid_offset;
+
+ if (prp != pci_addr) {
+ /* Discontiguous prp: new segment. */
+ nr_segs++;
+ if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs))
+ goto err_internal;
+
+ seg++;
+ seg->pci_addr = prp;
+ seg->length = 0;
+ pci_addr = prp;
+ }
+
+ prp_size = min_t(size_t, ctrl->mps, xfer_len);
+ seg->length += prp_size;
+ pci_addr += prp_size;
+ size += prp_size;
+
+ i++;
+ }
+
+ iod->nr_data_segs = nr_segs;
+ ret = 0;
+
+ if (size != transfer_len) {
+ dev_err(ctrl->dev,
+ "PRPs transfer length mismatch: got %zu B, need %zu B\n",
+ size, transfer_len);
+ goto err_internal;
+ }
+
+ kfree(prps);
+
+ return 0;
+
+err_invalid_offset:
+ dev_err(ctrl->dev, "PRPs list invalid offset\n");
+ iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ goto err;
+
+err_invalid_field:
+ dev_err(ctrl->dev, "PRPs list invalid field\n");
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto err;
+
+err_internal:
+ dev_err(ctrl->dev, "PRPs list internal error\n");
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+err:
+ kfree(prps);
+ return -EINVAL;
+}
+
+static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ size_t transfer_len = iod->data_len;
+ int ret, nr_segs = 1;
+ u64 prp1, prp2 = 0;
+ size_t prp1_size;
+
+ prp1 = le64_to_cpu(cmd->common.dptr.prp1);
+ prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1);
+
+ /* For commands crossing a page boundary, we should have prp2. */
+ if (transfer_len > prp1_size) {
+ prp2 = le64_to_cpu(cmd->common.dptr.prp2);
+ if (!prp2) {
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+ if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) {
+ iod->status =
+ NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+ if (prp2 != prp1 + prp1_size)
+ nr_segs = 2;
+ }
+
+ if (nr_segs == 1) {
+ iod->nr_data_segs = 1;
+ iod->data_segs = &iod->data_seg;
+ iod->data_segs[0].pci_addr = prp1;
+ iod->data_segs[0].length = transfer_len;
+ return 0;
+ }
+
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
+ if (ret) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return ret;
+ }
+
+ iod->data_segs[0].pci_addr = prp1;
+ iod->data_segs[0].length = prp1_size;
+ iod->data_segs[1].pci_addr = prp2;
+ iod->data_segs[1].length = transfer_len - prp1_size;
+
+ return 0;
+}
+
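+/*
+ * Parse the PRPs of a command: if the data, including the offset of the
+ * first PRP, fits in two memory pages, prp2 (if present) is a second
+ * data pointer and the simple path applies; otherwise prp2 is a PRP
+ * list pointer.
+ */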
+static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1);
+ size_t ofst;
+
+ /* Get the PCI address segments for the command using its PRPs. */
+ ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1);
+ if (ofst & 0x3) {
+ iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+
+ if (iod->data_len + ofst <= ctrl->mps * 2)
+ return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod);
+
+ return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod);
+}
+
+/*
+ * Transfer an SGL segment from the host and return the number of data
+ * descriptors and the next segment descriptor, if any.
+ */
+static struct nvme_sgl_desc *
+nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvme_sgl_desc *desc, unsigned int *nr_sgls)
+{
+ struct nvme_sgl_desc *sgls;
+ u32 length = le32_to_cpu(desc->length);
+ int nr_descs, ret;
+ void *buf;
+
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ kfree(buf);
+ return NULL;
+ }
+
+ sgls = buf;
+ nr_descs = length / sizeof(struct nvme_sgl_desc);
+ if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
+ sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
+ /*
+ * We have another SGL segment following this one: do not count
+ * it as a regular data SGL descriptor and return it to the
+ * caller.
+ */
+ *desc = sgls[nr_descs - 1];
+ nr_descs--;
+ } else {
+ /* We do not have another SGL segment after this one. */
+ desc->length = 0;
+ }
+
+ *nr_sgls = nr_descs;
+
+ return sgls;
+}
+
+static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ struct nvme_sgl_desc seg = cmd->common.dptr.sgl;
+ struct nvme_sgl_desc *sgls = NULL;
+ int n = 0, i, nr_sgls;
+ int ret;
+
+ /*
+ * We do not support inline data or keyed SGLs, so we should be seeing
+ * only segment descriptors.
+ */
+ if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
+ seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
+ iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
+ return -EIO;
+ }
+
+ while (seg.length) {
+ sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls);
+ if (!sgls) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return -EIO;
+ }
+
+ /* Grow the PCI segment table as needed. */
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls);
+ if (ret) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ /*
+ * Parse the SGL descriptors to build the PCI segment table,
+ * checking the descriptor type as we go.
+ */
+ for (i = 0; i < nr_sgls; i++) {
+ if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
+ iod->status = NVME_SC_SGL_INVALID_TYPE |
+ NVME_STATUS_DNR;
+ goto out;
+ }
+ iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
+ iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
+ n++;
+ }
+
+ kfree(sgls);
+ }
+
+ out:
+ if (iod->status != NVME_SC_SUCCESS) {
+ kfree(sgls);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl;
+
+ if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
+ /* Single data descriptor case. */
+ iod->nr_data_segs = 1;
+ iod->data_segs = &iod->data_seg;
+ iod->data_seg.pci_addr = le64_to_cpu(sgl->addr);
+ iod->data_seg.length = le32_to_cpu(sgl->length);
+ return 0;
+ }
+
+ return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod);
+}
+
+static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ struct nvmet_req *req = &iod->req;
+ struct nvmet_pci_epf_segment *seg;
+ struct scatterlist *sg;
+ int ret, i;
+
+ if (iod->data_len > ctrl->mdts) {
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+
+ /*
+ * Get the PCI address segments for the command data buffer using either
+ * its SGLs or PRPs.
+ */
+ if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
+ ret = nvmet_pci_epf_iod_parse_sgls(iod);
+ else
+ ret = nvmet_pci_epf_iod_parse_prps(iod);
+ if (ret)
+ return ret;
+
+ /* Get a command buffer using SGLs matching the PCI segments. */
+ if (iod->nr_data_segs == 1) {
+ sg_init_table(&iod->data_sgl, 1);
+ iod->data_sgt.sgl = &iod->data_sgl;
+ iod->data_sgt.nents = 1;
+ iod->data_sgt.orig_nents = 1;
+ } else {
+ ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
+ GFP_KERNEL);
+ if (ret)
+ goto err_nomem;
+ }
+
+ for_each_sgtable_sg(&iod->data_sgt, sg, i) {
+ seg = &iod->data_segs[i];
+ seg->buf = kmalloc(seg->length, GFP_KERNEL);
+ if (!seg->buf)
+ goto err_nomem;
+ sg_set_buf(sg, seg->buf, seg->length);
+ }
+
+ req->transfer_len = iod->data_len;
+ req->sg = iod->data_sgt.sgl;
+ req->sg_cnt = iod->data_sgt.nents;
+
+ return 0;
+
+err_nomem:
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return -ENOMEM;
+}
+
+static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_queue *cq = iod->cq;
+ unsigned long flags;
+
+ /* Print an error message for failed commands, except AENs. */
+ iod->status = le16_to_cpu(iod->cqe.status) >> 1;
+ if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
+ dev_err(iod->ctrl->dev,
+ "CQ[%d]: Command %s (0x%x) status 0x%0x\n",
+ iod->sq->qid, nvmet_pci_epf_iod_name(iod),
+ iod->cmd.common.opcode, iod->status);
+
+ /*
+ * Add the command to the list of completed commands and schedule the
+ * CQ work.
+ */
+ spin_lock_irqsave(&cq->lock, flags);
+ list_add_tail(&iod->link, &cq->list);
+ queue_delayed_work(system_highpri_wq, &cq->work, 0);
+ spin_unlock_irqrestore(&cq->lock, flags);
+}
+
+static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
+{
+ struct nvmet_pci_epf_iod *iod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ while (!list_empty(&queue->list)) {
+ iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
+ link);
+ list_del_init(&iod->link);
+ nvmet_pci_epf_free_iod(iod);
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static int nvmet_pci_epf_add_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_add_tail(&port->entry, &nvmet_pci_epf_ports);
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+ return 0;
+}
+
+static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_del_init(&port->entry);
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+}
+
+static struct nvmet_port *
+nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
+{
+ struct nvmet_port *p, *port = NULL;
+
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
+ if (p->disc_addr.portid == portid) {
+ port = p;
+ break;
+ }
+ }
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+
+ return port;
+}
+
+static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_pci_epf_iod *iod =
+ container_of(req, struct nvmet_pci_epf_iod, req);
+
+ iod->status = le16_to_cpu(req->cqe->status) >> 1;
+
+ /* If we have no data to transfer, directly complete the command. */
+ if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
+ nvmet_pci_epf_complete_iod(iod);
+ return;
+ }
+
+ complete(&iod->done);
+}
+
+static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;
+
+ return ilog2(ctrl->mdts) - page_shift;
+}
+
+static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
+ u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
+ u16 status;
+
+ if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ if (!(flags & NVME_QUEUE_PHYS_CONTIG))
+ return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
+
+ if (flags & NVME_CQ_IRQ_ENABLED)
+ set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
+
+ cq->pci_addr = pci_addr;
+ cq->qid = cqid;
+ cq->depth = qsize + 1;
+ cq->vector = vector;
+ cq->head = 0;
+ cq->tail = 0;
+ cq->phase = 1;
+ cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
+ nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);
+
+ if (!cqid)
+ cq->qes = sizeof(struct nvme_completion);
+ else
+ cq->qes = ctrl->io_cqes;
+ cq->pci_size = cq->qes * cq->depth;
+
+ cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+ if (!cq->iv) {
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto err;
+ }
+
+ status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
+ if (status != NVME_SC_SUCCESS)
+ goto err;
+
+ dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
+ cqid, qsize, cq->qes, cq->vector);
+
+ return NVME_SC_SUCCESS;
+
+err:
+ clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
+ clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+ return status;
+}
+
+static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
+
+ if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ cancel_delayed_work_sync(&cq->work);
+ nvmet_pci_epf_drain_queue(cq);
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
+ u16 sqid, u16 flags, u16 qsize, u64 pci_addr)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
+ u16 status;
+
+ if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ if (!(flags & NVME_QUEUE_PHYS_CONTIG))
+ return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
+
+ sq->pci_addr = pci_addr;
+ sq->qid = sqid;
+ sq->depth = qsize + 1;
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 0;
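+	/* With a 4B doorbell stride, the SQ doorbell is at DBS + (2 * sqid * 4). */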
+ sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32));
+ nvmet_pci_epf_bar_write32(ctrl, sq->db, 0);
+ if (!sqid)
+ sq->qes = 1UL << NVME_ADM_SQES;
+ else
+ sq->qes = ctrl->io_sqes;
+ sq->pci_size = sq->qes * sq->depth;
+
+ status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
+ if (status != NVME_SC_SUCCESS)
+ goto out_clear_bit;
+
+ sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
+ min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
+ if (!sq->iod_wq) {
+ dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid);
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto out_destroy_sq;
+ }
+
+ dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
+ sqid, qsize, sq->qes);
+
+ return NVME_SC_SUCCESS;
+
+out_destroy_sq:
+ nvmet_sq_destroy(&sq->nvme_sq);
+out_clear_bit:
+ clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+ return status;
+}
+
+static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
+
+ if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ flush_workqueue(sq->iod_wq);
+ destroy_workqueue(sq->iod_wq);
+ sq->iod_wq = NULL;
+
+ nvmet_pci_epf_drain_queue(sq);
+
+ if (sq->nvme_sq.ctrl)
+ nvmet_sq_destroy(&sq->nvme_sq);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl,
+ u8 feat, void *data)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_feat_arbitration *arb;
+ struct nvmet_feat_irq_coalesce *irqc;
+ struct nvmet_feat_irq_config *irqcfg;
+ struct nvmet_pci_epf_irq_vector *iv;
+ u16 status;
+
+ switch (feat) {
+ case NVME_FEAT_ARBITRATION:
+ arb = data;
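+		/* An sq_ab of 0 (no limit) is reported as an arbitration burst of 0x7. */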
+ if (!ctrl->sq_ab)
+ arb->ab = 0x7;
+ else
+ arb->ab = ilog2(ctrl->sq_ab);
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_COALESCE:
+ irqc = data;
+ irqc->thr = ctrl->irq_vector_threshold;
+ irqc->time = 0;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_CONFIG:
+ irqcfg = data;
+ mutex_lock(&ctrl->irq_lock);
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
+ if (iv) {
+ irqcfg->cd = iv->cd;
+ status = NVME_SC_SUCCESS;
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+ mutex_unlock(&ctrl->irq_lock);
+ return status;
+
+ default:
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+}
+
+static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
+ u8 feat, void *data)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_feat_arbitration *arb;
+ struct nvmet_feat_irq_coalesce *irqc;
+ struct nvmet_feat_irq_config *irqcfg;
+ struct nvmet_pci_epf_irq_vector *iv;
+ u16 status;
+
+ switch (feat) {
+ case NVME_FEAT_ARBITRATION:
+ arb = data;
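+		/* An arbitration burst of 0x7 means no limit on commands fetched. */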
+ if (arb->ab == 0x7)
+ ctrl->sq_ab = 0;
+ else
+ ctrl->sq_ab = 1 << arb->ab;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_COALESCE:
+ /*
+ * Since we do not implement precise IRQ coalescing timing,
+ * ignore the time field.
+ */
+ irqc = data;
+ ctrl->irq_vector_threshold = irqc->thr + 1;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_CONFIG:
+ irqcfg = data;
+ mutex_lock(&ctrl->irq_lock);
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
+ if (iv) {
+ iv->cd = irqcfg->cd;
+ status = NVME_SC_SUCCESS;
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+ mutex_unlock(&ctrl->irq_lock);
+ return status;
+
+ default:
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+}
+
+static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_PCI,
+ .add_port = nvmet_pci_epf_add_port,
+ .remove_port = nvmet_pci_epf_remove_port,
+ .queue_response = nvmet_pci_epf_queue_response,
+ .get_mdts = nvmet_pci_epf_get_mdts,
+ .create_cq = nvmet_pci_epf_create_cq,
+ .delete_cq = nvmet_pci_epf_delete_cq,
+ .create_sq = nvmet_pci_epf_create_sq,
+ .delete_sq = nvmet_pci_epf_delete_sq,
+ .get_feature = nvmet_pci_epf_get_feat,
+ .set_feature = nvmet_pci_epf_set_feat,
+};
+
+static void nvmet_pci_epf_cq_work(struct work_struct *work);
+
+static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ unsigned int qid, bool sq)
+{
+ struct nvmet_pci_epf_queue *queue;
+
+ if (sq) {
+ queue = &ctrl->sq[qid];
+ set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags);
+ } else {
+ queue = &ctrl->cq[qid];
+ INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
+ }
+ queue->ctrl = ctrl;
+ queue->qid = qid;
+ spin_lock_init(&queue->lock);
+ INIT_LIST_HEAD(&queue->list);
+}
+
+static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ unsigned int qid;
+
+ ctrl->sq = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
+ if (!ctrl->sq)
+ return -ENOMEM;
+
+ ctrl->cq = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
+ if (!ctrl->cq) {
+ kfree(ctrl->sq);
+ ctrl->sq = NULL;
+ return -ENOMEM;
+ }
+
+ for (qid = 0; qid < ctrl->nr_queues; qid++) {
+ nvmet_pci_epf_init_queue(ctrl, qid, true);
+ nvmet_pci_epf_init_queue(ctrl, qid, false);
+ }
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ kfree(ctrl->sq);
+ ctrl->sq = NULL;
+ kfree(ctrl->cq);
+ ctrl->cq = NULL;
+}
+
+static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *queue)
+{
+ struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
+ int ret;
+
+ ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr,
+ queue->pci_size, &queue->pci_map);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n",
+ queue->qid, ret);
+ return ret;
+ }
+
+ if (queue->pci_map.pci_size < queue->pci_size) {
+ dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
+ queue->qid);
+ nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *queue)
+{
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map);
+}
+
+static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_iod *iod =
+ container_of(work, struct nvmet_pci_epf_iod, work);
+ struct nvmet_req *req = &iod->req;
+ int ret;
+
+ if (!iod->ctrl->link_up) {
+ nvmet_pci_epf_free_iod(iod);
+ return;
+ }
+
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) {
+ iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq,
+ &nvmet_pci_epf_fabrics_ops))
+ goto complete;
+
+ iod->data_len = nvmet_req_transfer_len(req);
+ if (iod->data_len) {
+ /*
+ * Get the data DMA transfer direction. Here "device" means the
+ * PCI root-complex host.
+ */
+ if (nvme_is_write(&iod->cmd))
+ iod->dma_dir = DMA_FROM_DEVICE;
+ else
+ iod->dma_dir = DMA_TO_DEVICE;
+
+ /*
+ * Setup the command data buffer and get the command data from
+ * the host if needed.
+ */
+ ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
+ if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
+ ret = nvmet_pci_epf_transfer_iod_data(iod);
+ if (ret) {
+ nvmet_req_uninit(req);
+ goto complete;
+ }
+ }
+
+ req->execute(req);
+
+ /*
+ * If we do not have data to transfer after the command execution
+ * finishes, nvmet_pci_epf_queue_response() will complete the command
+ * directly. No need to wait for the completion in this case.
+ */
+ if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
+ return;
+
+ wait_for_completion(&iod->done);
+
+ if (iod->status == NVME_SC_SUCCESS) {
+ WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
+ nvmet_pci_epf_transfer_iod_data(iod);
+ }
+
+complete:
+ nvmet_pci_epf_complete_iod(iod);
+}
+
+static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *sq)
+{
+ struct nvmet_pci_epf_iod *iod;
+ int ret, n = 0;
+
+ sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
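+	/* Fetch up to sq_ab commands, or all pending ones if sq_ab is 0. */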
+ while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
+ iod = nvmet_pci_epf_alloc_iod(sq);
+ if (!iod)
+ break;
+
+ /* Get the NVMe command submitted by the host. */
+ ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
+ sq->pci_addr + sq->head * sq->qes,
+ sq->qes, DMA_FROM_DEVICE);
+ if (ret) {
+			/* We cannot report the failure to the host: drop the command. */
+ nvmet_pci_epf_free_iod(iod);
+ break;
+ }
+
+ dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
+ sq->qid, sq->head, sq->tail,
+ nvmet_pci_epf_iod_name(iod));
+
+ sq->head++;
+ if (sq->head == sq->depth)
+ sq->head = 0;
+ n++;
+
+ queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
+
+ sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
+ }
+
+ return n;
+}
+
+static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_ctrl *ctrl =
+ container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
+ struct nvmet_pci_epf_queue *sq;
+ unsigned long last = 0;
+ int i, nr_sqs;
+
+ while (ctrl->link_up && ctrl->enabled) {
+ nr_sqs = 0;
+ /* Do round-robin arbitration. */
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ sq = &ctrl->sq[i];
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ continue;
+ if (nvmet_pci_epf_process_sq(ctrl, sq))
+ nr_sqs++;
+ }
+
+ if (nr_sqs) {
+ last = jiffies;
+ continue;
+ }
+
+ /*
+ * If we have not received any command on any queue for more
+ * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and
+ * reschedule. This avoids "burning" a CPU when the controller
+ * is idle for a long time.
+ */
+ if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE))
+ break;
+
+ cpu_relax();
+ }
+
+ schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_cq_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_queue *cq =
+ container_of(work, struct nvmet_pci_epf_queue, work.work);
+ struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl;
+ struct nvme_completion *cqe;
+ struct nvmet_pci_epf_iod *iod;
+ unsigned long flags;
+ int ret, n = 0;
+
+ ret = nvmet_pci_epf_map_queue(ctrl, cq);
+ if (ret)
+ goto again;
+
+ while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
+
+ /* Check that the CQ is not full. */
+ cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db);
+ if (cq->head == cq->tail + 1) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ spin_lock_irqsave(&cq->lock, flags);
+ iod = list_first_entry_or_null(&cq->list,
+ struct nvmet_pci_epf_iod, link);
+ if (iod)
+ list_del_init(&iod->link);
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (!iod)
+ break;
+
+ /* Post the IOD completion entry. */
+ cqe = &iod->cqe;
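+		/* Bit 0 of the completion entry status field is the phase tag. */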
+ cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
+
+ dev_dbg(ctrl->dev,
+ "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
+ cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
+ le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
+ cq->phase);
+
+ memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes,
+ cqe, cq->qes);
+
+ cq->tail++;
+ if (cq->tail >= cq->depth) {
+ cq->tail = 0;
+ cq->phase ^= 1;
+ }
+
+ nvmet_pci_epf_free_iod(iod);
+
+ /* Signal the host. */
+ nvmet_pci_epf_raise_irq(ctrl, cq, false);
+ n++;
+ }
+
+ nvmet_pci_epf_unmap_queue(ctrl, cq);
+
+ /*
+ * We do not support precise IRQ coalescing time (100ns units as per
+	 * the NVMe specification). So if we have posted completion entries without
+ * reaching the interrupt coalescing threshold, raise an interrupt.
+ */
+ if (n)
+ nvmet_pci_epf_raise_irq(ctrl, cq, true);
+
+again:
+ if (ret < 0)
+ queue_delayed_work(system_highpri_wq, &cq->work,
+ NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
+}
+
+static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ u64 pci_addr, asq, acq;
+ u32 aqa;
+ u16 status, qsize;
+
+ if (ctrl->enabled)
+ return 0;
+
+ dev_info(ctrl->dev, "Enabling controller\n");
+
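+	/* CC.MPS gives the host memory page size: 2^(12 + MPS) bytes. */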
+ ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12;
+ ctrl->mps = 1UL << ctrl->mps_shift;
+ ctrl->mps_mask = ctrl->mps - 1;
+
+ ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc);
+ if (ctrl->io_sqes < sizeof(struct nvme_command)) {
+ dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
+ ctrl->io_sqes, sizeof(struct nvme_command));
+ return -EINVAL;
+ }
+
+ ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
+ if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
+ dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
+			ctrl->io_cqes, sizeof(struct nvme_completion));
+ return -EINVAL;
+ }
+
+ /* Create the admin queue. */
+ aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA);
+ asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
+ acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);
+
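+	/* ACQS is AQA bits 27:16 (0's based size); ACQB is ACQ bits 63:12. */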
+ qsize = (aqa & 0x0fff0000) >> 16;
+ pci_addr = acq & GENMASK_ULL(63, 12);
+ status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0,
+ NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG,
+ qsize, pci_addr, 0);
+ if (status != NVME_SC_SUCCESS) {
+ dev_err(ctrl->dev, "Failed to create admin completion queue\n");
+ return -EINVAL;
+ }
+
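+	/* ASQS is AQA bits 11:0 (0's based size); ASQB is ASQ bits 63:12. */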
+ qsize = aqa & 0x00000fff;
+ pci_addr = asq & GENMASK_ULL(63, 12);
+ status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG,
+ qsize, pci_addr);
+ if (status != NVME_SC_SUCCESS) {
+ dev_err(ctrl->dev, "Failed to create admin submission queue\n");
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+ return -EINVAL;
+ }
+
+ ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
+ ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
+ ctrl->enabled = true;
+
+ /* Start polling the controller SQs. */
+ schedule_delayed_work(&ctrl->poll_sqs, 0);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ int qid;
+
+ if (!ctrl->enabled)
+ return;
+
+ dev_info(ctrl->dev, "Disabling controller\n");
+
+ ctrl->enabled = false;
+ cancel_delayed_work_sync(&ctrl->poll_sqs);
+
+ /* Delete all I/O queues first. */
+ for (qid = 1; qid < ctrl->nr_queues; qid++)
+ nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);
+
+ for (qid = 1; qid < ctrl->nr_queues; qid++)
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);
+
+ /* Delete the admin queue last. */
+ nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+}
+
+static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_ctrl *ctrl =
+ container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
+ u32 old_cc, new_cc;
+ int ret;
+
+ if (!ctrl->tctrl)
+ return;
+
+ old_cc = ctrl->cc;
+ new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
+ ctrl->cc = new_cc;
+
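+	/* Handle CC.EN and CC.SHN transitions and update CSTS accordingly. */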
+ if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
+ ret = nvmet_pci_epf_enable_ctrl(ctrl);
+ if (ret)
+ return;
+ ctrl->csts |= NVME_CSTS_RDY;
+ }
+
+ if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) {
+ nvmet_pci_epf_disable_ctrl(ctrl);
+ ctrl->csts &= ~NVME_CSTS_RDY;
+ }
+
+ if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
+ nvmet_pci_epf_disable_ctrl(ctrl);
+ ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+ }
+
+ if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
+ ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
+
+ nvmet_update_cc(ctrl->tctrl, ctrl->cc);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+
+ schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ struct nvmet_ctrl *tctrl = ctrl->tctrl;
+
+ ctrl->bar = ctrl->nvme_epf->reg_bar;
+
+ /* Copy the target controller capabilities as a base. */
+ ctrl->cap = tctrl->cap;
+
+ /* Contiguous Queues Required (CQR). */
+ ctrl->cap |= 0x1ULL << 16;
+
+	/* Set the doorbell stride to 4B (DSTRD field, bits 35:32, cleared to 0). */
+ ctrl->cap &= ~GENMASK_ULL(35, 32);
+
+ /* Clear NVM Subsystem Reset Supported (NSSRS). */
+ ctrl->cap &= ~(0x1ULL << 36);
+
+ /* Clear Boot Partition Support (BPS). */
+ ctrl->cap &= ~(0x1ULL << 45);
+
+ /* Clear Persistent Memory Region Supported (PMRS). */
+ ctrl->cap &= ~(0x1ULL << 56);
+
+ /* Clear Controller Memory Buffer Supported (CMBS). */
+ ctrl->cap &= ~(0x1ULL << 57);
+
+ /* Controller configuration. */
+ ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE);
+
+ /* Controller status. */
+ ctrl->csts = ctrl->tctrl->csts;
+
+ nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+}
+
+static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
+ unsigned int max_nr_queues)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+ struct nvmet_alloc_ctrl_args args = {};
+ char hostnqn[NVMF_NQN_SIZE];
+ uuid_t id;
+ int ret;
+
+ memset(ctrl, 0, sizeof(*ctrl));
+ ctrl->dev = &nvme_epf->epf->dev;
+ mutex_init(&ctrl->irq_lock);
+ ctrl->nvme_epf = nvme_epf;
+ ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
+ INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
+ INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);
+
+ ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
+ max_nr_queues * NVMET_MAX_QUEUE_SIZE,
+ sizeof(struct nvmet_pci_epf_iod));
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
+ return ret;
+ }
+
+ ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
+ if (!ctrl->port) {
+ dev_err(ctrl->dev, "Port not found\n");
+ ret = -EINVAL;
+ goto out_mempool_exit;
+ }
+
+ /* Create the target controller. */
+ uuid_gen(&id);
+ snprintf(hostnqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
+ args.port = ctrl->port;
+ args.subsysnqn = nvme_epf->subsysnqn;
+ memset(&id, 0, sizeof(uuid_t));
+ args.hostid = &id;
+ args.hostnqn = hostnqn;
+ args.ops = &nvmet_pci_epf_fabrics_ops;
+
+ ctrl->tctrl = nvmet_alloc_ctrl(&args);
+ if (!ctrl->tctrl) {
+ dev_err(ctrl->dev, "Failed to create target controller\n");
+ ret = -ENOMEM;
+ goto out_mempool_exit;
+ }
+ ctrl->tctrl->drvdata = ctrl;
+
+ /* We do not support protection information for now. */
+ if (ctrl->tctrl->pi_support) {
+ dev_err(ctrl->dev,
+ "Protection information (PI) is not supported\n");
+ ret = -ENOTSUPP;
+ goto out_put_ctrl;
+ }
+
+ /* Allocate our queues, up to the maximum number. */
+ ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
+ ret = nvmet_pci_epf_alloc_queues(ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
+ /*
+ * Allocate the IRQ vectors descriptors. We cannot have more than the
+ * maximum number of queues.
+ */
+ ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
+ if (ret)
+ goto out_free_queues;
+
+ dev_info(ctrl->dev,
+ "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
+ ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
+ ctrl->mdts);
+
+ /* Initialize BAR 0 using the target controller CAP. */
+ nvmet_pci_epf_init_bar(ctrl);
+
+ return 0;
+
+out_free_queues:
+ nvmet_pci_epf_free_queues(ctrl);
+out_put_ctrl:
+ nvmet_ctrl_put(ctrl->tctrl);
+ ctrl->tctrl = NULL;
+out_mempool_exit:
+ mempool_exit(&ctrl->iod_pool);
+ return ret;
+}
+
+static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ cancel_delayed_work_sync(&ctrl->poll_cc);
+
+ nvmet_pci_epf_disable_ctrl(ctrl);
+}
+
+static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ if (!ctrl->tctrl)
+ return;
+
+ dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
+ ctrl->tctrl->subsys->subsysnqn);
+
+ nvmet_pci_epf_stop_ctrl(ctrl);
+
+ nvmet_pci_epf_free_queues(ctrl);
+ nvmet_pci_epf_free_irq_vectors(ctrl);
+
+ nvmet_ctrl_put(ctrl->tctrl);
+ ctrl->tctrl = NULL;
+
+ mempool_exit(&ctrl->iod_pool);
+}
+
+static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ size_t reg_size, reg_bar_size;
+ size_t msix_table_size = 0;
+
+ /*
+	 * The first free BAR will be our register BAR and, per the NVMe
+	 * specification, it must be BAR 0.
+ */
+ if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
+ dev_err(&epf->dev, "BAR 0 is not free\n");
+ return -ENODEV;
+ }
+
+ if (epc_features->bar[BAR_0].only_64bit)
+ epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ /*
+ * Calculate the size of the register bar: NVMe registers first with
+ * enough space for the doorbells, followed by the MSI-X table
+ * if supported.
+ */
+ reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
+ reg_size = ALIGN(reg_size, 8);
+
+ if (epc_features->msix_capable) {
+ size_t pba_size;
+
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
+ nvme_epf->msix_table_offset = reg_size;
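+		/* The PBA stores one pending bit per vector, QWORD aligned. */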
+ pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
+
+ reg_size += msix_table_size + pba_size;
+ }
+
+ if (epc_features->bar[BAR_0].type == BAR_FIXED) {
+ if (reg_size > epc_features->bar[BAR_0].fixed_size) {
+ dev_err(&epf->dev,
+ "BAR 0 size %llu B too small, need %zu B\n",
+ epc_features->bar[BAR_0].fixed_size,
+ reg_size);
+ return -ENOMEM;
+ }
+ reg_bar_size = epc_features->bar[BAR_0].fixed_size;
+ } else {
+ reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
+ }
+
+ nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
+ epc_features, PRIMARY_INTERFACE);
+ if (!nvme_epf->reg_bar) {
+ dev_err(&epf->dev, "Failed to allocate BAR 0\n");
+ return -ENOMEM;
+ }
+ memset(nvme_epf->reg_bar, 0, reg_bar_size);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ if (!nvme_epf->reg_bar)
+ return;
+
+ pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
+ nvme_epf->reg_bar = NULL;
+}
+
+static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[BAR_0]);
+}
+
+static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
+{
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ struct pci_epf *epf = nvme_epf->epf;
+ int ret;
+
+	/* Enable MSI-X if supported, otherwise use MSI. */
+ if (epc_features->msix_capable && epf->msix_interrupts) {
+ ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts, BAR_0,
+ nvme_epf->msix_table_offset);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to configure MSI-X\n");
+ return ret;
+ }
+
+ nvme_epf->nr_vectors = epf->msix_interrupts;
+ nvme_epf->irq_type = PCI_IRQ_MSIX;
+
+ return 0;
+ }
+
+ if (epc_features->msi_capable && epf->msi_interrupts) {
+ ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to configure MSI\n");
+ return ret;
+ }
+
+ nvme_epf->nr_vectors = epf->msi_interrupts;
+ nvme_epf->irq_type = PCI_IRQ_MSI;
+
+ return 0;
+ }
+
+ /* MSI and MSI-X are not supported: fall back to INTx. */
+ nvme_epf->nr_vectors = 1;
+ nvme_epf->irq_type = PCI_IRQ_INTX;
+
+ return 0;
+}
+
+static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+ unsigned int max_nr_queues = NVMET_NR_QUEUES;
+ int ret;
+
+ /* For now, do not support virtual functions. */
+ if (epf->vfunc_no > 0) {
+ dev_err(&epf->dev, "Virtual functions are not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cap the maximum number of queues we can support on the controller
+ * with the number of IRQs we can use.
+ */
+ if (epc_features->msix_capable && epf->msix_interrupts) {
+ dev_info(&epf->dev,
+ "PCI endpoint controller supports MSI-X, %u vectors\n",
+ epf->msix_interrupts);
+ max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
+ } else if (epc_features->msi_capable && epf->msi_interrupts) {
+ dev_info(&epf->dev,
+ "PCI endpoint controller supports MSI, %u vectors\n",
+ epf->msi_interrupts);
+ max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
+ }
+
+ if (max_nr_queues < 2) {
+ dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
+ max_nr_queues);
+ return -EINVAL;
+ }
+
+ /* Create the target controller. */
+ ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to create NVMe PCI target controller (err=%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Set device ID, class, etc. */
+ epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
+ epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
+ ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->header);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to write configuration header (err=%d)\n", ret);
+ goto out_destroy_ctrl;
+ }
+
+ ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[BAR_0]);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
+ goto out_destroy_ctrl;
+ }
+
+ /*
+ * Enable interrupts and start polling the controller BAR if we do not
+ * have a link up notifier.
+ */
+ ret = nvmet_pci_epf_init_irq(nvme_epf);
+ if (ret)
+ goto out_clear_bar;
+
+ if (!epc_features->linkup_notifier) {
+ ctrl->link_up = true;
+ nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
+ }
+
+ return 0;
+
+out_clear_bar:
+ nvmet_pci_epf_clear_bar(nvme_epf);
+out_destroy_ctrl:
+ nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
+ return ret;
+}
+
+static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = false;
+ nvmet_pci_epf_destroy_ctrl(ctrl);
+
+ nvmet_pci_epf_deinit_dma(nvme_epf);
+ nvmet_pci_epf_clear_bar(nvme_epf);
+}
+
+static int nvmet_pci_epf_link_up(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = true;
+ nvmet_pci_epf_start_ctrl(ctrl);
+
+ return 0;
+}
+
+static int nvmet_pci_epf_link_down(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = false;
+ nvmet_pci_epf_stop_ctrl(ctrl);
+
+ return 0;
+}
+
+static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
+ .epc_init = nvmet_pci_epf_epc_init,
+ .epc_deinit = nvmet_pci_epf_epc_deinit,
+ .link_up = nvmet_pci_epf_link_up,
+ .link_down = nvmet_pci_epf_link_down,
+};
+
+static int nvmet_pci_epf_bind(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ const struct pci_epc_features *epc_features;
+ struct pci_epc *epc = epf->epc;
+ int ret;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
+ if (!epc_features) {
+ dev_err(&epf->dev, "epc_features not implemented\n");
+ return -EOPNOTSUPP;
+ }
+ nvme_epf->epc_features = epc_features;
+
+ ret = nvmet_pci_epf_configure_bar(nvme_epf);
+ if (ret)
+ return ret;
+
+ nvmet_pci_epf_init_dma(nvme_epf);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_unbind(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+
+ nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
+
+ if (epc->init_complete) {
+ nvmet_pci_epf_deinit_dma(nvme_epf);
+ nvmet_pci_epf_clear_bar(nvme_epf);
+ }
+
+ nvmet_pci_epf_free_bar(nvme_epf);
+}
+
+static struct pci_epf_header nvme_epf_pci_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .progif_code = 0x02, /* NVM Express */
+ .baseclass_code = PCI_BASE_CLASS_STORAGE,
+ .subclass_code = 0x08, /* Non-Volatile Memory controller */
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static int nvmet_pci_epf_probe(struct pci_epf *epf,
+ const struct pci_epf_device_id *id)
+{
+ struct nvmet_pci_epf *nvme_epf;
+ int ret;
+
+ nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
+ if (!nvme_epf)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
+ if (ret)
+ return ret;
+
+ nvme_epf->epf = epf;
+ nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;
+
+ epf->event_ops = &nvmet_pci_epf_event_ops;
+ epf->header = &nvme_epf_pci_header;
+ epf_set_drvdata(epf, nvme_epf);
+
+ return 0;
+}
+
+#define to_nvme_epf(epf_group) \
+ container_of(epf_group, struct nvmet_pci_epf, group)
+
+static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
+}
+
+static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+ u16 portid;
+
+ /* Do not allow setting this when the function is already started. */
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ if (!len)
+ return -EINVAL;
+
+ if (kstrtou16(page, 0, &portid))
+ return -EINVAL;
+
+ nvme_epf->portid = cpu_to_le16(portid);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, portid);
+
+static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
+}
+
+static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ /* Do not allow setting this when the function is already started. */
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ if (!len)
+ return -EINVAL;
+
+ strscpy(nvme_epf->subsysnqn, page, len);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
+
+static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
+}
+
+static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+ unsigned long mdts_kb;
+ int ret;
+
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ ret = kstrtoul(page, 0, &mdts_kb);
+ if (ret)
+ return ret;
+ if (!mdts_kb)
+ mdts_kb = NVMET_PCI_EPF_MDTS_KB;
+ else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
+ mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;
+
+ if (!is_power_of_2(mdts_kb))
+ return -EINVAL;
+
+ nvme_epf->mdts_kb = mdts_kb;
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);
+
+static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
+ &nvmet_pci_epf_attr_portid,
+ &nvmet_pci_epf_attr_subsysnqn,
+ &nvmet_pci_epf_attr_mdts_kb,
+ NULL,
+};
+
+static const struct config_item_type nvmet_pci_epf_group_type = {
+ .ct_attrs = nvmet_pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+
+ config_group_init_type_name(&nvme_epf->group, "nvme",
+ &nvmet_pci_epf_group_type);
+
+ return &nvme_epf->group;
+}
+
+static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
+ { .name = "nvmet_pci_epf" },
+ {},
+};
+
+static struct pci_epf_ops nvmet_pci_epf_ops = {
+ .bind = nvmet_pci_epf_bind,
+ .unbind = nvmet_pci_epf_unbind,
+ .add_cfs = nvmet_pci_epf_add_cfs,
+};
+
+static struct pci_epf_driver nvmet_pci_epf_driver = {
+ .driver.name = "nvmet_pci_epf",
+ .probe = nvmet_pci_epf_probe,
+ .id_table = nvmet_pci_epf_ids,
+ .ops = &nvmet_pci_epf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init nvmet_pci_epf_init_module(void)
+{
+ int ret;
+
+ ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
+ if (ret)
+ return ret;
+
+ ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
+ if (ret) {
+ pci_epf_unregister_driver(&nvmet_pci_epf_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit nvmet_pci_epf_cleanup_module(void)
+{
+ nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
+ pci_epf_unregister_driver(&nvmet_pci_epf_driver);
+}
+
+module_init(nvmet_pci_epf_init_module);
+module_exit(nvmet_pci_epf_cleanup_module);
+
+MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
+MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3aef35b05111..29a60fabfcc8 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -586,8 +586,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
unsigned int len = sg->length;
- if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
- sg_page(sg), len, sg->offset) != len) {
+ if (bio_add_page(bio, sg_page(sg), len, sg->offset) != len) {
status = NVME_SC_INTERNAL;
goto out_put_bio;
}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c1f1c810e810..2c22a35005ca 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -16,25 +16,10 @@
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
-#include "of_private.h"
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
-#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
+/* Uncomment me to enable of_dump_addr() debugging output */
+// #define DEBUG
-/* Debug utility */
-#ifdef DEBUG
-static void of_dump_addr(const char *s, const __be32 *addr, int na)
-{
- pr_debug("%s", s);
- while (na--)
- pr_cont(" %08x", be32_to_cpu(*(addr++)));
- pr_cont("\n");
-}
-#else
-static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
-#endif
+#include "of_private.h"
/* Callbacks for bus specific translators */
struct of_bus {
@@ -200,17 +185,15 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
{
- u64 end = start;
-
if (overflows_type(start, r->start))
return -EOVERFLOW;
- if (size && check_add_overflow(end, size - 1, &end))
- return -EOVERFLOW;
- if (overflows_type(end, r->end))
- return -EOVERFLOW;
r->start = start;
- r->end = end;
+
+ if (!size)
+ r->end = wrapping_sub(typeof(r->end), r->start, 1);
+ else if (size && check_add_overflow(r->start, size - 1, &r->end))
+ return -EOVERFLOW;
return 0;
}
@@ -340,6 +323,15 @@ static int of_bus_default_flags_match(struct device_node *np)
return of_property_present(np, "#address-cells") && (of_bus_n_addr_cells(np) == 3);
}
+static int of_bus_default_match(struct device_node *np)
+{
+ /*
+ * Check for presence first since of_bus_n_addr_cells() will warn when
+ * walking parent nodes.
+ */
+ return of_property_present(np, "#address-cells");
+}
+
/*
* Array of bus specific translators
*/
@@ -384,7 +376,7 @@ static const struct of_bus of_busses[] = {
{
.name = "default",
.addresses = "reg",
- .match = NULL,
+ .match = of_bus_default_match,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.translate = of_bus_default_translate,
@@ -399,7 +391,6 @@ static const struct of_bus *of_match_bus(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(of_busses); i++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
- BUG();
return NULL;
}
@@ -521,6 +512,8 @@ static u64 __of_translate_address(struct device_node *node,
if (parent == NULL)
return OF_BAD_ADDR;
bus = of_match_bus(parent);
+ if (!bus)
+ return OF_BAD_ADDR;
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
@@ -564,6 +557,8 @@ static u64 __of_translate_address(struct device_node *node,
/* Get new parent bus and counts */
pbus = of_match_bus(parent);
+ if (!pbus)
+ return OF_BAD_ADDR;
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %pOF\n", dev);
@@ -703,7 +698,7 @@ const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
/* match the parent's bus type */
bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci") && (bar_no >= 0))
+ if (!bus || (strcmp(bus->name, "pci") && (bar_no >= 0)))
return NULL;
/* Get "reg" or "assigned-addresses" property */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 6f5abea2462a..af6c68bbb427 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -894,10 +894,10 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
/* The path could begin with an alias */
if (*path != '/') {
int len;
- const char *p = separator;
+ const char *p = strchrnul(path, '/');
- if (!p)
- p = strchrnul(path, '/');
+ if (separator && separator < p)
+ p = separator;
len = p - path;
/* of_aliases must not be NULL */
@@ -1027,19 +1027,15 @@ struct device_node *of_find_node_with_property(struct device_node *from,
const char *prop_name)
{
struct device_node *np;
- const struct property *pp;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np) {
- for (pp = np->properties; pp; pp = pp->next) {
- if (of_prop_cmp(pp->name, prop_name) == 0) {
- of_node_get(np);
- goto out;
- }
+ if (__of_find_property(np, prop_name, NULL)) {
+ of_node_get(np);
+ break;
}
}
-out:
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
@@ -1453,8 +1449,8 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
struct device_node *cur, *new = NULL;
const __be32 *map, *mask, *pass;
- static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
- static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
+ static const __be32 dummy_mask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
+ static const __be32 dummy_pass[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(0) };
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
int i, ret, map_len, match;
@@ -1546,7 +1542,6 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
* specifier into the out_args structure, keeping the
* bits specified in <list>-map-pass-thru.
*/
- match_array = map - new_size;
for (i = 0; i < new_size; i++) {
__be32 val = *(map - new_size + i);
@@ -1555,6 +1550,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
val |= cpu_to_be32(out_args->args[i]) & pass[i];
}
+ initial_match_array[i] = val;
out_args->args[i] = be32_to_cpu(val);
}
out_args->args_count = list_size = new_size;
@@ -1822,8 +1818,7 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
* for storing the resulting tree
*
* The function scans all the properties of the 'aliases' node and populates
- * the global lookup table with the properties. It returns the
- * number of alias properties found, or an error code in case of failure.
+ * the global lookup table with the properties.
*/
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0121100372b4..6b5516ee670f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) "OF: fdt: " fmt
-#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
@@ -497,6 +496,7 @@ static void __init fdt_reserve_elfcorehdr(void)
void __init early_init_fdt_scan_reserved_mem(void)
{
int n;
+ int res;
u64 base, size;
if (!initial_boot_params)
@@ -507,7 +507,11 @@ void __init early_init_fdt_scan_reserved_mem(void)
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
- fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
+ res = fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
+ if (res) {
+ pr_err("Invalid memory reservation block index %d\n", n);
+ break;
+ }
if (!size)
break;
memblock_reserve(base, size);
@@ -1215,14 +1219,7 @@ void __init unflatten_device_tree(void)
/* Save the statically-placed regions in the reserved_mem array */
fdt_scan_reserved_mem_reg_nodes();
- /* Don't use the bootloader provided DTB if ACPI is enabled */
- if (!acpi_disabled)
- fdt = NULL;
-
- /*
- * Populate an empty root node when ACPI is enabled or bootloader
- * doesn't provide one.
- */
+ /* Populate an empty root node when bootloader doesn't provide one */
if (!fdt) {
fdt = (void *) __dtb_empty_root_begin;
/* fdt_totalsize() will be used for copy size */
@@ -1264,18 +1261,9 @@ void __init unflatten_and_copy_device_tree(void)
}
#ifdef CONFIG_SYSFS
-static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- memcpy(buf, initial_boot_params + off, count);
- return count;
-}
-
static int __init of_fdt_raw_init(void)
{
- static struct bin_attribute of_fdt_raw_attr =
- __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
+ static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(fdt);
if (!initial_boot_params)
return 0;
@@ -1285,8 +1273,9 @@ static int __init of_fdt_raw_init(void)
pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
return 0;
}
- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
- return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
+ bin_attr_fdt.private = initial_boot_params;
+ bin_attr_fdt.size = fdt_totalsize(initial_boot_params);
+ return sysfs_create_bin_file(firmware_kobj, &bin_attr_fdt);
}
late_initcall(of_fdt_raw_init);
#endif
diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c
index 9804d7f06705..f358d2c80754 100644
--- a/drivers/of/fdt_address.c
+++ b/drivers/of/fdt_address.c
@@ -17,23 +17,10 @@
#include <linux/of_fdt.h>
#include <linux/sizes.h>
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
- (ns) > 0)
-
-/* Debug utility */
-#ifdef DEBUG
-static void __init of_dump_addr(const char *s, const __be32 *addr, int na)
-{
- pr_debug("%s", s);
- while(na--)
- pr_cont(" %08x", *(addr++));
- pr_cont("\n");
-}
-#else
-static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { }
-#endif
+/* Uncomment me to enable of_dump_addr() debugging output */
+// #define DEBUG
+
+#include "of_private.h"
/* Callbacks for bus specific translators */
struct of_bus {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 98b1cf78ecac..6c843d54ebb1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -171,7 +171,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
struct device_node *ipar, *tnode, *old = NULL;
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
- const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+ const __be32 *tmp, dummy_imask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
u32 intsize = 1, addrsize;
int i, rc = -EINVAL;
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index cab9b169dc67..aa887166f0d2 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -29,7 +29,7 @@ const struct kobj_type of_node_ktype = {
};
static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct property *pp = container_of(bin_attr, struct property, attr);
@@ -77,7 +77,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
pp->attr.attr.name = safe_name(&np->kobj, pp->name);
pp->attr.attr.mode = secure ? 0400 : 0444;
pp->attr.size = secure ? 0 : pp->length;
- pp->attr.read = of_node_property_read;
+ pp->attr.read_new = of_node_property_read;
rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index ea5a0951ec5e..f3e1193c8ded 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -119,6 +119,8 @@ extern void *__unflatten_device_tree(const void *blob,
void *(*dt_alloc)(u64 size, u64 align),
bool detached);
+void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
+
/**
* General utilities for working with live trees.
*
@@ -188,4 +190,22 @@ void __init fdt_scan_reserved_mem_reg_nodes(void);
bool of_fdt_device_is_available(const void *blob, unsigned long node);
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
+#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
+
+/* Debug utility */
+#ifdef DEBUG
+static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na)
+{
+ pr_debug("%s", s);
+ while (na--)
+ pr_cont(" %08x", be32_to_cpu(*(addr++)));
+ pr_cont("\n");
+}
+#else
+static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na) { }
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 45517b9e57b1..75e819f66a56 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -52,7 +52,8 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
memblock_phys_free(base, size);
}
- kmemleak_ignore_phys(base);
+ if (!err)
+ kmemleak_ignore_phys(base);
return err;
}
@@ -262,6 +263,11 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
uname);
continue;
}
+
+ if (len > t_len)
+ pr_warn("%s() ignores %d regions in node '%s'\n",
+ __func__, len / t_len - 1, uname);
+
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
@@ -409,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
- if (len != dt_root_addr_cells * sizeof(__be32)) {
+ if (len != dt_root_size_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
- align = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ align = dt_mem_next_cell(dt_root_size_cells, &prop);
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
@@ -435,13 +441,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
return -EINVAL;
}
- base = 0;
-
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
+ base = 0;
ret = __reserved_mem_alloc_in_range(size, align,
start, end, nomap, &base);
if (ret == 0) {
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 7eda43c66c91..cb0cb374b21f 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -19,6 +19,8 @@
#include <linux/of.h>
#include <linux/of_pdt.h>
+#include "of_private.h"
+
static struct of_pdt_ops *of_pdt_prom_ops __initdata;
#if defined(CONFIG_SPARC)
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 9bafcff3e628..c6d8afb284e8 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -24,16 +24,6 @@
#include "of_private.h"
-const struct of_device_id of_default_bus_match_table[] = {
- { .compatible = "simple-bus", },
- { .compatible = "simple-mfd", },
- { .compatible = "isa", },
-#ifdef CONFIG_ARM_AMBA
- { .compatible = "arm,amba-bus", },
-#endif /* CONFIG_ARM_AMBA */
- {} /* Empty terminated list */
-};
-
/**
* of_find_device_by_node - Find the platform_device associated with a node
* @np: Pointer to device tree node
@@ -484,8 +474,17 @@ int of_platform_default_populate(struct device_node *root,
const struct of_dev_auxdata *lookup,
struct device *parent)
{
- return of_platform_populate(root, of_default_bus_match_table, lookup,
- parent);
+ static const struct of_device_id match_table[] = {
+ { .compatible = "simple-bus", },
+ { .compatible = "simple-mfd", },
+ { .compatible = "isa", },
+#ifdef CONFIG_ARM_AMBA
+ { .compatible = "arm,amba-bus", },
+#endif /* CONFIG_ARM_AMBA */
+ {} /* Empty terminated list */
+ };
+
+ return of_platform_populate(root, match_table, lookup, parent);
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index cfc8aea002e4..208d922cc24c 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -32,6 +32,32 @@
#include "of_private.h"
/**
+ * of_property_read_bool - Find a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a boolean property in a device node. Usage on non-boolean
+ * property types is deprecated.
+ *
+ * Return: true if the property exists false otherwise.
+ */
+bool of_property_read_bool(const struct device_node *np, const char *propname)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ /*
+ * Boolean properties should not have a value. Testing for property
+ * presence should either use of_property_present() or just read the
+ * property value and check the returned error code.
+ */
+ if (prop && prop->length)
+ pr_warn("%pOF: Read of boolean property '%s' with a value.\n", np, propname);
+
+ return prop ? true : false;
+}
+EXPORT_SYMBOL(of_property_read_bool);
+
+/**
* of_graph_is_present() - check graph's presence
* @node: pointer to device_node containing graph port
*
@@ -966,6 +992,12 @@ of_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode)
static bool of_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
+ return of_property_present(to_of_node(fwnode), propname);
+}
+
+static bool of_fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
return of_property_read_bool(to_of_node(fwnode), propname);
}
@@ -1390,9 +1422,9 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
addrcells = of_bus_n_addr_cells(np);
imap = of_get_property(np, "interrupt-map", &imaplen);
- imaplen /= sizeof(*imap);
if (!imap)
return NULL;
+ imaplen /= sizeof(*imap);
imap_end = imap + imaplen;
@@ -1560,6 +1592,7 @@ const struct fwnode_operations of_fwnode_ops = {
.device_dma_supported = of_fwnode_device_dma_supported,
.device_get_dma_attr = of_fwnode_device_get_dma_attr,
.property_present = of_fwnode_property_present,
+ .property_read_bool = of_fwnode_property_read_bool,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
.get_name = of_fwnode_get_name,
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index fa39611071b3..cd310b26b50c 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -34,5 +34,18 @@
};
};
};
+
+ platform-tests-2 {
+ // No #address-cells or #size-cells
+ node {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ test-device@100 {
+ compatible = "test-sub-device";
+ reg = <0x100 1>;
+ };
+ };
+ };
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 438fd70fa995..bc3426480f3a 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -161,6 +161,15 @@ static void __init of_unittest_find_node_by_name(void)
"option alias path test, subcase #1 failed\n");
of_node_put(np);
+ np = of_find_node_opts_by_path("testcase-alias/phandle-tests/consumer-a:testaliasoption",
+ &options);
+ name = kasprintf(GFP_KERNEL, "%pOF", np);
+ unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name) &&
+ !strcmp("testaliasoption", options),
+ "option alias path test, subcase #2 failed\n");
+ of_node_put(np);
+ kfree(name);
+
np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
unittest(np, "NULL option alias path test failed\n");
of_node_put(np);
@@ -1380,6 +1389,7 @@ static void __init of_unittest_bus_3cell_ranges(void)
static void __init of_unittest_reg(void)
{
struct device_node *np;
+ struct resource res;
int ret;
u64 addr, size;
@@ -1396,6 +1406,19 @@ static void __init of_unittest_reg(void)
np, addr);
of_node_put(np);
+
+ np = of_find_node_by_path("/testcase-data/platform-tests-2/node/test-device@100");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ unittest(ret == -EINVAL, "of_address_to_resource(%pOF) expected error on untranslatable address\n",
+ np);
+
+ of_node_put(np);
+
}
struct of_unittest_expected_res {
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 0311b18319a4..73e9a3b2f29b 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -101,11 +101,55 @@ struct opp_table *_find_opp_table(struct device *dev)
* representation in the OPP table and manage the clock configuration themselves
* in an platform specific way.
*/
-static bool assert_single_clk(struct opp_table *opp_table)
+static bool assert_single_clk(struct opp_table *opp_table,
+ unsigned int __always_unused index)
{
return !WARN_ON(opp_table->clk_count > 1);
}
+/*
+ * Returns true if clock table is large enough to contain the clock index.
+ */
+static bool assert_clk_index(struct opp_table *opp_table,
+ unsigned int index)
+{
+ return opp_table->clk_count > index;
+}
+
+/*
+ * Returns true if bandwidth table is large enough to contain the bandwidth index.
+ */
+static bool assert_bandwidth_index(struct opp_table *opp_table,
+ unsigned int index)
+{
+ return opp_table->path_count > index;
+}
+
+/**
+ * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
+ * @opp: opp for which the bandwidth has to be returned
+ * @peak: select peak or average bandwidth
+ * @index: bandwidth index
+ *
+ * Return: bandwidth in kBps, or 0 otherwise
+ */
+unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
+{
+ if (IS_ERR_OR_NULL(opp)) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ if (index >= opp->opp_table->path_count)
+ return 0;
+
+ if (!opp->bandwidth)
+ return 0;
+
+ return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);
+
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
@@ -499,12 +543,12 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
/* Assert that the requirement is met */
- if (assert && !assert(opp_table))
+ if (assert && !assert(opp_table, index))
return ERR_PTR(-EINVAL);
mutex_lock(&opp_table->lock);
@@ -532,7 +576,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
struct opp_table *opp_table;
struct dev_pm_opp *opp;
@@ -555,7 +599,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
static struct dev_pm_opp *_find_key_exact(struct device *dev,
unsigned long key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
/*
* The value of key will be updated here, but will be ignored as the
@@ -568,7 +612,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev,
static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _opp_table_find_key(opp_table, key, index, available, read,
_compare_ceil, assert);
@@ -577,7 +621,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _find_key(dev, key, index, available, read, _compare_ceil,
assert);
@@ -586,7 +630,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
static struct dev_pm_opp *_find_key_floor(struct device *dev,
unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _find_key(dev, key, index, available, read, _compare_floor,
assert);
@@ -647,7 +691,8 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available)
{
- return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
+ return _find_key_exact(dev, freq, index, available, _read_freq,
+ assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
@@ -707,7 +752,8 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
u32 index)
{
- return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
+ return _find_key_ceil(dev, freq, index, true, _read_freq,
+ assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
@@ -760,7 +806,7 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
u32 index)
{
- return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
+ return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
@@ -878,7 +924,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
unsigned long temp = *bw;
struct dev_pm_opp *opp;
- opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
+ opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
+ assert_bandwidth_index);
*bw = temp;
return opp;
}
@@ -909,7 +956,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned long temp = *bw;
struct dev_pm_opp *opp;
- opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
+ opp = _find_key_floor(dev, &temp, index, true, _read_bw,
+ assert_bandwidth_index);
*bw = temp;
return opp;
}
@@ -1480,11 +1528,6 @@ err:
return ERR_PTR(ret);
}
-void _get_opp_table_kref(struct opp_table *opp_table)
-{
- kref_get(&opp_table->kref);
-}
-
static struct opp_table *_update_opp_table_clk(struct device *dev,
struct opp_table *opp_table,
bool getclk)
@@ -1645,6 +1688,17 @@ static void _opp_table_kref_release(struct kref *kref)
kfree(opp_table);
}
+void _get_opp_table_kref(struct opp_table *opp_table)
+{
+ kref_get(&opp_table->kref);
+}
+
+void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
+{
+ _get_opp_table_kref(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref);
+
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -1679,6 +1733,7 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
{
kref_get(&opp->kref);
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get);
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
@@ -1702,7 +1757,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (IS_ERR(opp_table))
return;
- if (!assert_single_clk(opp_table))
+ if (!assert_single_clk(opp_table, 0))
goto put_table;
mutex_lock(&opp_table->lock);
@@ -2054,7 +2109,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long tol, u_volt = data->u_volt;
int ret;
- if (!assert_single_clk(opp_table))
+ if (!assert_single_clk(opp_table, 0))
return -EINVAL;
new_opp = _opp_allocate(opp_table);
@@ -2810,7 +2865,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
return r;
}
- if (!assert_single_clk(opp_table)) {
+ if (!assert_single_clk(opp_table, 0)) {
r = -EINVAL;
goto put_table;
}
@@ -2886,7 +2941,7 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
- if (!assert_single_clk(opp_table)) {
+ if (!assert_single_clk(opp_table, 0)) {
r = -EINVAL;
goto put_table;
}
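
For illustration, a minimal sketch of how a hypothetical consumer might use the newly exported dev_pm_opp_get_bw() together with an index-asserted frequency lookup; the device, frequency value and indices are assumptions, not code from this patch:

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_query_bw(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 800000000;	/* 800 MHz, illustrative */
	unsigned long peak_kbps;

	/* Now fails with -EINVAL if index 1 exceeds opp_table->clk_count */
	opp = dev_pm_opp_find_freq_ceil_indexed(dev, &freq, 1);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Peak bandwidth in kBps of interconnect path 0, or 0 on error */
	peak_kbps = dev_pm_opp_get_bw(opp, true, 0);

	dev_pm_opp_put(opp);
	return peak_kbps ? 0 : -EINVAL;
}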
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index fd5ed2858258..a24f76f5fd01 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -926,7 +926,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
ret = _of_opp_alloc_required_opps(opp_table, new_opp);
if (ret)
- goto free_opp;
+ goto put_node;
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
@@ -976,6 +976,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
free_required_opps:
_of_opp_free_required_opps(opp_table, new_opp);
+put_node:
+ of_node_put(np);
free_opp:
_opp_free(new_opp);
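
The of.c fix above works because the error labels unwind in the reverse order of acquisition: jumping to the new put_node label drops the of_node reference before the OPP itself is freed. A generic sketch of that idiom with hypothetical names (struct foo and foo_setup() are assumptions, not from this patch):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>

struct foo {
	struct device_node *np;
};

static int foo_setup(struct foo *f, struct device_node *np)
{
	f->np = np;			/* placeholder for real setup work */
	return 0;
}

static struct foo *foo_create(struct device_node *np)
{
	struct foo *f;
	int ret;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	of_node_get(np);		/* reference taken before setup */
	ret = foo_setup(f, np);		/* may fail */
	if (ret)
		goto put_node;		/* drop the reference first... */

	return f;

put_node:
	of_node_put(np);
	kfree(f);			/* ...then free the object */
	return ERR_PTR(ret);
}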
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 430651e7424a..5c7c81190e41 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -250,7 +250,6 @@ struct opp_table {
};
/* Routines internal to opp core */
-void dev_pm_opp_get(struct dev_pm_opp *opp);
bool _opp_remove_all_static(struct opp_table *opp_table);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 6a830166d37f..ace736b025b1 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -300,7 +300,7 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->elbi_base))
return PTR_ERR(ep->elbi_base);
- ret = devm_clk_bulk_get_all_enable(dev, &ep->clks);
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index cdd5be16021d..6084b38bdda1 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2053,6 +2053,7 @@ static struct irq_chip hv_msi_irq_chip = {
.irq_set_affinity = irq_chip_set_affinity_parent,
#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
+ .flags = IRQCHIP_MOVE_DEFERRED,
#elif defined(CONFIG_ARM64)
.irq_eoi = irq_chip_eoi_parent,
#endif
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 35270172c833..f57ea36d125d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1670,6 +1670,19 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
+/*
+ * pci_device_irq_get_affinity - get IRQ affinity mask for device
+ * @dev: pointer to the device structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @dev and @irq_vec.
+ */
+static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
+ unsigned int irq_vec)
+{
+ return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
+}
+
const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@@ -1677,6 +1690,7 @@ const struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
+ .irq_get_affinity = pci_device_irq_get_affinity,
.dev_groups = pci_dev_groups,
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
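
A minimal sketch (assumed consumer, not part of this patch) of what the new bus-level callback enables: generic code can query per-vector IRQ affinity through struct device without knowing the underlying bus is PCI:

#include <linux/cpumask.h>
#include <linux/device.h>

static const struct cpumask *example_vec_affinity(struct device *dev,
						  unsigned int vec)
{
	if (dev->bus && dev->bus->irq_get_affinity)
		return dev->bus->irq_get_affinity(dev, vec);

	return cpu_possible_mask;	/* illustrative fallback */
}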
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index b59cacc740fa..0a5e7efbce2c 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -303,14 +303,17 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
if (ret)
return ret;
- ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret)
- return ret;
-
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = no_free_ptr(data);
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
pcie_bwnotif_enable(srv);
}
}
@@ -331,11 +334,15 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
- pcie_bwnotif_disable(srv->port);
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
- scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem)
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem)
srv->port->link_bwctrl = NULL;
+ }
+ }
}
static int pcie_bwnotif_suspend(struct pcie_device *srv)
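
The bwctrl reordering publishes the shared pointer and requests the IRQ under the same write locks that the handler's readers take, so the handler can never observe a half-initialized port->link_bwctrl, and teardown reverses the order. A condensed sketch of that publish-under-lock pattern with illustrative names:

#include <linux/cleanup.h>
#include <linux/interrupt.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_setspeed_rwsem);
static DECLARE_RWSEM(example_lbms_rwsem);

struct example_port {
	void *shared;	/* read by the IRQ handler under these rwsems */
};

static int example_attach(struct example_port *port, void *data, int irq,
			  irq_handler_t handler, void *dev_id)
{
	int ret = 0;

	scoped_guard(rwsem_write, &example_setspeed_rwsem) {
		scoped_guard(rwsem_write, &example_lbms_rwsem) {
			port->shared = data;	/* publish under both locks */
			ret = request_irq(irq, handler, IRQF_SHARED,
					  "example", dev_id);
			if (ret)
				port->shared = NULL;	/* unpublish on failure */
		}
	}

	return ret;
}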
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 1d4d01e1275e..06fd317529fc 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -168,6 +168,8 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
[PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
};
/* sysfs definitions */
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index b20fa600e510..ef959e66db7c 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1713,8 +1713,8 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
goto done;
}
- for (i = 0; i < CMN_MAX_DTCS; i++)
- if (val->dtc_count[i] == CMN_DT_NUM_COUNTERS)
+ for_each_hw_dtc_idx(hw, dtc, idx)
+ if (val->dtc_count[dtc] == CMN_DT_NUM_COUNTERS)
goto done;
for_each_hw_dn(hw, dn, i) {
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index d0ef611240aa..8116c7846a46 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -54,65 +54,24 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(scf_cache_wb, 0xF3),
NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_rsp, 0x105),
NV_CSPMU_EVENT_ATTR_4(socket, wb_data, 0x109),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_rsp, 0x10d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_data, 0x111),
NV_CSPMU_EVENT_ATTR_4(socket, rd_outstanding, 0x115),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_outstanding, 0x119),
- NV_CSPMU_EVENT_ATTR_4(socket, wb_outstanding, 0x11d),
- NV_CSPMU_EVENT_ATTR_4(socket, wr_outstanding, 0x121),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_outstanding, 0x125),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_outstanding, 0x129),
NV_CSPMU_EVENT_ATTR_4(socket, rd_access, 0x12d),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_access, 0x131),
NV_CSPMU_EVENT_ATTR_4(socket, wb_access, 0x135),
NV_CSPMU_EVENT_ATTR_4(socket, wr_access, 0x139),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_access, 0x13d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_access, 0x141),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_data, 0x145),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_access, 0x149),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_access, 0x14d),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_outstanding, 0x151),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_outstanding, 0x155),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_data, 0x159),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_access, 0x15d),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_access, 0x161),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_outstanding, 0x165),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_outstanding, 0x169),
ARM_CSPMU_EVENT_ATTR(gmem_rd_data, 0x16d),
ARM_CSPMU_EVENT_ATTR(gmem_rd_access, 0x16e),
ARM_CSPMU_EVENT_ATTR(gmem_rd_outstanding, 0x16f),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_rsp, 0x170),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_access, 0x171),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_outstanding, 0x172),
ARM_CSPMU_EVENT_ATTR(gmem_wb_data, 0x173),
ARM_CSPMU_EVENT_ATTR(gmem_wb_access, 0x174),
- ARM_CSPMU_EVENT_ATTR(gmem_wb_outstanding, 0x175),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_rsp, 0x176),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_access, 0x177),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_outstanding, 0x178),
ARM_CSPMU_EVENT_ATTR(gmem_wr_data, 0x179),
- ARM_CSPMU_EVENT_ATTR(gmem_wr_outstanding, 0x17a),
ARM_CSPMU_EVENT_ATTR(gmem_wr_access, 0x17b),
NV_CSPMU_EVENT_ATTR_4(socket, wr_data, 0x17c),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_data, 0x180),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_data, 0x184),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_access, 0x188),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_outstanding, 0x18c),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_data, 0x190),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_data, 0x194),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_access, 0x198),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_outstanding, 0x19c),
-
ARM_CSPMU_EVENT_ATTR(gmem_wr_total_bytes, 0x1a0),
ARM_CSPMU_EVENT_ATTR(remote_socket_wr_total_bytes, 0x1a1),
ARM_CSPMU_EVENT_ATTR(remote_socket_rd_data, 0x1a2),
@@ -122,35 +81,12 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(cmem_rd_data, 0x1a5),
ARM_CSPMU_EVENT_ATTR(cmem_rd_access, 0x1a6),
ARM_CSPMU_EVENT_ATTR(cmem_rd_outstanding, 0x1a7),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_rsp, 0x1a8),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_access, 0x1a9),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_outstanding, 0x1aa),
ARM_CSPMU_EVENT_ATTR(cmem_wb_data, 0x1ab),
ARM_CSPMU_EVENT_ATTR(cmem_wb_access, 0x1ac),
- ARM_CSPMU_EVENT_ATTR(cmem_wb_outstanding, 0x1ad),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_rsp, 0x1ae),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_access, 0x1af),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_outstanding, 0x1b0),
ARM_CSPMU_EVENT_ATTR(cmem_wr_data, 0x1b1),
- ARM_CSPMU_EVENT_ATTR(cmem_wr_outstanding, 0x1b2),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_data, 0x1b3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_access, 0x1b7),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_access, 0x1bb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_outstanding, 0x1bf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_outstanding, 0x1c3),
-
- ARM_CSPMU_EVENT_ATTR(ocu_prb_access, 0x1c7),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_data, 0x1c8),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_outstanding, 0x1c9),
ARM_CSPMU_EVENT_ATTR(cmem_wr_access, 0x1ca),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_access, 0x1cb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_data, 0x1cf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_data, 0x1d3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_outstanding, 0x1d7),
-
ARM_CSPMU_EVENT_ATTR(cmem_wr_total_bytes, 0x1db),
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
@@ -194,6 +130,7 @@ static struct attribute *pcie_pmu_format_attrs[] = {
static struct attribute *nvlink_c2c_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
+ ARM_CSPMU_FORMAT_ATTR(port, "config1:0-1"),
NULL,
};
@@ -238,10 +175,12 @@ static u32 nv_cspmu_event_filter(const struct perf_event *event)
const struct nv_cspmu_ctx *ctx =
to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));
- if (ctx->filter_mask == 0)
+ const u32 filter_val = event->attr.config1 & ctx->filter_mask;
+
+ if (filter_val == 0)
return ctx->filter_default_val;
- return event->attr.config1 & ctx->filter_mask;
+ return filter_val;
}
enum nv_cspmu_name_fmt {
@@ -274,7 +213,7 @@ static const struct nv_cspmu_match nv_cspmu_match[] = {
{
.prodid = 0x104,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c1_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
@@ -284,7 +223,7 @@ static const struct nv_cspmu_match nv_cspmu_match[] = {
{
.prodid = 0x105,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c0_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
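
The filter rework above changes the semantics from "no filtering configured for this PMU" to "no filter bits set for this event": with the new "port" field occupying config1 bits 0-1, a masked value of zero falls back to the default, so an unfiltered event still counts all ports. A one-function sketch of that logic (names are illustrative):

#include <linux/types.h>

static u32 example_event_filter(u32 config1, u32 filter_mask,
				u32 filter_default_val)
{
	u32 filter_val = config1 & filter_mask;

	/* No filter requested: count everything via the default value */
	return filter_val ? filter_val : filter_default_val;
}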
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index fd5b78732603..f5e6878db9d6 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -85,6 +85,7 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
+#define SPE_PMU_FEAT_DISCARD (1UL << 7)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
@@ -193,6 +194,9 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO 34
#define ATTR_CFG_FLD_store_filter_HI 34
+#define ATTR_CFG_FLD_discard_CFG config /* PMBLIMITR_EL1.FM = DISCARD */
+#define ATTR_CFG_FLD_discard_LO 35
+#define ATTR_CFG_FLD_discard_HI 35
#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO 0
@@ -216,6 +220,7 @@ GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
+GEN_PMU_FORMAT_ATTR(discard);
static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_ts_enable.attr,
@@ -228,6 +233,7 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_event_filter.attr,
&format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
+ &format_attr_discard.attr,
NULL,
};
@@ -238,6 +244,9 @@ static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+ if (attr == &format_attr_discard.attr && !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return 0;
+
if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return 0;
@@ -502,6 +511,12 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
u64 base, limit;
struct arm_spe_pmu_buf *buf;
+ if (ATTR_CFG_GET_FLD(&event->attr, discard)) {
+ limit = FIELD_PREP(PMBLIMITR_EL1_FM, PMBLIMITR_EL1_FM_DISCARD);
+ limit |= PMBLIMITR_EL1_E;
+ goto out_write_limit;
+ }
+
/* Start a new aux session */
buf = perf_aux_output_begin(handle, event);
if (!buf) {
@@ -743,6 +758,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ if (ATTR_CFG_GET_FLD(&event->attr, discard) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return -EOPNOTSUPP;
+
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
@@ -1027,6 +1046,9 @@ static void __arm_spe_pmu_dev_probe(void *info)
if (FIELD_GET(PMSIDR_EL1_ERND, reg))
spe_pmu->features |= SPE_PMU_FEAT_ERND;
+ if (spe_pmu->pmsver >= ID_AA64DFR0_EL1_PMSVer_V1P2)
+ spe_pmu->features |= SPE_PMU_FEAT_DISCARD;
+
/* This field has a spaced out encoding, so just use a look-up */
fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
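
When the new "discard" format bit is set, the driver programs the buffer limit register with FM = DISCARD and skips allocating an aux buffer entirely: profiling runs but every record is dropped. A minimal arm64 sketch mirroring the hunk above (assumed to run at the appropriate exception level, as the driver does):

#include <linux/bitfield.h>
#include <asm/barrier.h>
#include <asm/sysreg.h>

static void example_spe_enable_discard(void)
{
	u64 limit = FIELD_PREP(PMBLIMITR_EL1_FM, PMBLIMITR_EL1_FM_DISCARD);

	limit |= PMBLIMITR_EL1_E;	/* buffer enabled, records discarded */
	write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
	isb();
}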
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 9cbea9675e21..cccecae9823f 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -20,7 +20,6 @@
#include <linux/sysfs.h>
#include <linux/types.h>
-#define DWC_PCIE_VSEC_RAS_DES_ID 0x02
#define DWC_PCIE_EVENT_CNT_CTL 0x8
/*
@@ -100,14 +99,23 @@ struct dwc_pcie_dev_info {
struct list_head dev_node;
};
-struct dwc_pcie_vendor_id {
- int vendor_id;
+struct dwc_pcie_pmu_vsec_id {
+ u16 vendor_id;
+ u16 vsec_id;
+ u8 vsec_rev;
};
-static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
- {.vendor_id = PCI_VENDOR_ID_ALIBABA },
- {.vendor_id = PCI_VENDOR_ID_AMPERE },
- {.vendor_id = PCI_VENDOR_ID_QCOM },
+/*
+ * VSEC IDs are allocated by the vendor, so a given ID may mean different
+ * things to different vendors. See PCIe r6.0, sec 7.9.5.2.
+ */
+static const struct dwc_pcie_pmu_vsec_id dwc_pcie_pmu_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_ALIBABA,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_AMPERE,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
{} /* terminator */
};
@@ -199,8 +207,8 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x08),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x09),
/* Group #1 */
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
@@ -519,31 +527,28 @@ static void dwc_pcie_unregister_pmu(void *data)
perf_pmu_unregister(&pcie_pmu->pmu);
}
-static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
+static u16 dwc_pcie_des_cap(struct pci_dev *pdev)
{
- const struct dwc_pcie_vendor_id *vid;
- u16 vsec = 0;
+ const struct dwc_pcie_pmu_vsec_id *vid;
+ u16 vsec;
u32 val;
if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
- return false;
+ return 0;
- for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
+ for (vid = dwc_pcie_pmu_vsec_ids; vid->vendor_id; vid++) {
vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
- DWC_PCIE_VSEC_RAS_DES_ID);
- if (vsec)
- break;
+ vid->vsec_id);
+ if (vsec) {
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER,
+ &val);
+ if (PCI_VNDR_HEADER_REV(val) == vid->vsec_rev) {
+ pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
+ return vsec;
+ }
+ }
}
- if (!vsec)
- return false;
-
- pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
- if (PCI_VNDR_HEADER_REV(val) != 0x04)
- return false;
-
- pci_dbg(pdev,
- "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
- return true;
+ return 0;
}
static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
@@ -587,7 +592,7 @@ static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- if (!dwc_pcie_match_des_cap(pdev))
+ if (!dwc_pcie_des_cap(pdev))
return NOTIFY_DONE;
if (dwc_pcie_register_dev(pdev))
return NOTIFY_BAD;
@@ -612,13 +617,14 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
struct pci_dev *pdev = plat_dev->dev.platform_data;
struct dwc_pcie_pmu *pcie_pmu;
char *name;
- u32 sbdf, val;
+ u32 sbdf;
u16 vsec;
int ret;
- vsec = pci_find_vsec_capability(pdev, pdev->vendor,
- DWC_PCIE_VSEC_RAS_DES_ID);
- pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ vsec = dwc_pcie_des_cap(pdev);
+ if (!vsec)
+ return -ENODEV;
+
sbdf = plat_dev->id;
name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
if (!name)
@@ -730,7 +736,7 @@ static int __init dwc_pcie_pmu_init(void)
int ret;
for_each_pci_dev(pdev) {
- if (!dwc_pcie_match_des_cap(pdev))
+ if (!dwc_pcie_des_cap(pdev))
continue;
ret = dwc_pcie_register_dev(pdev);
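
Since VSEC IDs are vendor-allocated, matching the capability now checks both the per-vendor ID and the header revision before trusting the register layout. A generic sketch of that pattern (the ID/revision macros are illustrative, taken from the table above):

#include <linux/pci.h>

#define EXAMPLE_VSEC_ID		0x02	/* RAS DES ID used by these vendors */
#define EXAMPLE_VSEC_REV	0x4

static u16 example_find_vsec(struct pci_dev *pdev, u16 vendor)
{
	u16 vsec;
	u32 hdr;

	vsec = pci_find_vsec_capability(pdev, vendor, EXAMPLE_VSEC_ID);
	if (!vsec)
		return 0;

	pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &hdr);
	if (PCI_VNDR_HEADER_REV(hdr) != EXAMPLE_VSEC_REV)
		return 0;	/* same ID, different layout: not ours */

	return vsec;
}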
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 3c856d9a4e97..843f163e6c33 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -63,8 +63,21 @@
static DEFINE_IDA(ddr_ida);
+/*
+ * V1 supports one read-transaction, one write-transaction and one
+ * read-beats event, which correspond respectively to counters 2, 3 and 4.
+ */
+#define DDR_PERF_AXI_FILTER_V1 0x1
+
+/*
+ * V2 supports one read-beats and three write-beats events, which
+ * correspond respectively to counters 2-5.
+ */
+#define DDR_PERF_AXI_FILTER_V2 0x2
+
struct imx_ddr_devtype_data {
const char *identifier; /* system PMU identifier for userspace */
+ unsigned int filter_ver; /* AXI filter version */
};
struct ddr_pmu {
@@ -83,24 +96,27 @@ struct ddr_pmu {
static const struct imx_ddr_devtype_data imx91_devtype_data = {
.identifier = "imx91",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
};
static const struct imx_ddr_devtype_data imx93_devtype_data = {
.identifier = "imx93",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
};
static const struct imx_ddr_devtype_data imx95_devtype_data = {
.identifier = "imx95",
+ .filter_ver = DDR_PERF_AXI_FILTER_V2
};
-static inline bool is_imx93(struct ddr_pmu *pmu)
+static inline bool axi_filter_v1(struct ddr_pmu *pmu)
{
- return pmu->devtype_data == &imx93_devtype_data;
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V1;
}
-static inline bool is_imx95(struct ddr_pmu *pmu)
+static inline bool axi_filter_v2(struct ddr_pmu *pmu)
{
- return pmu->devtype_data == &imx95_devtype_data;
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V2;
}
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
@@ -155,7 +171,7 @@ static const struct attribute_group ddr_perf_cpumask_attr_group = {
struct imx9_pmu_events_attr {
struct device_attribute attr;
u64 id;
- const void *devtype_data;
+ const struct imx_ddr_devtype_data *devtype_data;
};
static ssize_t ddr_pmu_event_show(struct device *dev,
@@ -307,7 +323,8 @@ ddr_perf_events_attrs_is_visible(struct kobject *kobj,
if (!eattr->devtype_data)
return attr->mode;
- if (eattr->devtype_data != ddr_pmu->devtype_data)
+ if (eattr->devtype_data != ddr_pmu->devtype_data &&
+ eattr->devtype_data->filter_ver != ddr_pmu->devtype_data->filter_ver)
return 0;
return attr->mode;
@@ -624,11 +641,11 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
- if (is_imx93(pmu))
+ if (axi_filter_v1(pmu))
/* read trans, write trans, read beat */
imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
- if (is_imx95(pmu))
+ if (axi_filter_v2(pmu))
/* write beat, read beat2, read beat1, read beat */
imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
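
With the AXI filter version captured in devtype_data, event visibility can match on the version rather than on pointer equality with one exact SoC, which is how imx91 inherits the V1 event set defined for imx93. A sketch of that comparison (mirror types, not the driver's own):

#include <linux/types.h>

struct example_devtype_data {
	const char *identifier;
	unsigned int filter_ver;
};

static bool example_event_visible(const struct example_devtype_data *evt,
				  const struct example_devtype_data *pmu)
{
	if (!evt)		/* common event: visible on every SoC */
		return true;

	return evt == pmu || evt->filter_ver == pmu->filter_ver;
}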
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index 3f3fb1de11f5..b879b81adfdd 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -180,20 +180,18 @@ MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);
static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *cpa_pmu)
{
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &cpa_pmu->sicl_id)) {
+ hisi_uncore_pmu_init_topology(cpa_pmu, &pdev->dev);
+
+ if (cpa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Can not read sicl-id\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &cpa_pmu->index_id)) {
+ if (cpa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id\n");
return -EINVAL;
}
- cpa_pmu->ccl_id = -1;
- cpa_pmu->sccl_id = -1;
cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cpa_pmu->base))
return PTR_ERR(cpa_pmu->base);
@@ -227,34 +225,11 @@ static const struct attribute_group hisi_cpa_pmu_events_group = {
.attrs = hisi_cpa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
- .attrs = hisi_cpa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_cpa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
- &hisi_cpa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_identifier_group = {
- .attrs = hisi_cpa_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
&hisi_cpa_pmu_format_group,
&hisi_cpa_pmu_events_group,
- &hisi_cpa_pmu_cpumask_attr_group,
- &hisi_cpa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -311,8 +286,8 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
- cpa_pmu->sicl_id, cpa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%d",
+ cpa_pmu->topo.sicl_id, cpa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -389,6 +364,7 @@ static void __exit hisi_cpa_pmu_module_exit(void)
}
module_exit(hisi_cpa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index a6ebf2ec99d3..7e490f8868f2 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -111,14 +111,14 @@ static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
* so there is no need to write event type, while it is programmable counter in
* PMU v2.
*/
-static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
u32 type)
{
u32 offset;
- if (hha_pmu->identifier >= HISI_PMU_V2) {
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
offset = DDRC_V2_EVENT_TYPE + 4 * idx;
- writel(type, hha_pmu->base + offset);
+ writel(type, ddrc_pmu->base + offset);
}
}
@@ -297,23 +297,22 @@ MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
+ hisi_uncore_pmu_init_topology(ddrc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and DDRC channel ID to identify the
* DDRC PMU, while SCCL_ID is in MPIDR[aff2].
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
- &ddrc_pmu->index_id)) {
+ &ddrc_pmu->topo.index_id)) {
dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &ddrc_pmu->sccl_id)) {
+ if (ddrc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
return -EINVAL;
}
- /* DDRC PMUs only share the same SCCL */
- ddrc_pmu->ccl_id = -1;
ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddrc_pmu->base)) {
@@ -323,8 +322,7 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
if (ddrc_pmu->identifier >= HISI_PMU_V2) {
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &ddrc_pmu->sub_id)) {
+ if (ddrc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -382,42 +380,19 @@ static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
.attrs = hisi_ddrc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
- .attrs = hisi_ddrc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_ddrc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
- &hisi_ddrc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
- .attrs = hisi_ddrc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
&hisi_ddrc_pmu_v1_format_group,
&hisi_ddrc_pmu_v1_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
&hisi_ddrc_pmu_v2_format_group,
&hisi_ddrc_pmu_v2_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -501,13 +476,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
if (ddrc_pmu->identifier >= HISI_PMU_V2)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u_%u",
- ddrc_pmu->sccl_id, ddrc_pmu->index_id,
- ddrc_pmu->sub_id);
+ "hisi_sccl%d_ddrc%d_%d",
+ ddrc_pmu->topo.sccl_id, ddrc_pmu->topo.index_id,
+ ddrc_pmu->topo.sub_id);
else
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
- ddrc_pmu->index_id);
+ "hisi_sccl%d_ddrc%d", ddrc_pmu->topo.sccl_id,
+ ddrc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -575,10 +550,10 @@ static void __exit hisi_ddrc_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_ddrc_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
-
}
module_exit(hisi_ddrc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 32624872596f..ca609db86046 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -295,12 +295,13 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
unsigned long long id;
acpi_status status;
+ hisi_uncore_pmu_init_topology(hha_pmu, &pdev->dev);
+
/*
* Use SCCL_ID and UID to identify the HHA PMU, while
* SCCL_ID is in MPIDR[aff2].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &hha_pmu->sccl_id)) {
+ if (hha_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
return -EINVAL;
}
@@ -309,8 +310,7 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
* Early versions of BIOS support _UID by mistake, so we support
* both "hisilicon, idx-id" as preference, if available.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &hha_pmu->index_id)) {
+ if (hha_pmu->topo.index_id < 0) {
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
"_UID", NULL, &id);
if (ACPI_FAILURE(status)) {
@@ -318,10 +318,8 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
- hha_pmu->index_id = id;
+ hha_pmu->topo.index_id = id;
}
- /* HHA PMUs only share the same SCCL */
- hha_pmu->ccl_id = -1;
hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hha_pmu->base)) {
@@ -407,42 +405,19 @@ static const struct attribute_group hisi_hha_pmu_v2_events_group = {
.attrs = hisi_hha_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
- .attrs = hisi_hha_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_hha_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
- &hisi_hha_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_hha_pmu_identifier_group = {
- .attrs = hisi_hha_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
&hisi_hha_pmu_v1_format_group,
&hisi_hha_pmu_v1_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
&hisi_hha_pmu_v2_format_group,
&hisi_hha_pmu_v2_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -510,8 +485,8 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
- hha_pmu->sccl_id, hha_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_hha%d",
+ hha_pmu->topo.sccl_id, hha_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -581,6 +556,7 @@ static void __exit hisi_hha_pmu_module_exit(void)
}
module_exit(hisi_hha_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index c235b46ce873..412fc3a97963 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -355,18 +355,18 @@ MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
+ hisi_uncore_pmu_init_topology(l3c_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
* SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &l3c_pmu->sccl_id)) {
+ if (l3c_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &l3c_pmu->ccl_id)) {
+ if (l3c_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
return -EINVAL;
}
@@ -441,42 +441,19 @@ static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.attrs = hisi_l3c_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
- .attrs = hisi_l3c_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_l3c_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
- &hisi_l3c_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_l3c_pmu_identifier_group = {
- .attrs = hisi_l3c_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
&hisi_l3c_pmu_v1_format_group,
&hisi_l3c_pmu_v1_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
&hisi_l3c_pmu_v2_format_group,
&hisi_l3c_pmu_v2_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -544,8 +521,8 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
- l3c_pmu->sccl_id, l3c_pmu->ccl_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
if (!name)
return -ENOMEM;
@@ -615,6 +592,7 @@ static void __exit hisi_l3c_pmu_module_exit(void)
}
module_exit(hisi_l3c_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index c0f5d7c73e06..a0142684e379 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -269,25 +269,22 @@ static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
+ hisi_uncore_pmu_init_topology(pa_pmu, &pdev->dev);
+
/*
* As PA PMU is in a SICL, use the SICL_ID and the index ID
* to identify the PA PMU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &pa_pmu->sicl_id)) {
+ if (pa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Cannot read sicl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &pa_pmu->index_id)) {
+ if (pa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- pa_pmu->ccl_id = -1;
- pa_pmu->sccl_id = -1;
-
pa_pmu->dev_info = device_get_match_data(&pdev->dev);
if (!pa_pmu->dev_info)
return -ENODEV;
@@ -356,29 +353,6 @@ static const struct attribute_group hisi_h60pa_pmu_events_group = {
.attrs = hisi_h60pa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
- .attrs = hisi_pa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_pa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
- &hisi_pa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_identifier_group = {
- .attrs = hisi_pa_pmu_identifier_attrs,
-};
-
static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
.mask_offset = PA_INT_MASK,
.clear_offset = PA_INT_CLEAR,
@@ -388,8 +362,8 @@ static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v2_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -402,8 +376,8 @@ static const struct hisi_pmu_dev_info hisi_h32pa_v2 = {
static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v3_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -422,8 +396,8 @@ static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = {
static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_h60pa_pmu_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -488,9 +462,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%u",
- pa_pmu->sicl_id, pa_pmu->dev_info->name,
- pa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%d",
+ pa_pmu->topo.sicl_id, pa_pmu->dev_info->name,
+ pa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -569,6 +543,7 @@ static void __exit hisi_pa_pmu_module_exit(void)
}
module_exit(hisi_pa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 918cdc31de57..ef058b1dd509 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <asm/cputype.h>
#include <asm/local64.h>
@@ -34,7 +35,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
-EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_event_sysfs_show, "HISI_PMU");
/*
* sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
@@ -46,7 +47,52 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
-EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_cpumask_sysfs_show, "HISI_PMU");
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static ssize_t hisi_associated_cpus_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &hisi_pmu->associated_cpus);
+}
+static DEVICE_ATTR(associated_cpus, 0444, hisi_associated_cpus_sysfs_show, NULL);
+
+static struct attribute *hisi_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ &dev_attr_associated_cpus.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_cpumask_attr_group = {
+ .attrs = hisi_pmu_cpumask_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_cpumask_attr_group, "HISI_PMU");
+
+ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_identifier_attr_show, "HISI_PMU");
+
+static struct device_attribute hisi_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_pmu_identifier_attrs[] = {
+ &hisi_pmu_identifier_attr.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_identifier_group = {
+ .attrs = hisi_pmu_identifier_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_identifier_group, "HISI_PMU");
static bool hisi_validate_event_group(struct perf_event *event)
{
@@ -96,17 +142,7 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
return idx;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
-
-ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
-
- return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
-}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_get_event_idx, "HISI_PMU");
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
@@ -165,7 +201,7 @@ int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_irq, "HISI_PMU");
int hisi_uncore_pmu_event_init(struct perf_event *event)
{
@@ -219,7 +255,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_init, "HISI_PMU");
/*
* Set the counter to count the event that we're interested in,
@@ -273,7 +309,7 @@ void hisi_uncore_pmu_set_event_period(struct perf_event *event)
/* Write start value to the hardware event counter */
hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_set_event_period, "HISI_PMU");
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
@@ -294,7 +330,7 @@ void hisi_uncore_pmu_event_update(struct perf_event *event)
HISI_MAX_PERIOD(hisi_pmu->counter_bits);
local64_add(delta, &event->count);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_update, "HISI_PMU");
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
@@ -317,7 +353,7 @@ void hisi_uncore_pmu_start(struct perf_event *event, int flags)
hisi_uncore_pmu_enable_event(event);
perf_event_update_userpage(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_start, "HISI_PMU");
void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
{
@@ -334,7 +370,7 @@ void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
hisi_uncore_pmu_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_stop, "HISI_PMU");
int hisi_uncore_pmu_add(struct perf_event *event, int flags)
{
@@ -357,7 +393,7 @@ int hisi_uncore_pmu_add(struct perf_event *event, int flags)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_add, "HISI_PMU");
void hisi_uncore_pmu_del(struct perf_event *event, int flags)
{
@@ -369,14 +405,14 @@ void hisi_uncore_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_del, "HISI_PMU");
void hisi_uncore_pmu_read(struct perf_event *event)
{
/* Read hardware counter and update the perf counter statistics */
hisi_uncore_pmu_event_update(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_read, "HISI_PMU");
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
@@ -389,7 +425,7 @@ void hisi_uncore_pmu_enable(struct pmu *pmu)
hisi_pmu->ops->start_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_enable, "HISI_PMU");
void hisi_uncore_pmu_disable(struct pmu *pmu)
{
@@ -397,7 +433,7 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_disable, "HISI_PMU");
/*
@@ -444,22 +480,19 @@ static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
*/
static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
int sccl_id, ccl_id;
- /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
- if (hisi_pmu->sccl_id == -1)
- return true;
-
- if (hisi_pmu->ccl_id == -1) {
+ if (topo->ccl_id == -1) {
/* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
- return sccl_id == hisi_pmu->sccl_id;
+ return sccl_id == topo->sccl_id;
}
hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
- return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
+ return sccl_id == topo->sccl_id && ccl_id == topo->ccl_id;
}
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
@@ -467,13 +500,25 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
- if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
+ /*
+ * If the CPU is not associated with the PMU, initialize hisi_pmu->on_cpu
+ * based on NUMA locality if it hasn't been initialized yet. For PMUs that
+ * do have associated CPUs, it'll be updated later.
+ */
+ if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) {
+ if (hisi_pmu->on_cpu != -1)
+ return 0;
+
+ hisi_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(hisi_pmu->dev));
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(hisi_pmu->on_cpu)));
return 0;
+ }
cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
- /* If another CPU is already managing this PMU, simply return. */
- if (hisi_pmu->on_cpu != -1)
+ /* If another associated CPU is already managing this PMU, simply return. */
+ if (hisi_pmu->on_cpu != -1 &&
+ cpumask_test_cpu(hisi_pmu->on_cpu, &hisi_pmu->associated_cpus))
return 0;
/* Use this CPU in cpumask for event counting */
@@ -484,7 +529,7 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_online_cpu, "HISI_PMU");
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
@@ -492,9 +537,6 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
node);
unsigned int target;
- if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
- return 0;
-
/* Nothing to do if this CPU doesn't own the PMU */
if (hisi_pmu->on_cpu != cpu)
return 0;
@@ -502,10 +544,17 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
/* Give up ownership of the PMU */
hisi_pmu->on_cpu = -1;
- /* Choose a new CPU to migrate ownership of the PMU to */
+ /*
+ * Migrate ownership of the PMU to a new CPU chosen from the PMU's
+ * online associated CPUs if possible; if no associated CPU is online,
+ * migrate to any other online CPU.
+ */
target = cpumask_any_and_but(&hisi_pmu->associated_cpus,
cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
@@ -515,7 +564,36 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_offline_cpu, "HISI_PMU");
+
+/*
+ * Retrieve the topology information from the firmware for the hisi_pmu device.
+ * A topology ID will be -1 if we cannot initialize it, either because the
+ * PMU is not located at that level of the topology or because the firmware
+ * needs to be fixed.
+ */
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev)
+{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
+
+ topo->sccl_id = -1;
+ topo->ccl_id = -1;
+ topo->index_id = -1;
+ topo->sub_id = -1;
+
+ if (device_property_read_u32(dev, "hisilicon,scl-id", &topo->sccl_id))
+ dev_dbg(dev, "no scl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,ccl-id", &topo->ccl_id))
+ dev_dbg(dev, "no ccl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,idx-id", &topo->index_id))
+ dev_dbg(dev, "no idx-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,sub-id", &topo->sub_id))
+ dev_dbg(dev, "no sub-id present\n");
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_topology, "HISI_PMU");
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
@@ -535,7 +613,7 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
pmu->attr_groups = hisi_pmu->pmu_events.attr_groups;
pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
}
-EXPORT_SYMBOL_GPL(hisi_pmu_init);
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_init, "HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC uncore Performance Monitor driver framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 25b2d43b72bf..f4fed2544877 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -81,27 +81,55 @@ struct hisi_pmu_hwevents {
const struct attribute_group **attr_groups;
};
+/**
+ * struct hisi_pmu_topology - Describe the topology hierarchy on which the PMU
+ * is located.
+ * @sccl_id: ID of the SCCL on which the PMU is located.
+ * @sicl_id: ID of the SICL on which the PMU is located.
+ * @scl_id: ID used by the core which is unaware of the SCCL/SICL distinction.
+ * @ccl_id: ID of the CCL (CPU cluster) on which the PMU is located.
+ * @index_id: ID of the PMU module if there are several PMUs at a
+ * particular location in the topology.
+ * @sub_id: submodule ID of the PMU. For example, we use this for DDRC PMU v2
+ * since each DDRC has more than one DMC.
+ *
+ * An ID will be -1 if the PMU isn't located at the corresponding topology level.
+ */
+struct hisi_pmu_topology {
+ /*
+ * SCCL (Super CPU Cluster) and SICL (Super I/O Cluster) are parallel,
+ * so a PMU cannot be located on both an SCCL and a SICL. If the SCCL/SICL
+ * distinction is not relevant, use scl_id instead.
+ */
+ union {
+ int sccl_id;
+ int sicl_id;
+ int scl_id;
+ };
+ int ccl_id;
+ int index_id;
+ int sub_id;
+};
+
/* Generic pmu struct for different pmu types */
struct hisi_pmu {
struct pmu pmu;
const struct hisi_uncore_ops *ops;
const struct hisi_pmu_dev_info *dev_info;
struct hisi_pmu_hwevents pmu_events;
- /* associated_cpus: All CPUs associated with the PMU */
+ struct hisi_pmu_topology topo;
+ /*
+ * CPUs associated with the PMU which are preferred for counting.
+ * May be empty if the PMU has no association (e.g. a PMU on a SICL),
+ * in which case any online CPU will be used.
+ */
cpumask_t associated_cpus;
/* CPU used for counting */
int on_cpu;
int irq;
struct device *dev;
struct hlist_node node;
- int sccl_id;
- int sicl_id;
- int ccl_id;
void __iomem *base;
- /* the ID of the PMU modules */
- u32 index_id;
- /* For DDRC PMU v2: each DDRC has more than one DMC */
- u32 sub_id;
int num_counters;
int counter_bits;
/* check event code range */
@@ -109,6 +137,10 @@ struct hisi_pmu {
u32 identifier;
};
+/* Generic implementation of cpumask/identifier group */
+extern const struct attribute_group hisi_pmu_cpumask_attr_group;
+extern const struct attribute_group hisi_pmu_identifier_group;
+
int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
void hisi_uncore_pmu_read(struct perf_event *event);
int hisi_uncore_pmu_add(struct perf_event *event, int flags);
@@ -132,6 +164,7 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
char *page);
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
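A minimal sketch of how a client driver is expected to consume the new shared topology helper (the hisi_foo_pmu name is hypothetical; the helper, the topo fields, and the -1 sentinel convention come from the hunks above):

static int hisi_foo_pmu_init_data(struct platform_device *pdev,
				  struct hisi_pmu *foo_pmu)
{
	/* Fills topo.{sccl_id,ccl_id,index_id,sub_id}; absent IDs become -1 */
	hisi_uncore_pmu_init_topology(foo_pmu, &pdev->dev);

	/* Validate the cached IDs instead of re-reading device properties */
	if (foo_pmu->topo.sccl_id < 0 || foo_pmu->topo.index_id < 0)
		return -EINVAL;

	return 0;
}

This mirrors the conversion of the SLLC and UC drivers below, which drop their per-driver device_property_read_u32() calls in favour of the cached fields.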
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index c5f4764ee888..dbd079016fc4 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -288,25 +288,22 @@ MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *sllc_pmu)
{
+ hisi_uncore_pmu_init_topology(sllc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and the index ID to identify the SLLC PMU,
* while SCCL_ID is from MPIDR_EL1 by CPU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &sllc_pmu->sccl_id)) {
+ if (sllc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Cannot read sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &sllc_pmu->index_id)) {
+ if (sllc_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- /* SLLC PMUs only share the same SCCL */
- sllc_pmu->ccl_id = -1;
-
sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sllc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n");
@@ -347,34 +344,11 @@ static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
.attrs = hisi_sllc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
- .attrs = hisi_sllc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_sllc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
- &hisi_sllc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_sllc_pmu_identifier_group = {
- .attrs = hisi_sllc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
&hisi_sllc_pmu_v2_format_group,
&hisi_sllc_pmu_v2_events_group,
- &hisi_sllc_pmu_cpumask_attr_group,
- &hisi_sllc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -433,8 +407,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
- sllc_pmu->sccl_id, sllc_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_sllc%d",
+ sllc_pmu->topo.sccl_id, sllc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -507,6 +481,7 @@ static void __exit hisi_sllc_pmu_module_exit(void)
}
module_exit(hisi_sllc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 481dcc9e8fbf..03cb9b564b99 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
-#include <linux/property.h>
#include "hisi_uncore_pmu.h"
@@ -366,25 +365,24 @@ static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
static int hisi_uc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *uc_pmu)
{
+ hisi_uncore_pmu_init_topology(uc_pmu, &pdev->dev);
+
/*
* Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
* identify the topology information of UC PMU devices in the chip.
* They have some CCLs per SCCL and then 4 UC PMU per CCL.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &uc_pmu->sccl_id)) {
+ if (uc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &uc_pmu->ccl_id)) {
+ if (uc_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &uc_pmu->sub_id)) {
+ if (uc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -439,34 +437,11 @@ static const struct attribute_group hisi_uc_pmu_events_group = {
.attrs = hisi_uc_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
- .attrs = hisi_uc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_uc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
- &hisi_uc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_uc_pmu_identifier_group = {
- .attrs = hisi_uc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
&hisi_uc_pmu_format_group,
&hisi_uc_pmu_events_group,
- &hisi_uc_pmu_cpumask_attr_group,
- &hisi_uc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -538,8 +513,9 @@ static int hisi_uc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
- uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%d",
+ uc_pmu->topo.sccl_id, uc_pmu->topo.ccl_id,
+ uc_pmu->topo.sub_id);
if (!name)
return -ENOMEM;
@@ -613,6 +589,7 @@ static void __exit hisi_uc_pmu_module_exit(void)
}
module_exit(hisi_uc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 8860d9f687ae..039feded9152 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
+/*
+ * Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
*
- * Copyright (C) 2021 Marvell.
+ * Copyright (C) 2021-2024 Marvell.
*/
#include <linux/init.h>
@@ -14,24 +15,29 @@
#include <linux/platform_device.h>
/* Performance Counters Operating Mode Control Registers */
-#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
-#define OP_MODE_CTRL_VAL_MANNUAL 0x1
+#define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
+#define ODY_DDRC_PERF_CNT_OP_MODE_CTRL 0x20020
+#define OP_MODE_CTRL_VAL_MANUAL 0x1
/* Performance Counters Start Operation Control Registers */
-#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define CN10K_DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define ODY_DDRC_PERF_CNT_START_OP_CTRL 0x200A0
#define START_OP_CTRL_VAL_START 0x1ULL
#define START_OP_CTRL_VAL_ACTIVE 0x2
/* Performance Counters End Operation Control Registers */
-#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define CN10K_DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define ODY_DDRC_PERF_CNT_END_OP_CTRL 0x200E0
#define END_OP_CTRL_VAL_END 0x1ULL
/* Performance Counters End Status Registers */
-#define DDRC_PERF_CNT_END_STATUS 0x8038
+#define CN10K_DDRC_PERF_CNT_END_STATUS 0x8038
+#define ODY_DDRC_PERF_CNT_END_STATUS 0x20120
#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
/* Performance Counters Configuration Registers */
-#define DDRC_PERF_CFG_BASE 0x8040
+#define CN10K_DDRC_PERF_CFG_BASE 0x8040
+#define ODY_DDRC_PERF_CFG_BASE 0x20160
/* 8 Generic event counter + 2 fixed event counters */
#define DDRC_PERF_NUM_GEN_COUNTERS 8
@@ -42,18 +48,28 @@
DDRC_PERF_NUM_FIX_COUNTERS)
/* Generic event counter registers */
-#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
+#define DDRC_PERF_CFG(base, n) ((base) + 8 * (n))
#define EVENT_ENABLE BIT_ULL(63)
/* Two dedicated event counters for DDR reads and writes */
#define EVENT_DDR_READS 101
#define EVENT_DDR_WRITES 100
+#define DDRC_PERF_REG(base, n) ((base) + 8 * (n))
/*
* programmable events IDs in programmable event counters.
* DO NOT change these event-id numbers, they are used to
* program event bitmap in h/w.
*/
+#define EVENT_DFI_CMD_IS_RETRY 61
+#define EVENT_RD_UC_ECC_ERROR 60
+#define EVENT_RD_CRC_ERROR 59
+#define EVENT_CAPAR_ERROR 58
+#define EVENT_WR_CRC_ERROR 57
+#define EVENT_DFI_PARITY_POISON 56
+#define EVENT_RETRY_FIFO_FULL 46
+#define EVENT_DFI_CYCLES 45
+
#define EVENT_OP_IS_ZQLATCH 55
#define EVENT_OP_IS_ZQSTART 54
#define EVENT_OP_IS_TCR_MRR 53
@@ -102,28 +118,37 @@
#define EVENT_HIF_RD_OR_WR 1
/* Event counter value registers */
-#define DDRC_PERF_CNT_VALUE_BASE 0x8080
-#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
+#define CN10K_DDRC_PERF_CNT_VALUE_BASE 0x8080
+#define ODY_DDRC_PERF_CNT_VALUE_BASE 0x201C0
/* Fixed event counter enable/disable register */
-#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
+#define CN10K_DDRC_PERF_CNT_FREERUN_EN 0x80C0
#define DDRC_PERF_FREERUN_WRITE_EN 0x1
#define DDRC_PERF_FREERUN_READ_EN 0x2
/* Fixed event counter control register */
-#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define CN10K_DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define ODY_DDRC_PERF_CNT_FREERUN_CTRL 0x20240
#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
#define DDRC_FREERUN_READ_CNT_CLR 0x2
-/* Fixed event counter value register */
-#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
-#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+/* Fixed event counter clear register, defined only for Odyssey */
+#define ODY_DDRC_PERF_CNT_FREERUN_CLR 0x20248
+
#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
+/* Fixed event counter value register */
+#define CN10K_DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
+#define CN10K_DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+#define ODY_DDRC_PERF_CNT_VALUE_WR_OP 0x20250
+#define ODY_DDRC_PERF_CNT_VALUE_RD_OP 0x20258
+
struct cn10k_ddr_pmu {
struct pmu pmu;
void __iomem *base;
+ const struct ddr_pmu_platform_data *p_data;
+ const struct ddr_pmu_ops *ops;
unsigned int cpu;
struct device *dev;
int active_events;
@@ -132,8 +157,36 @@ struct cn10k_ddr_pmu {
struct hlist_node node;
};
+struct ddr_pmu_ops {
+ void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
+};
+
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
+struct ddr_pmu_platform_data {
+ u64 counter_overflow_val;
+ u64 counter_max_val;
+ u64 cnt_base;
+ u64 cfg_base;
+ u64 cnt_op_mode_ctrl;
+ u64 cnt_start_op_ctrl;
+ u64 cnt_end_op_ctrl;
+ u64 cnt_end_status;
+ u64 cnt_freerun_en;
+ u64 cnt_freerun_ctrl;
+ u64 cnt_freerun_clr;
+ u64 cnt_value_wr_op;
+ u64 cnt_value_rd_op;
+ bool is_cn10k;
+ bool is_ody;
+};
+
static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -209,6 +262,85 @@ static struct attribute *cn10k_ddr_perf_events_attrs[] = {
NULL
};
+static struct attribute *odyssey_ddr_perf_events_attrs[] = {
+ /* Programmable */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_wr_data_access,
+ EVENT_DFI_WR_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_rd_data_access,
+ EVENT_DFI_RD_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
+ EVENT_HPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
+ EVENT_LPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
+ EVENT_WR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access,
+ EVENT_OP_IS_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access,
+ EVENT_OP_IS_RD_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr,
+ EVENT_PRECHARGE_FOR_RDWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
+ EVENT_PRECHARGE_FOR_OTHER),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown,
+ EVENT_OP_IS_ENTER_POWERDOWN),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cycles, EVENT_DFI_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_retry_fifo_full,
+ EVENT_RETRY_FIFO_FULL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_parity_poison,
+ EVENT_DFI_PARITY_POISON),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_crc_error, EVENT_WR_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_capar_error, EVENT_CAPAR_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_crc_error, EVENT_RD_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_uc_ecc_error, EVENT_RD_UC_ECC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cmd_is_retry, EVENT_DFI_CMD_IS_RETRY),
+ /* Free run event counters */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
+ NULL
+};
+
+static struct attribute_group odyssey_ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = odyssey_ddr_perf_events_attrs,
+};
+
static struct attribute_group cn10k_ddr_perf_events_attr_group = {
.name = "events",
.attrs = cn10k_ddr_perf_events_attrs,
@@ -254,6 +386,13 @@ static const struct attribute_group *cn10k_attr_groups[] = {
NULL,
};
+static const struct attribute_group *odyssey_attr_groups[] = {
+ &odyssey_ddr_perf_events_attr_group,
+ &cn10k_ddr_perf_format_attr_group,
+ &cn10k_ddr_perf_cpumask_attr_group,
+ NULL
+};
+
/* Default poll timeout is 100 sec, which is very sufficient for
* 48 bit counter incremented max at 5.6 GT/s, which may take many
* hours to overflow.
@@ -266,9 +405,18 @@ static ktime_t cn10k_ddr_pmu_timer_period(void)
return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC);
}
-static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
+static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap,
+ struct cn10k_ddr_pmu *ddr_pmu)
{
+ int err = 0;
+
switch (eventid) {
+	case EVENT_DFI_PARITY_POISON ... EVENT_DFI_CMD_IS_RETRY:
+ if (!ddr_pmu->p_data->is_ody) {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
*event_bitmap = (1ULL << (eventid - 1));
@@ -279,11 +427,12 @@ static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
*event_bitmap = (0xFULL << (eventid - 1));
break;
default:
- pr_err("%s Invalid eventid %d\n", __func__, eventid);
- return -EINVAL;
+ err = -EINVAL;
}
- return 0;
+ if (err)
+ pr_err("%s Invalid eventid %d\n", __func__, eventid);
+ return err;
}
static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
@@ -351,9 +500,33 @@ static int cn10k_ddr_perf_event_init(struct perf_event *event)
return 0;
}
+static void cn10k_ddr_perf_counter_start(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_start_op_ctrl;
+
+ writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
+static void cn10k_ddr_perf_counter_stop(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_end_op_ctrl;
+
+ writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
int counter, bool enable)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl;
+ const struct ddr_pmu_ops *ops = pmu->ops;
+ bool is_ody = pmu->p_data->is_ody;
u32 reg;
u64 val;
@@ -363,7 +536,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
}
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
- reg = DDRC_PERF_CFG(counter);
+ reg = DDRC_PERF_CFG(p_data->cfg_base, counter);
val = readq_relaxed(pmu->base + reg);
if (enable)
@@ -372,40 +545,52 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
val &= ~EVENT_ENABLE;
writeq_relaxed(val, pmu->base + reg);
- } else {
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
- if (enable) {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val |= DDRC_PERF_FREERUN_READ_EN;
- else
- val |= DDRC_PERF_FREERUN_WRITE_EN;
- } else {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val &= ~DDRC_PERF_FREERUN_READ_EN;
- else
- val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ if (is_ody) {
+ if (enable) {
+ /*
+ * Setup the PMU counter to work in
+ * manual mode
+ */
+ reg = DDRC_PERF_REG(ctrl_reg, counter);
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL,
+ pmu->base + reg);
+
+ cn10k_ddr_perf_counter_start(pmu, counter);
+ } else {
+ cn10k_ddr_perf_counter_stop(pmu, counter);
+ }
}
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ } else {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ ops->enable_read_freerun_counter(pmu, enable);
+ else
+ ops->enable_write_freerun_counter(pmu, enable);
}
}
static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
u64 val;
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_rd_op);
if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_wr_op);
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
+ val = readq_relaxed(pmu->base +
+ DDRC_PERF_REG(p_data->cnt_base, counter));
return val;
}
static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
struct hw_perf_event *hwc = &event->hw;
u64 prev_count, new_count, mask;
@@ -414,7 +599,7 @@ static void cn10k_ddr_perf_event_update(struct perf_event *event)
new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
} while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
- mask = DDRC_PERF_CNT_MAX_VALUE;
+ mask = p_data->counter_max_val;
local64_add((new_count - prev_count) & mask, &event->count);
}
@@ -435,6 +620,8 @@ static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct hw_perf_event *hwc = &event->hw;
u8 config = event->attr.config;
int counter, ret;
@@ -454,8 +641,8 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
/* Generic counters, configure event id */
- reg_offset = DDRC_PERF_CFG(counter);
- ret = ddr_perf_get_event_bitmap(config, &val);
+ reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter);
+ ret = ddr_perf_get_event_bitmap(config, &val, pmu);
if (ret)
return ret;
@@ -463,11 +650,9 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
} else {
/* fixed event counter, clear counter value */
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val = DDRC_FREERUN_READ_CNT_CLR;
+ ops->clear_read_freerun_counter(pmu);
else
- val = DDRC_FREERUN_WRITE_CNT_CLR;
-
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
+ ops->clear_write_freerun_counter(pmu);
}
hwc->state |= PERF_HES_STOPPED;
@@ -512,17 +697,19 @@ static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
- DDRC_PERF_CNT_START_OP_CTRL);
+ p_data->cnt_start_op_ctrl);
}
static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
- DDRC_PERF_CNT_END_OP_CTRL);
+ p_data->cnt_end_op_ctrl);
}
static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
@@ -547,8 +734,123 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
}
}
+static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+ cn10k_ddr_perf_event_update_all(pmu);
+ cn10k_ddr_perf_pmu_disable(&pmu->pmu);
+ cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+}
+
+static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_overflow_handler(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+	/*
+	 * On reaching its maximum value the counter freezes. Update the
+	 * event, then stop and restart the counter so that it starts
+	 * counting from zero again.
+	 */
+ cn10k_ddr_perf_event_update(pmu->events[evt_idx]);
+ cn10k_ddr_perf_counter_stop(pmu, evt_idx);
+ cn10k_ddr_perf_counter_start(pmu, evt_idx);
+}
+
static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct perf_event *event;
struct hw_perf_event *hwc;
u64 prev_count, new_count;
@@ -586,11 +888,9 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
continue;
value = cn10k_ddr_perf_read_counter(pmu, i);
- if (value == DDRC_PERF_CNT_MAX_VALUE) {
+ if (value == p_data->counter_max_val) {
pr_info("Counter-(%d) reached max value\n", i);
- cn10k_ddr_perf_event_update_all(pmu);
- cn10k_ddr_perf_pmu_disable(&pmu->pmu);
- cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+ ops->pmu_overflow_handler(pmu, i);
}
}
@@ -629,11 +929,68 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
+static const struct ddr_pmu_ops ddr_pmu_ops = {
+ .enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
+	.pmu_overflow_handler = ddr_pmu_overflow_handler,
+};
+
+#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
+static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
+ .counter_overflow_val = BIT_ULL(48),
+ .counter_max_val = GENMASK_ULL(48, 0),
+ .cnt_base = CN10K_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = CN10K_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = CN10K_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = CN10K_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = CN10K_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = CN10K_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = CN10K_DDRC_PERF_CNT_FREERUN_EN,
+ .cnt_freerun_ctrl = CN10K_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = 0,
+ .cnt_value_wr_op = CN10K_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = CN10K_DDRC_PERF_CNT_VALUE_RD_OP,
+	.is_cn10k = true,
+};
+#endif
+
+static const struct ddr_pmu_ops ddr_pmu_ody_ops = {
+ .enable_read_freerun_counter = ddr_pmu_ody_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_ody_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_ody_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_ody_write_clear_freerun,
+	.pmu_overflow_handler = ddr_pmu_ody_overflow_handler,
+};
+
+#ifdef CONFIG_ACPI
+static const struct ddr_pmu_platform_data odyssey_ddr_pmu_pdata = {
+ .counter_overflow_val = 0,
+ .counter_max_val = GENMASK_ULL(63, 0),
+ .cnt_base = ODY_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = ODY_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = ODY_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = ODY_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = ODY_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = ODY_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = 0,
+ .cnt_freerun_ctrl = ODY_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = ODY_DDRC_PERF_CNT_FREERUN_CLR,
+ .cnt_value_wr_op = ODY_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = ODY_DDRC_PERF_CNT_VALUE_RD_OP,
+	.is_ody = true,
+};
+#endif
+
static int cn10k_ddr_perf_probe(struct platform_device *pdev)
{
+ const struct ddr_pmu_platform_data *dev_data;
struct cn10k_ddr_pmu *ddr_pmu;
struct resource *res;
void __iomem *base;
+ bool is_cn10k;
+ bool is_ody;
char *name;
int ret;
@@ -644,30 +1001,60 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
ddr_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, ddr_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
ddr_pmu->base = base;
- /* Setup the PMU counter to work in manual mode */
- writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
- DDRC_PERF_CNT_OP_MODE_CTRL);
-
- ddr_pmu->pmu = (struct pmu) {
- .module = THIS_MODULE,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- .task_ctx_nr = perf_invalid_context,
- .attr_groups = cn10k_attr_groups,
- .event_init = cn10k_ddr_perf_event_init,
- .add = cn10k_ddr_perf_event_add,
- .del = cn10k_ddr_perf_event_del,
- .start = cn10k_ddr_perf_event_start,
- .stop = cn10k_ddr_perf_event_stop,
- .read = cn10k_ddr_perf_event_update,
- .pmu_enable = cn10k_ddr_perf_pmu_enable,
- .pmu_disable = cn10k_ddr_perf_pmu_disable,
- };
+ ddr_pmu->p_data = dev_data;
+ is_cn10k = ddr_pmu->p_data->is_cn10k;
+ is_ody = ddr_pmu->p_data->is_ody;
+
+ if (is_cn10k) {
+ ddr_pmu->ops = &ddr_pmu_ops;
+ /* Setup the PMU counter to work in manual mode */
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base +
+ ddr_pmu->p_data->cnt_op_mode_ctrl);
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cn10k_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ .pmu_enable = cn10k_ddr_perf_pmu_enable,
+ .pmu_disable = cn10k_ddr_perf_pmu_disable,
+ };
+ }
+
+ if (is_ody) {
+ ddr_pmu->ops = &ddr_pmu_ody_ops;
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = odyssey_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ };
+ }
/* Choose this cpu to collect perf data */
ddr_pmu->cpu = raw_smp_processor_id();
@@ -688,7 +1075,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (ret)
goto error;
- pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
+ pr_info("DDR PMU Driver for ddrc@%llx\n", res->start);
return 0;
error:
cpuhp_state_remove_instance_nocalls(
@@ -710,7 +1097,7 @@ static void cn10k_ddr_perf_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-ddr-pmu", },
+ { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
@@ -718,7 +1105,8 @@ MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
- {"MRVL000A", 0},
+ {"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata },
+ {"MRVL000C", (kernel_ulong_t)&odyssey_ddr_pmu_pdata},
{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
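A minimal sketch, assuming the struct ddr_pmu_ops and platform-data definitions above, of how the indirection keeps the event paths SoC-agnostic (ddr_perf_clear_fixed is a hypothetical helper, not part of the patch):

static void ddr_perf_clear_fixed(struct cn10k_ddr_pmu *pmu, bool read)
{
	/* pmu->ops was set once at probe time (&ddr_pmu_ops for CN10K,
	 * &ddr_pmu_ody_ops for Odyssey), so the hot paths need no
	 * per-call SoC checks.
	 */
	if (read)
		pmu->ops->clear_read_freerun_counter(pmu);
	else
		pmu->ops->clear_write_freerun_counter(pmu);
}

Register offsets that differ between the two SoCs are likewise reached only through pmu->p_data, which is why the CN10K-only and Odyssey-only offsets can coexist in one driver.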
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index cda55ee35eee..51ccb0befa05 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -37,6 +37,15 @@ struct tad_pmu {
DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
};
+enum mrvl_tad_pmu_version {
+ TAD_PMU_V1 = 1,
+ TAD_PMU_V2,
+};
+
+struct tad_pmu_data {
+ int id;
+};
+
static int tad_pmu_cpuhp_state;
static void tad_pmu_event_counter_read(struct perf_event *event)
@@ -214,6 +223,24 @@ static const struct attribute_group tad_pmu_events_attr_group = {
.attrs = tad_pmu_event_attrs,
};
+static struct attribute *ody_tad_pmu_event_attrs[] = {
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
+ TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
+ TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
+ TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
+ TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
+ TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
+ TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
+ TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
+ TAD_PMU_EVENT_ATTR(tad_tot_cycle, 0xFF),
+ NULL
+};
+
+static const struct attribute_group ody_tad_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ody_tad_pmu_event_attrs,
+};
+
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *tad_pmu_format_attrs[] = {
@@ -252,8 +279,16 @@ static const struct attribute_group *tad_pmu_attr_groups[] = {
NULL
};
+static const struct attribute_group *ody_tad_pmu_attr_groups[] = {
+ &ody_tad_pmu_events_attr_group,
+ &tad_pmu_format_attr_group,
+ &tad_pmu_cpumask_attr_group,
+ NULL
+};
+
static int tad_pmu_probe(struct platform_device *pdev)
{
+ const struct tad_pmu_data *dev_data;
struct device *dev = &pdev->dev;
struct tad_region *regions;
struct tad_pmu *tad_pmu;
@@ -261,6 +296,7 @@ static int tad_pmu_probe(struct platform_device *pdev)
u32 tad_pmu_page_size;
u32 tad_page_size;
u32 tad_cnt;
+ int version;
int i, ret;
char *name;
@@ -270,6 +306,13 @@ static int tad_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tad_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+ version = dev_data->id;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Mem resource not found\n");
@@ -319,7 +362,6 @@ static int tad_pmu_probe(struct platform_device *pdev)
tad_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
- .attr_groups = tad_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE |
PERF_PMU_CAP_NO_INTERRUPT,
.task_ctx_nr = perf_invalid_context,
@@ -332,6 +374,11 @@ static int tad_pmu_probe(struct platform_device *pdev)
.read = tad_pmu_event_counter_read,
};
+ if (version == TAD_PMU_V1)
+ tad_pmu->pmu.attr_groups = tad_pmu_attr_groups;
+ else
+ tad_pmu->pmu.attr_groups = ody_tad_pmu_attr_groups;
+
tad_pmu->cpu = raw_smp_processor_id();
/* Register pmu instance for cpu hotplug */
@@ -360,16 +407,29 @@ static void tad_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
}
+#if defined(CONFIG_OF) || defined(CONFIG_ACPI)
+static const struct tad_pmu_data tad_pmu_data = {
+ .id = TAD_PMU_V1,
+};
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct tad_pmu_data tad_pmu_v2_data = {
+ .id = TAD_PMU_V2,
+};
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-tad-pmu", },
+ { .compatible = "marvell,cn10k-tad-pmu", .data = &tad_pmu_data },
{},
};
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id tad_pmu_acpi_match[] = {
- {"MRVL000B", 0},
+ {"MRVL000B", (kernel_ulong_t)&tad_pmu_data},
+ {"MRVL000D", (kernel_ulong_t)&tad_pmu_v2_data},
{},
};
MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
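Both Marvell drivers in this patch use the same match-data plumbing; as a sketch, the probe-side lookup works identically whether the device was bound through the OF table (the .data pointer) or the ACPI table (driver_data cast to kernel_ulong_t). The example_tad_probe name below is hypothetical:

static int example_tad_probe(struct platform_device *pdev)
{
	const struct tad_pmu_data *data;

	data = device_get_match_data(&pdev->dev);	/* OF or ACPI path */
	if (!data)
		return -ENODEV;

	/* Per-version behaviour keys off the matched data, e.g. the
	 * TAD_PMU_V1 vs TAD_PMU_V2 attribute-group selection above.
	 */
	return 0;
}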
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 84af6aae36d1..a96be8f244e0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -21,6 +21,7 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "../pinctrl-utils.h"
@@ -254,7 +255,7 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev, "gpio:%u level_low:%s falling:%s\n", gpio,
- level_low ? "true" : "false", falling ? "true" : "false");
+ str_true_false(level_low), str_true_false(falling));
return 0;
}
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index 014297a3fbd2..0f32866a4aef 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -1068,7 +1069,7 @@ static void lochnagar_gpio_set(struct gpio_chip *chip,
value = !!value;
dev_dbg(priv->dev, "Set GPIO %s to %s\n",
- pin->name, value ? "high" : "low");
+ pin->name, str_high_low(value));
switch (pin->type) {
case LN_PTYPE_MUX:
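The str_true_false()/str_high_low() conversions in the two pinctrl hunks above are behaviour-preserving; conceptually the helpers reduce to the following (simplified sketch, the real definitions live in <linux/string_choices.h>):

static inline const char *str_true_false(bool v)
{
	return v ? "true" : "false";
}

static inline const char *str_high_low(bool v)
{
	return v ? "high" : "low";
}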
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index b3eec63c00ba..4bdbf6bb26e2 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1256,6 +1256,20 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
DL_FLAG_AUTOREMOVE_CONSUMER);
}
+static void pinctrl_cond_disable_mux_setting(struct pinctrl_state *state,
+ struct pinctrl_setting *target_setting)
+{
+ struct pinctrl_setting *setting;
+
+ list_for_each_entry(setting, &state->settings, node) {
+ if (target_setting && (&setting->node == &target_setting->node))
+ break;
+
+ if (setting->type == PIN_MAP_TYPE_MUX_GROUP)
+ pinmux_disable_setting(setting);
+ }
+}
+
/**
* pinctrl_commit_state() - select/activate/program a pinctrl state to HW
* @p: the pinctrl handle for the device that requests configuration
@@ -1263,7 +1277,7 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
*/
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
- struct pinctrl_setting *setting, *setting2;
+ struct pinctrl_setting *setting;
struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
@@ -1274,11 +1288,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
- list_for_each_entry(setting, &old_state->settings, node) {
- if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
- continue;
- pinmux_disable_setting(setting);
- }
+ pinctrl_cond_disable_mux_setting(old_state, NULL);
}
p->state = NULL;
@@ -1322,7 +1332,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
}
if (ret < 0) {
- goto unapply_new_state;
+ goto unapply_mux_setting;
}
/* Do not link hogs (circular dependency) */
@@ -1334,23 +1344,23 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
return 0;
+unapply_mux_setting:
+ pinctrl_cond_disable_mux_setting(state, NULL);
+ goto restore_old_state;
+
unapply_new_state:
dev_err(p->dev, "Error applying setting, reverse things back\n");
- list_for_each_entry(setting2, &state->settings, node) {
- if (&setting2->node == &setting->node)
- break;
- /*
- * All we can do here is pinmux_disable_setting.
- * That means that some pins are muxed differently now
- * than they were before applying the setting (We can't
- * "unmux a pin"!), but it's not a big deal since the pins
- * are free to be muxed by another apply_setting.
- */
- if (setting2->type == PIN_MAP_TYPE_MUX_GROUP)
- pinmux_disable_setting(setting2);
- }
+ /*
+ * All we can do here is pinmux_disable_setting.
+ * That means that some pins are muxed differently now
+ * than they were before applying the setting (We can't
+ * "unmux a pin"!), but it's not a big deal since the pins
+ * are free to be muxed by another apply_setting.
+ */
+ pinctrl_cond_disable_mux_setting(state, setting);
+restore_old_state:
/* There's no infinite recursive loop here because p->state is NULL */
if (old_state)
pinctrl_select_state(p, old_state);
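To restate the semantics of the new core.c helper as a usage sketch: a NULL target disables every PIN_MAP_TYPE_MUX_GROUP setting in the state (the old-state sweep), while a non-NULL target stops at the setting that failed to apply, reproducing the former setting2 rollback loop:

	/* Full sweep: disable all mux settings of the outgoing state */
	pinctrl_cond_disable_mux_setting(old_state, NULL);

	/* Partial rollback: disable only the mux settings applied
	 * before 'setting', the one that failed.
	 */
	pinctrl_cond_disable_mux_setting(state, setting);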
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index a417a031659c..58f32818a0e6 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -202,6 +202,13 @@ config PINCTRL_MT7986
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
+config PINCTRL_MT7988
+ bool "Mediatek MT7988 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_MOORE
+
config PINCTRL_MT8167
bool "MediaTek MT8167 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 1405d434218e..721ae83476d0 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
obj-$(CONFIG_PINCTRL_MT7981) += pinctrl-mt7981.o
obj-$(CONFIG_PINCTRL_MT7986) += pinctrl-mt7986.o
+obj-$(CONFIG_PINCTRL_MT7988) += pinctrl-mt7988.o
obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7988.c b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
new file mode 100644
index 000000000000..68b4097792b8
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
@@ -0,0 +1,1556 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MT7988 pinctrl driver based on the Linux generic pinctrl binding.
+ *
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#include "pinctrl-moore.h"
+
+enum mt7988_pinctrl_reg_page {
+ GPIO_BASE,
+ IOCFG_TR_BASE,
+ IOCFG_BR_BASE,
+ IOCFG_RB_BASE,
+ IOCFG_LB_BASE,
+ IOCFG_TL_BASE,
+};
+
+#define MT7988_PIN(_number, _name) MTK_PIN(_number, _name, 0, _number, DRV_GRP4)
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 1)
+
+static const struct mtk_pin_field_calc mt7988_pin_mode_range[] = {
+ PIN_FIELD(0, 83, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_dir_range[] = {
+ PIN_FIELD(0, 83, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_di_range[] = {
+ PIN_FIELD(0, 83, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_do_range[] = {
+ PIN_FIELD(0, 83, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x30, 0x10, 13, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x30, 0x10, 14, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x30, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x30, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x30, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(7, 7, 4, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x30, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x40, 0x10, 21, 1),
+ PIN_FIELD_BASE(13, 13, 1, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x40, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0x30, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x30, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x50, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x50, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x50, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x50, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x50, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x50, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x50, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x50, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x50, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x50, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x50, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x50, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x50, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x50, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x50, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x50, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x50, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x50, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x50, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x50, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x50, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x50, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x50, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x50, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x40, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x40, 0x10, 15, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x40, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x40, 0x10, 20, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x40, 0x10, 8, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x40, 0x10, 9, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x40, 0x10, 10, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x40, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x40, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0x30, 0x10, 6, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x30, 0x10, 10, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(75, 75, 4, 0x30, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x30, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x40, 0x10, 18, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x40, 0x10, 19, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x40, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x40, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0xc0, 0x10, 13, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0xc0, 0x10, 14, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0xc0, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0xc0, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0xc0, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0xc0, 0x10, 9, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0xc0, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(7, 7, 4, 0xb0, 0x10, 8, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0xb0, 0x10, 6, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0xb0, 0x10, 5, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0xb0, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0xe0, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0xe0, 0x10, 21, 1),
+ PIN_FIELD_BASE(13, 13, 1, 0xe0, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0xe0, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0xc0, 0x10, 7, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0xc0, 0x10, 8, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0xc0, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0xc0, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0xb0, 0x10, 7, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0xb0, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x140, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x140, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x140, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x140, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x140, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x140, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x140, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x140, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x140, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x140, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x140, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x140, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x150, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x140, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x140, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x140, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x150, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x140, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x140, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x140, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x140, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x140, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x140, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x140, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x140, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x140, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x140, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x140, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x140, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x140, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x140, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x140, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x140, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x140, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0xe0, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0xe0, 0x10, 15, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0xe0, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0xe0, 0x10, 4, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0xe0, 0x10, 5, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0xe0, 0x10, 6, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0xe0, 0x10, 3, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0xe0, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0xe0, 0x10, 20, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0xe0, 0x10, 8, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0xe0, 0x10, 9, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0xe0, 0x10, 10, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0xe0, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0xe0, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0xc0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0xc0, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0xc0, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0xc0, 0x10, 6, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0xb0, 0x10, 10, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0xb0, 0x10, 1, 1),
+ PIN_FIELD_BASE(75, 75, 4, 0xb0, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0xb0, 0x10, 9, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0xb0, 0x10, 2, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0xb0, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0xb0, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0xe0, 0x10, 18, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0xe0, 0x10, 19, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0xe0, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0xe0, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pu_range[] = {
+ PIN_FIELD_BASE(7, 7, 4, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x60, 0x10, 4, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x60, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(13, 13, 1, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x70, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(75, 75, 4, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x60, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pd_range[] = {
+ PIN_FIELD_BASE(7, 7, 4, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x40, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(13, 13, 1, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x50, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0x40, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(63, 63, 1, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0x40, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(75, 75, 4, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x40, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(1, 1, 5, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(2, 2, 5, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(3, 3, 5, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(4, 4, 5, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(5, 5, 5, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(6, 6, 5, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(7, 7, 4, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(8, 8, 4, 0x00, 0x10, 28, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(10, 10, 4, 0x00, 0x10, 9, 3),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(12, 12, 1, 0x20, 0x10, 3, 3),
+ PIN_FIELD_BASE(13, 13, 1, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(14, 14, 1, 0x00, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(20, 20, 4, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x10, 0x10, 21, 3),
+ PIN_FIELD_BASE(22, 22, 3, 0x20, 0x10, 9, 3),
+ PIN_FIELD_BASE(23, 23, 3, 0x20, 0x10, 0, 3),
+ PIN_FIELD_BASE(24, 24, 3, 0x10, 0x10, 27, 3),
+ PIN_FIELD_BASE(25, 25, 3, 0x20, 0x10, 3, 3),
+ PIN_FIELD_BASE(26, 26, 3, 0x20, 0x10, 6, 3),
+ PIN_FIELD_BASE(27, 27, 3, 0x10, 0x10, 24, 3),
+ PIN_FIELD_BASE(28, 28, 3, 0x20, 0x10, 15, 3),
+ PIN_FIELD_BASE(29, 29, 3, 0x20, 0x10, 18, 3),
+ PIN_FIELD_BASE(30, 30, 3, 0x20, 0x10, 21, 3),
+ PIN_FIELD_BASE(31, 31, 3, 0x20, 0x10, 12, 3),
+ PIN_FIELD_BASE(32, 32, 3, 0x20, 0x10, 24, 3),
+ PIN_FIELD_BASE(33, 33, 3, 0x30, 0x10, 6, 3),
+ PIN_FIELD_BASE(34, 34, 3, 0x30, 0x10, 3, 3),
+ PIN_FIELD_BASE(35, 35, 3, 0x20, 0x10, 27, 3),
+ PIN_FIELD_BASE(36, 36, 3, 0x30, 0x10, 0, 3),
+ PIN_FIELD_BASE(37, 37, 3, 0x30, 0x10, 9, 3),
+ PIN_FIELD_BASE(38, 38, 3, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(39, 39, 3, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(40, 40, 3, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(41, 41, 3, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(42, 42, 3, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(43, 43, 3, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(44, 44, 3, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(45, 45, 3, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(46, 46, 3, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(47, 47, 3, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(48, 48, 3, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(49, 49, 3, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(50, 50, 3, 0x10, 0x10, 15, 3),
+ PIN_FIELD_BASE(51, 51, 3, 0x10, 0x10, 6, 3),
+ PIN_FIELD_BASE(52, 52, 3, 0x10, 0x10, 9, 3),
+ PIN_FIELD_BASE(53, 53, 3, 0x10, 0x10, 12, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x10, 0x10, 18, 3),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x10, 0x10, 12, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x10, 0x10, 15, 3),
+ PIN_FIELD_BASE(57, 57, 1, 0x10, 0x10, 9, 3),
+ PIN_FIELD_BASE(58, 58, 1, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(63, 63, 1, 0x20, 0x10, 0, 3),
+ PIN_FIELD_BASE(64, 64, 1, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(65, 65, 1, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(66, 66, 1, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(67, 67, 1, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 1, 0x10, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(70, 70, 5, 0x00, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(74, 74, 4, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(75, 75, 4, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(76, 76, 4, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(77, 77, 4, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(78, 78, 4, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(79, 79, 4, 0x10, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x10, 0x10, 24, 3),
+ PIN_FIELD_BASE(81, 81, 1, 0x10, 0x10, 27, 3),
+ PIN_FIELD_BASE(82, 82, 1, 0x10, 0x10, 18, 3),
+ PIN_FIELD_BASE(83, 83, 1, 0x10, 0x10, 21, 3),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pupd_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x50, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x60, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x50, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x70, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x70, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x70, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x70, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x70, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x70, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x70, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x70, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x70, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x70, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x70, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x70, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x80, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x70, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x70, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x70, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x80, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x70, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x70, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x70, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x70, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x70, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x70, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x70, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x70, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x70, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x70, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x70, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x70, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x70, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x70, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x60, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x60, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x60, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x60, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x60, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x60, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x60, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x60, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x50, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x50, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x60, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x60, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x60, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x60, 0x10, 15, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_r0_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x60, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x60, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x80, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x80, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x70, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x70, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x90, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x90, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x90, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x90, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x90, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x90, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x90, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x90, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x90, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x90, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x90, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x90, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0xa0, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x90, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x90, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x90, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0xa0, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x90, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x90, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x90, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x90, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x90, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x90, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x90, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x90, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x90, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x80, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x80, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x80, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x80, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x80, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x80, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x80, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x80, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x80, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x80, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x80, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x80, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x80, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x60, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x70, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x80, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x80, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x80, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x80, 0x10, 15, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_r1_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x70, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x70, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x70, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x70, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x70, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x90, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x80, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x80, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0xb0, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0xb0, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0xb0, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0xb0, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0xb0, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0xb0, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0xb0, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0xb0, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0xb0, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0xb0, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0xb0, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0xb0, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0xc0, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0xb0, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0xb0, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0xb0, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0xc0, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0xb0, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0xb0, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0xb0, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0xb0, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0xb0, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0xb0, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0xb0, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0xb0, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0xb0, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0xb0, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0xb0, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0xb0, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0xb0, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0xb0, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0xb0, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0xb0, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0xb0, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x90, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x90, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x90, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x90, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x90, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x90, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x70, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x80, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x80, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x90, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x90, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x90, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x90, 0x10, 15, 1),
+};
+
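+/*
+ * Pull scheme of each pin: MTK_PULL_PUPD_R1R0_TYPE pins are biased through
+ * the PUPD/R1/R0 fields above, MTK_PULL_PU_PD_TYPE pins through separate
+ * PU and PD enable bits, and MTK_PULL_PD_TYPE pins through a lone PD bit.
+ */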
+static const unsigned int mt7988_pull_type[] = {
+ MTK_PULL_PUPD_R1R0_TYPE,/*0*/ MTK_PULL_PUPD_R1R0_TYPE,/*1*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*2*/ MTK_PULL_PUPD_R1R0_TYPE,/*3*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*4*/ MTK_PULL_PUPD_R1R0_TYPE,/*5*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE, /*7*/
+ MTK_PULL_PU_PD_TYPE, /*8*/ MTK_PULL_PU_PD_TYPE, /*9*/
+ MTK_PULL_PU_PD_TYPE, /*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PU_PD_TYPE, /*13*/
+ MTK_PULL_PU_PD_TYPE, /*14*/ MTK_PULL_PD_TYPE, /*15*/
+ MTK_PULL_PD_TYPE, /*16*/ MTK_PULL_PD_TYPE, /*17*/
+ MTK_PULL_PD_TYPE, /*18*/ MTK_PULL_PUPD_R1R0_TYPE,/*19*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*20*/ MTK_PULL_PUPD_R1R0_TYPE,/*21*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*22*/ MTK_PULL_PUPD_R1R0_TYPE,/*23*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*24*/ MTK_PULL_PUPD_R1R0_TYPE,/*25*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*26*/ MTK_PULL_PUPD_R1R0_TYPE,/*27*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*28*/ MTK_PULL_PUPD_R1R0_TYPE,/*29*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*30*/ MTK_PULL_PUPD_R1R0_TYPE,/*31*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*32*/ MTK_PULL_PUPD_R1R0_TYPE,/*33*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*34*/ MTK_PULL_PUPD_R1R0_TYPE,/*35*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*36*/ MTK_PULL_PUPD_R1R0_TYPE,/*37*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*38*/ MTK_PULL_PUPD_R1R0_TYPE,/*39*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*40*/ MTK_PULL_PUPD_R1R0_TYPE,/*41*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*42*/ MTK_PULL_PUPD_R1R0_TYPE,/*43*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PUPD_R1R0_TYPE,/*57*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*58*/ MTK_PULL_PUPD_R1R0_TYPE,/*59*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PUPD_R1R0_TYPE,/*61*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE, /*63*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*64*/ MTK_PULL_PUPD_R1R0_TYPE,/*65*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PUPD_R1R0_TYPE,/*69*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*70*/ MTK_PULL_PD_TYPE, /*71*/
+ MTK_PULL_PD_TYPE, /*72*/ MTK_PULL_PUPD_R1R0_TYPE,/*73*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE, /*75*/
+ MTK_PULL_PU_PD_TYPE, /*76*/ MTK_PULL_PU_PD_TYPE, /*77*/
+ MTK_PULL_PU_PD_TYPE, /*78*/ MTK_PULL_PU_PD_TYPE, /*79*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*80*/ MTK_PULL_PUPD_R1R0_TYPE,/*81*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*82*/ MTK_PULL_PUPD_R1R0_TYPE,/*83*/
+};
+
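+/* Map each generic pin register type to its per-pin field table */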
+static const struct mtk_pin_reg_calc mt7988_reg_cals[] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7988_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7988_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7988_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7988_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7988_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7988_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt7988_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt7988_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7988_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt7988_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt7988_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt7988_pin_r1_range),
+};
+
+static const struct mtk_pin_desc mt7988_pins[] = {
+ MT7988_PIN(0, "UART2_RXD"),
+ MT7988_PIN(1, "UART2_TXD"),
+ MT7988_PIN(2, "UART2_CTS"),
+ MT7988_PIN(3, "UART2_RTS"),
+ MT7988_PIN(4, "GPIO_A"),
+ MT7988_PIN(5, "SMI_0_MDC"),
+ MT7988_PIN(6, "SMI_0_MDIO"),
+ MT7988_PIN(7, "PCIE30_2L_0_WAKE_N"),
+ MT7988_PIN(8, "PCIE30_2L_0_CLKREQ_N"),
+ MT7988_PIN(9, "PCIE30_1L_1_WAKE_N"),
+ MT7988_PIN(10, "PCIE30_1L_1_CLKREQ_N"),
+ MT7988_PIN(11, "GPIO_P"),
+ MT7988_PIN(12, "WATCHDOG"),
+ MT7988_PIN(13, "GPIO_RESET"),
+ MT7988_PIN(14, "GPIO_WPS"),
+ MT7988_PIN(15, "PMIC_I2C_SCL"),
+ MT7988_PIN(16, "PMIC_I2C_SDA"),
+ MT7988_PIN(17, "I2C_1_SCL"),
+ MT7988_PIN(18, "I2C_1_SDA"),
+ MT7988_PIN(19, "PCIE30_2L_0_PRESET_N"),
+ MT7988_PIN(20, "PCIE30_1L_1_PRESET_N"),
+ MT7988_PIN(21, "PWMD1"),
+ MT7988_PIN(22, "SPI0_WP"),
+ MT7988_PIN(23, "SPI0_HOLD"),
+ MT7988_PIN(24, "SPI0_CSB"),
+ MT7988_PIN(25, "SPI0_MISO"),
+ MT7988_PIN(26, "SPI0_MOSI"),
+ MT7988_PIN(27, "SPI0_CLK"),
+ MT7988_PIN(28, "SPI1_CSB"),
+ MT7988_PIN(29, "SPI1_MISO"),
+ MT7988_PIN(30, "SPI1_MOSI"),
+ MT7988_PIN(31, "SPI1_CLK"),
+ MT7988_PIN(32, "SPI2_CLK"),
+ MT7988_PIN(33, "SPI2_MOSI"),
+ MT7988_PIN(34, "SPI2_MISO"),
+ MT7988_PIN(35, "SPI2_CSB"),
+ MT7988_PIN(36, "SPI2_HOLD"),
+ MT7988_PIN(37, "SPI2_WP"),
+ MT7988_PIN(38, "EMMC_RSTB"),
+ MT7988_PIN(39, "EMMC_DSL"),
+ MT7988_PIN(40, "EMMC_CK"),
+ MT7988_PIN(41, "EMMC_CMD"),
+ MT7988_PIN(42, "EMMC_DATA_7"),
+ MT7988_PIN(43, "EMMC_DATA_6"),
+ MT7988_PIN(44, "EMMC_DATA_5"),
+ MT7988_PIN(45, "EMMC_DATA_4"),
+ MT7988_PIN(46, "EMMC_DATA_3"),
+ MT7988_PIN(47, "EMMC_DATA_2"),
+ MT7988_PIN(48, "EMMC_DATA_1"),
+ MT7988_PIN(49, "EMMC_DATA_0"),
+ MT7988_PIN(50, "PCM_FS_I2S_LRCK"),
+ MT7988_PIN(51, "PCM_CLK_I2S_BCLK"),
+ MT7988_PIN(52, "PCM_DRX_I2S_DIN"),
+ MT7988_PIN(53, "PCM_DTX_I2S_DOUT"),
+ MT7988_PIN(54, "PCM_MCK_I2S_MCLK"),
+ MT7988_PIN(55, "UART0_RXD"),
+ MT7988_PIN(56, "UART0_TXD"),
+ MT7988_PIN(57, "PWMD0"),
+ MT7988_PIN(58, "JTAG_JTDI"),
+ MT7988_PIN(59, "JTAG_JTDO"),
+ MT7988_PIN(60, "JTAG_JTMS"),
+ MT7988_PIN(61, "JTAG_JTCLK"),
+ MT7988_PIN(62, "JTAG_JTRST_N"),
+ MT7988_PIN(63, "USB_DRV_VBUS_P1"),
+ MT7988_PIN(64, "LED_A"),
+ MT7988_PIN(65, "LED_B"),
+ MT7988_PIN(66, "LED_C"),
+ MT7988_PIN(67, "LED_D"),
+ MT7988_PIN(68, "LED_E"),
+ MT7988_PIN(69, "GPIO_B"),
+ MT7988_PIN(70, "GPIO_C"),
+ MT7988_PIN(71, "I2C_2_SCL"),
+ MT7988_PIN(72, "I2C_2_SDA"),
+ MT7988_PIN(73, "PCIE30_2L_1_PRESET_N"),
+ MT7988_PIN(74, "PCIE30_1L_0_PRESET_N"),
+ MT7988_PIN(75, "PCIE30_2L_1_WAKE_N"),
+ MT7988_PIN(76, "PCIE30_2L_1_CLKREQ_N"),
+ MT7988_PIN(77, "PCIE30_1L_0_WAKE_N"),
+ MT7988_PIN(78, "PCIE30_1L_0_CLKREQ_N"),
+ MT7988_PIN(79, "USB_DRV_VBUS_P0"),
+ MT7988_PIN(80, "UART1_RXD"),
+ MT7988_PIN(81, "UART1_TXD"),
+ MT7988_PIN(82, "UART1_CTS"),
+ MT7988_PIN(83, "UART1_RTS"),
+};
+
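+/*
+ * Each <group>_funcs array below holds the mux value to program for the
+ * corresponding entry of the matching <group>_pins array.
+ */
+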
+/* jtag */
+static const int mt7988_tops_jtag0_0_pins[] = { 0, 1, 2, 3, 4 };
+static int mt7988_tops_jtag0_0_funcs[] = { 2, 2, 2, 2, 2 };
+
+static const int mt7988_wo0_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo0_jtag_funcs[] = { 3, 3, 3, 3, 3 };
+
+static const int mt7988_wo1_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo1_jtag_funcs[] = { 4, 4, 4, 4, 4 };
+
+static const int mt7988_wo2_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo2_jtag_funcs[] = { 5, 5, 5, 5, 5 };
+
+static const int mt7988_jtag_pins[] = { 58, 59, 60, 61, 62 };
+static int mt7988_jtag_funcs[] = { 1, 1, 1, 1, 1 };
+
+static const int mt7988_tops_jtag0_1_pins[] = { 58, 59, 60, 61, 62 };
+static int mt7988_tops_jtag0_1_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* int_usxgmii */
+static const int mt7988_int_usxgmii_pins[] = { 2, 3 };
+static int mt7988_int_usxgmii_funcs[] = { 3, 3 };
+
+/* pwm */
+static const int mt7988_pwm0_pins[] = { 57 };
+static int mt7988_pwm0_funcs[] = { 1 };
+
+static const int mt7988_pwm1_pins[] = { 21 };
+static int mt7988_pwm1_funcs[] = { 1 };
+
+static const int mt7988_pwm2_pins[] = { 80 };
+static int mt7988_pwm2_funcs[] = { 2 };
+
+static const int mt7988_pwm2_0_pins[] = { 58 };
+static int mt7988_pwm2_0_funcs[] = { 5 };
+
+static const int mt7988_pwm3_pins[] = { 81 };
+static int mt7988_pwm3_funcs[] = { 2 };
+
+static const int mt7988_pwm3_0_pins[] = { 59 };
+static int mt7988_pwm3_0_funcs[] = { 5 };
+
+static const int mt7988_pwm4_pins[] = { 82 };
+static int mt7988_pwm4_funcs[] = { 2 };
+
+static const int mt7988_pwm4_0_pins[] = { 60 };
+static int mt7988_pwm4_0_funcs[] = { 5 };
+
+static const int mt7988_pwm5_pins[] = { 83 };
+static int mt7988_pwm5_funcs[] = { 2 };
+
+static const int mt7988_pwm5_0_pins[] = { 61 };
+static int mt7988_pwm5_0_funcs[] = { 5 };
+
+static const int mt7988_pwm6_pins[] = { 69 };
+static int mt7988_pwm6_funcs[] = { 3 };
+
+static const int mt7988_pwm6_0_pins[] = { 62 };
+static int mt7988_pwm6_0_funcs[] = { 5 };
+
+static const int mt7988_pwm7_pins[] = { 70 };
+static int mt7988_pwm7_funcs[] = { 3 };
+
+static const int mt7988_pwm7_0_pins[] = { 4 };
+static int mt7988_pwm7_0_funcs[] = { 3 };
+
+/* dfd */
+static const int mt7988_dfd_pins[] = { 0, 1, 2, 3, 4 };
+static int mt7988_dfd_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* i2c */
+static const int mt7988_xfi_phy0_i2c0_pins[] = { 0, 1 };
+static int mt7988_xfi_phy0_i2c0_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy1_i2c0_pins[] = { 0, 1 };
+static int mt7988_xfi_phy1_i2c0_funcs[] = { 6, 6 };
+
+static const int mt7988_xfi_phy_pll_i2c0_pins[] = { 3, 4 };
+static int mt7988_xfi_phy_pll_i2c0_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy_pll_i2c1_pins[] = { 3, 4 };
+static int mt7988_xfi_phy_pll_i2c1_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c0_0_pins[] = { 5, 6 };
+static int mt7988_i2c0_0_funcs[] = { 2, 2 };
+
+static const int mt7988_i2c1_sfp_pins[] = { 5, 6 };
+static int mt7988_i2c1_sfp_funcs[] = { 4, 4 };
+
+static const int mt7988_xfi_pextp_phy0_i2c_pins[] = { 5, 6 };
+static int mt7988_xfi_pextp_phy0_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_pextp_phy1_i2c_pins[] = { 5, 6 };
+static int mt7988_xfi_pextp_phy1_i2c_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c0_1_pins[] = { 15, 16 };
+static int mt7988_i2c0_1_funcs[] = { 1, 1 };
+
+static const int mt7988_u30_phy_i2c0_pins[] = { 15, 16 };
+static int mt7988_u30_phy_i2c0_funcs[] = { 2, 2 };
+
+static const int mt7988_u32_phy_i2c0_pins[] = { 15, 16 };
+static int mt7988_u32_phy_i2c0_funcs[] = { 3, 3 };
+
+static const int mt7988_xfi_phy0_i2c1_pins[] = { 15, 16 };
+static int mt7988_xfi_phy0_i2c1_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy1_i2c1_pins[] = { 15, 16 };
+static int mt7988_xfi_phy1_i2c1_funcs[] = { 6, 6 };
+
+static const int mt7988_xfi_phy_pll_i2c2_pins[] = { 15, 16 };
+static int mt7988_xfi_phy_pll_i2c2_funcs[] = { 7, 7 };
+
+static const int mt7988_i2c1_0_pins[] = { 17, 18 };
+static int mt7988_i2c1_0_funcs[] = { 1, 1 };
+
+static const int mt7988_u30_phy_i2c1_pins[] = { 17, 18 };
+static int mt7988_u30_phy_i2c1_funcs[] = { 2, 2 };
+
+static const int mt7988_u32_phy_i2c1_pins[] = { 17, 18 };
+static int mt7988_u32_phy_i2c1_funcs[] = { 3, 3 };
+
+static const int mt7988_xfi_phy_pll_i2c3_pins[] = { 17, 18 };
+static int mt7988_xfi_phy_pll_i2c3_funcs[] = { 4, 4 };
+
+static const int mt7988_sgmii0_i2c_pins[] = { 17, 18 };
+static int mt7988_sgmii0_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_sgmii1_i2c_pins[] = { 17, 18 };
+static int mt7988_sgmii1_i2c_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c1_2_pins[] = { 69, 70 };
+static int mt7988_i2c1_2_funcs[] = { 2, 2 };
+
+static const int mt7988_i2c2_0_pins[] = { 69, 70 };
+static int mt7988_i2c2_0_funcs[] = { 4, 4 };
+
+static const int mt7988_i2c2_1_pins[] = { 71, 72 };
+static int mt7988_i2c2_1_funcs[] = { 1, 1 };
+
+/* eth */
+static const int mt7988_mdc_mdio0_pins[] = { 5, 6 };
+static int mt7988_mdc_mdio0_funcs[] = { 1, 1 };
+
+static const int mt7988_2p5g_ext_mdio_pins[] = { 28, 29 };
+static int mt7988_2p5g_ext_mdio_funcs[] = { 6, 6 };
+
+static const int mt7988_gbe_ext_mdio_pins[] = { 30, 31 };
+static int mt7988_gbe_ext_mdio_funcs[] = { 6, 6 };
+
+static const int mt7988_mdc_mdio1_pins[] = { 69, 70 };
+static int mt7988_mdc_mdio1_funcs[] = { 1, 1 };
+
+/* pcie */
+static const int mt7988_pcie_wake_n0_0_pins[] = { 7 };
+static int mt7988_pcie_wake_n0_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n0_0_pins[] = { 8 };
+static int mt7988_pcie_clk_req_n0_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n3_0_pins[] = { 9 };
+static int mt7988_pcie_wake_n3_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n3_pins[] = { 10 };
+static int mt7988_pcie_clk_req_n3_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n0_1_pins[] = { 10 };
+static int mt7988_pcie_clk_req_n0_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_p0_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p0_phy_i2c_funcs[] = { 3, 3 };
+
+static const int mt7988_pcie_p1_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p1_phy_i2c_funcs[] = { 4, 4 };
+
+static const int mt7988_pcie_p3_phy_i2c_pins[] = { 9, 10 };
+static int mt7988_pcie_p3_phy_i2c_funcs[] = { 4, 4 };
+
+static const int mt7988_pcie_p2_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p2_phy_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_ckm_phy_i2c_pins[] = { 9, 10 };
+static int mt7988_ckm_phy_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_pcie_wake_n0_1_pins[] = { 13 };
+static int mt7988_pcie_wake_n0_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_wake_n3_1_pins[] = { 14 };
+static int mt7988_pcie_wake_n3_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_2l_0_pereset_pins[] = { 19 };
+static int mt7988_pcie_2l_0_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_1l_1_pereset_pins[] = { 20 };
+static int mt7988_pcie_1l_1_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n2_1_pins[] = { 63 };
+static int mt7988_pcie_clk_req_n2_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_2l_1_pereset_pins[] = { 73 };
+static int mt7988_pcie_2l_1_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_1l_0_pereset_pins[] = { 74 };
+static int mt7988_pcie_1l_0_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n1_0_pins[] = { 75 };
+static int mt7988_pcie_wake_n1_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n1_pins[] = { 76 };
+static int mt7988_pcie_clk_req_n1_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n2_0_pins[] = { 77 };
+static int mt7988_pcie_wake_n2_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n2_0_pins[] = { 78 };
+static int mt7988_pcie_clk_req_n2_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n2_1_pins[] = { 79 };
+static int mt7988_pcie_wake_n2_1_funcs[] = { 2 };
+
+/* pmic */
+static const int mt7988_pmic_pins[] = { 11 };
+static int mt7988_pmic_funcs[] = { 1 };
+
+/* watchdog */
+static const int mt7988_watchdog_pins[] = { 12 };
+static int mt7988_watchdog_funcs[] = { 1 };
+
+/* spi */
+static const int mt7988_spi0_wp_hold_pins[] = { 22, 23 };
+static int mt7988_spi0_wp_hold_funcs[] = { 1, 1 };
+
+static const int mt7988_spi0_pins[] = { 24, 25, 26, 27 };
+static int mt7988_spi0_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi1_pins[] = { 28, 29, 30, 31 };
+static int mt7988_spi1_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi2_pins[] = { 32, 33, 34, 35 };
+static int mt7988_spi2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi2_wp_hold_pins[] = { 36, 37 };
+static int mt7988_spi2_wp_hold_funcs[] = { 1, 1 };
+
+/* flash */
+static const int mt7988_snfi_pins[] = { 22, 23, 24, 25, 26, 27 };
+static int mt7988_snfi_funcs[] = { 2, 2, 2, 2, 2, 2 };
+
+static const int mt7988_emmc_45_pins[] = {
+ 21, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37
+};
+static int mt7988_emmc_45_funcs[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
+
+static const int mt7988_sdcard_pins[] = { 32, 33, 34, 35, 36, 37 };
+static int mt7988_sdcard_funcs[] = { 5, 5, 5, 5, 5, 5 };
+
+static const int mt7988_emmc_51_pins[] = { 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49 };
+static int mt7988_emmc_51_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* uart */
+static const int mt7988_uart2_pins[] = { 0, 1, 2, 3 };
+static int mt7988_uart2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_tops_uart0_0_pins[] = { 22, 23 };
+static int mt7988_tops_uart0_0_funcs[] = { 3, 3 };
+
+static const int mt7988_uart2_0_pins[] = { 28, 29, 30, 31 };
+static int mt7988_uart2_0_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart1_0_pins[] = { 32, 33, 34, 35 };
+static int mt7988_uart1_0_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart2_1_pins[] = { 32, 33, 34, 35 };
+static int mt7988_uart2_1_funcs[] = { 3, 3, 3, 3 };
+
+static const int mt7988_net_wo0_uart_txd_0_pins[] = { 28 };
+static int mt7988_net_wo0_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_net_wo1_uart_txd_0_pins[] = { 29 };
+static int mt7988_net_wo1_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_net_wo2_uart_txd_0_pins[] = { 30 };
+static int mt7988_net_wo2_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_tops_uart1_0_pins[] = { 28, 29 };
+static int mt7988_tops_uart1_0_funcs[] = { 4, 4 };
+
+static const int mt7988_tops_uart0_1_pins[] = { 30, 31 };
+static int mt7988_tops_uart0_1_funcs[] = { 4, 4 };
+
+static const int mt7988_tops_uart1_1_pins[] = { 36, 37 };
+static int mt7988_tops_uart1_1_funcs[] = { 3, 3 };
+
+static const int mt7988_uart0_pins[] = { 55, 56 };
+static int mt7988_uart0_funcs[] = { 1, 1 };
+
+static const int mt7988_tops_uart0_2_pins[] = { 55, 56 };
+static int mt7988_tops_uart0_2_funcs[] = { 2, 2 };
+
+static const int mt7988_uart2_2_pins[] = { 50, 51, 52, 53 };
+static int mt7988_uart2_2_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart1_1_pins[] = { 58, 59, 60, 61 };
+static int mt7988_uart1_1_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart2_3_pins[] = { 58, 59, 60, 61 };
+static int mt7988_uart2_3_funcs[] = { 3, 3, 3, 3 };
+
+static const int mt7988_uart1_2_pins[] = { 80, 81, 82, 83 };
+static int mt7988_uart1_2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_uart1_2_lite_pins[] = { 80, 81 };
+static int mt7988_uart1_2_lite_funcs[] = { 1, 1 };
+
+static const int mt7988_tops_uart1_2_pins[] = { 80, 81 };
+static int mt7988_tops_uart1_2_funcs[] = { 4, 4 };
+
+static const int mt7988_net_wo0_uart_txd_1_pins[] = { 80 };
+static int mt7988_net_wo0_uart_txd_1_funcs[] = { 3 };
+
+static const int mt7988_net_wo1_uart_txd_1_pins[] = { 81 };
+static int mt7988_net_wo1_uart_txd_1_funcs[] = { 3 };
+
+static const int mt7988_net_wo2_uart_txd_1_pins[] = { 82 };
+static int mt7988_net_wo2_uart_txd_1_funcs[] = { 3 };
+
+/* udi */
+static const int mt7988_udi_pins[] = { 32, 33, 34, 35, 36 };
+static int mt7988_udi_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* i2s */
+static const int mt7988_i2s_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_i2s_funcs[] = { 1, 1, 1, 1, 1 };
+
+/* pcm */
+static const int mt7988_pcm_pins[] = { 50, 51, 52, 53 };
+static int mt7988_pcm_funcs[] = { 1, 1, 1, 1 };
+
+/* led */
+static const int mt7988_gbe0_led1_pins[] = { 58 };
+static int mt7988_gbe0_led1_funcs[] = { 6 };
+static const int mt7988_gbe1_led1_pins[] = { 59 };
+static int mt7988_gbe1_led1_funcs[] = { 6 };
+static const int mt7988_gbe2_led1_pins[] = { 60 };
+static int mt7988_gbe2_led1_funcs[] = { 6 };
+static const int mt7988_gbe3_led1_pins[] = { 61 };
+static int mt7988_gbe3_led1_funcs[] = { 6 };
+
+static const int mt7988_2p5gbe_led1_pins[] = { 62 };
+static int mt7988_2p5gbe_led1_funcs[] = { 6 };
+
+static const int mt7988_gbe0_led0_pins[] = { 64 };
+static int mt7988_gbe0_led0_funcs[] = { 1 };
+static const int mt7988_gbe1_led0_pins[] = { 65 };
+static int mt7988_gbe1_led0_funcs[] = { 1 };
+static const int mt7988_gbe2_led0_pins[] = { 66 };
+static int mt7988_gbe2_led0_funcs[] = { 1 };
+static const int mt7988_gbe3_led0_pins[] = { 67 };
+static int mt7988_gbe3_led0_funcs[] = { 1 };
+
+static const int mt7988_2p5gbe_led0_pins[] = { 68 };
+static int mt7988_2p5gbe_led0_funcs[] = { 1 };
+
+/* usb */
+static const int mt7988_drv_vbus_p1_pins[] = { 63 };
+static int mt7988_drv_vbus_p1_funcs[] = { 1 };
+
+static const int mt7988_drv_vbus_pins[] = { 79 };
+static int mt7988_drv_vbus_funcs[] = { 1 };
+
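+/*
+ * PINCTRL_PIN_GROUP() pairs the <name>_pins and <name>_funcs arrays
+ * declared above into a selectable pin group.
+ */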
+static const struct group_desc mt7988_groups[] = {
+ /* @GPIO(0,1,2,3): uart2 */
+ PINCTRL_PIN_GROUP("uart2", mt7988_uart2),
+ /* @GPIO(0,1,2,3,4): tops_jtag0_0 */
+ PINCTRL_PIN_GROUP("tops_jtag0_0", mt7988_tops_jtag0_0),
+ /* @GPIO(2,3): int_usxgmii */
+ PINCTRL_PIN_GROUP("int_usxgmii", mt7988_int_usxgmii),
+ /* @GPIO(0,1,2,3,4): dfd */
+ PINCTRL_PIN_GROUP("dfd", mt7988_dfd),
+ /* @GPIO(0,1): xfi_phy0_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy0_i2c0", mt7988_xfi_phy0_i2c0),
+ /* @GPIO(0,1): xfi_phy1_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy1_i2c0", mt7988_xfi_phy1_i2c0),
+ /* @GPIO(3,4): xfi_phy_pll_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c0", mt7988_xfi_phy_pll_i2c0),
+ /* @GPIO(3,4): xfi_phy_pll_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c1", mt7988_xfi_phy_pll_i2c1),
+ /* @GPIO(4): pwm7 */
+ PINCTRL_PIN_GROUP("pwm7_0", mt7988_pwm7_0),
+ /* @GPIO(5,6) i2c0_0 */
+ PINCTRL_PIN_GROUP("i2c0_0", mt7988_i2c0_0),
+ /* @GPIO(5,6) i2c1_sfp */
+ PINCTRL_PIN_GROUP("i2c1_sfp", mt7988_i2c1_sfp),
+ /* @GPIO(5,6) xfi_pextp_phy0_i2c */
+ PINCTRL_PIN_GROUP("xfi_pextp_phy0_i2c", mt7988_xfi_pextp_phy0_i2c),
+ /* @GPIO(5,6) xfi_pextp_phy1_i2c */
+ PINCTRL_PIN_GROUP("xfi_pextp_phy1_i2c", mt7988_xfi_pextp_phy1_i2c),
+ /* @GPIO(5,6) mdc_mdio0 */
+ PINCTRL_PIN_GROUP("mdc_mdio0", mt7988_mdc_mdio0),
+ /* @GPIO(7): pcie_wake_n0_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n0_0", mt7988_pcie_wake_n0_0),
+ /* @GPIO(8): pcie_clk_req_n0_0 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n0_0", mt7988_pcie_clk_req_n0_0),
+ /* @GPIO(9): pcie_wake_n3_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n3_0", mt7988_pcie_wake_n3_0),
+ /* @GPIO(10): pcie_clk_req_n3 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n3", mt7988_pcie_clk_req_n3),
+ /* @GPIO(10): pcie_clk_req_n0_1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n0_1", mt7988_pcie_clk_req_n0_1),
+ /* @GPIO(7,8) pcie_p0_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p0_phy_i2c", mt7988_pcie_p0_phy_i2c),
+ /* @GPIO(7,8) pcie_p1_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p1_phy_i2c", mt7988_pcie_p1_phy_i2c),
+ /* @GPIO(7,8) pcie_p2_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p2_phy_i2c", mt7988_pcie_p2_phy_i2c),
+ /* @GPIO(9,10) pcie_p3_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p3_phy_i2c", mt7988_pcie_p3_phy_i2c),
+ /* @GPIO(9,10) ckm_phy_i2c */
+ PINCTRL_PIN_GROUP("ckm_phy_i2c", mt7988_ckm_phy_i2c),
+ /* @GPIO(11): pmic */
+	PINCTRL_PIN_GROUP("pmic", mt7988_pmic),
+ /* @GPIO(12): watchdog */
+ PINCTRL_PIN_GROUP("watchdog", mt7988_watchdog),
+ /* @GPIO(13): pcie_wake_n0_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n0_1", mt7988_pcie_wake_n0_1),
+ /* @GPIO(14): pcie_wake_n3_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n3_1", mt7988_pcie_wake_n3_1),
+ /* @GPIO(15,16) i2c0_1 */
+ PINCTRL_PIN_GROUP("i2c0_1", mt7988_i2c0_1),
+ /* @GPIO(15,16) u30_phy_i2c0 */
+ PINCTRL_PIN_GROUP("u30_phy_i2c0", mt7988_u30_phy_i2c0),
+ /* @GPIO(15,16) u32_phy_i2c0 */
+ PINCTRL_PIN_GROUP("u32_phy_i2c0", mt7988_u32_phy_i2c0),
+ /* @GPIO(15,16) xfi_phy0_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy0_i2c1", mt7988_xfi_phy0_i2c1),
+ /* @GPIO(15,16) xfi_phy1_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy1_i2c1", mt7988_xfi_phy1_i2c1),
+ /* @GPIO(15,16) xfi_phy_pll_i2c2 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c2", mt7988_xfi_phy_pll_i2c2),
+ /* @GPIO(17,18) i2c1_0 */
+ PINCTRL_PIN_GROUP("i2c1_0", mt7988_i2c1_0),
+ /* @GPIO(17,18) u30_phy_i2c1 */
+ PINCTRL_PIN_GROUP("u30_phy_i2c1", mt7988_u30_phy_i2c1),
+ /* @GPIO(17,18) u32_phy_i2c1 */
+ PINCTRL_PIN_GROUP("u32_phy_i2c1", mt7988_u32_phy_i2c1),
+ /* @GPIO(17,18) xfi_phy_pll_i2c3 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c3", mt7988_xfi_phy_pll_i2c3),
+ /* @GPIO(17,18) sgmii0_i2c */
+ PINCTRL_PIN_GROUP("sgmii0_i2c", mt7988_sgmii0_i2c),
+ /* @GPIO(17,18) sgmii1_i2c */
+ PINCTRL_PIN_GROUP("sgmii1_i2c", mt7988_sgmii1_i2c),
+ /* @GPIO(19): pcie_2l_0_pereset */
+ PINCTRL_PIN_GROUP("pcie_2l_0_pereset", mt7988_pcie_2l_0_pereset),
+ /* @GPIO(20): pcie_1l_1_pereset */
+ PINCTRL_PIN_GROUP("pcie_1l_1_pereset", mt7988_pcie_1l_1_pereset),
+ /* @GPIO(21): pwm1 */
+ PINCTRL_PIN_GROUP("pwm1", mt7988_pwm1),
+ /* @GPIO(22,23) spi0_wp_hold */
+ PINCTRL_PIN_GROUP("spi0_wp_hold", mt7988_spi0_wp_hold),
+ /* @GPIO(24,25,26,27) spi0 */
+ PINCTRL_PIN_GROUP("spi0", mt7988_spi0),
+ /* @GPIO(28,29,30,31) spi1 */
+ PINCTRL_PIN_GROUP("spi1", mt7988_spi1),
+ /* @GPIO(32,33,34,35) spi2 */
+ PINCTRL_PIN_GROUP("spi2", mt7988_spi2),
+ /* @GPIO(36,37) spi2_wp_hold */
+ PINCTRL_PIN_GROUP("spi2_wp_hold", mt7988_spi2_wp_hold),
+ /* @GPIO(22,23,24,25,26,27) snfi */
+ PINCTRL_PIN_GROUP("snfi", mt7988_snfi),
+ /* @GPIO(22,23) tops_uart0_0 */
+ PINCTRL_PIN_GROUP("tops_uart0_0", mt7988_tops_uart0_0),
+ /* @GPIO(28,29,30,31) uart2_0 */
+ PINCTRL_PIN_GROUP("uart2_0", mt7988_uart2_0),
+ /* @GPIO(32,33,34,35) uart1_0 */
+ PINCTRL_PIN_GROUP("uart1_0", mt7988_uart1_0),
+ /* @GPIO(32,33,34,35) uart2_1 */
+ PINCTRL_PIN_GROUP("uart2_1", mt7988_uart2_1),
+ /* @GPIO(28) net_wo0_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_0", mt7988_net_wo0_uart_txd_0),
+ /* @GPIO(29) net_wo1_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo1_uart_txd_0", mt7988_net_wo1_uart_txd_0),
+ /* @GPIO(30) net_wo2_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo2_uart_txd_0", mt7988_net_wo2_uart_txd_0),
+ /* @GPIO(28,29) tops_uart1_0 */
+	PINCTRL_PIN_GROUP("tops_uart1_0", mt7988_tops_uart1_0),
+ /* @GPIO(30,31) tops_uart0_1 */
+ PINCTRL_PIN_GROUP("tops_uart0_1", mt7988_tops_uart0_1),
+ /* @GPIO(36,37) tops_uart1_1 */
+ PINCTRL_PIN_GROUP("tops_uart1_1", mt7988_tops_uart1_1),
+ /* @GPIO(32,33,34,35,36) udi */
+ PINCTRL_PIN_GROUP("udi", mt7988_udi),
+ /* @GPIO(21,28,29,30,31,32,33,34,35,36,37) emmc_45 */
+ PINCTRL_PIN_GROUP("emmc_45", mt7988_emmc_45),
+ /* @GPIO(32,33,34,35,36,37) sdcard */
+ PINCTRL_PIN_GROUP("sdcard", mt7988_sdcard),
+ /* @GPIO(38,39,40,41,42,43,44,45,46,47,48,49) emmc_51 */
+ PINCTRL_PIN_GROUP("emmc_51", mt7988_emmc_51),
+ /* @GPIO(28,29) 2p5g_ext_mdio */
+ PINCTRL_PIN_GROUP("2p5g_ext_mdio", mt7988_2p5g_ext_mdio),
+ /* @GPIO(30,31) gbe_ext_mdio */
+ PINCTRL_PIN_GROUP("gbe_ext_mdio", mt7988_gbe_ext_mdio),
+ /* @GPIO(50,51,52,53,54) i2s */
+ PINCTRL_PIN_GROUP("i2s", mt7988_i2s),
+ /* @GPIO(50,51,52,53) pcm */
+ PINCTRL_PIN_GROUP("pcm", mt7988_pcm),
+ /* @GPIO(55,56) uart0 */
+ PINCTRL_PIN_GROUP("uart0", mt7988_uart0),
+ /* @GPIO(55,56) tops_uart0_2 */
+ PINCTRL_PIN_GROUP("tops_uart0_2", mt7988_tops_uart0_2),
+ /* @GPIO(50,51,52,53) uart2_2 */
+ PINCTRL_PIN_GROUP("uart2_2", mt7988_uart2_2),
+ /* @GPIO(50,51,52,53,54) wo0_jtag */
+ PINCTRL_PIN_GROUP("wo0_jtag", mt7988_wo0_jtag),
+	/* @GPIO(50,51,52,53,54) wo1_jtag */
+ PINCTRL_PIN_GROUP("wo1_jtag", mt7988_wo1_jtag),
+ /* @GPIO(50,51,52,53,54) wo2_jtag */
+ PINCTRL_PIN_GROUP("wo2_jtag", mt7988_wo2_jtag),
+ /* @GPIO(57) pwm0 */
+ PINCTRL_PIN_GROUP("pwm0", mt7988_pwm0),
+ /* @GPIO(58) pwm2_0 */
+ PINCTRL_PIN_GROUP("pwm2_0", mt7988_pwm2_0),
+ /* @GPIO(59) pwm3_0 */
+ PINCTRL_PIN_GROUP("pwm3_0", mt7988_pwm3_0),
+ /* @GPIO(60) pwm4_0 */
+ PINCTRL_PIN_GROUP("pwm4_0", mt7988_pwm4_0),
+ /* @GPIO(61) pwm5_0 */
+ PINCTRL_PIN_GROUP("pwm5_0", mt7988_pwm5_0),
+ /* @GPIO(58,59,60,61,62) jtag */
+ PINCTRL_PIN_GROUP("jtag", mt7988_jtag),
+ /* @GPIO(58,59,60,61,62) tops_jtag0_1 */
+ PINCTRL_PIN_GROUP("tops_jtag0_1", mt7988_tops_jtag0_1),
+ /* @GPIO(58,59,60,61) uart2_3 */
+ PINCTRL_PIN_GROUP("uart2_3", mt7988_uart2_3),
+ /* @GPIO(58,59,60,61) uart1_1 */
+ PINCTRL_PIN_GROUP("uart1_1", mt7988_uart1_1),
+ /* @GPIO(58,59,60,61) gbe_led1 */
+ PINCTRL_PIN_GROUP("gbe0_led1", mt7988_gbe0_led1),
+ PINCTRL_PIN_GROUP("gbe1_led1", mt7988_gbe1_led1),
+ PINCTRL_PIN_GROUP("gbe2_led1", mt7988_gbe2_led1),
+ PINCTRL_PIN_GROUP("gbe3_led1", mt7988_gbe3_led1),
+ /* @GPIO(62) pwm6_0 */
+ PINCTRL_PIN_GROUP("pwm6_0", mt7988_pwm6_0),
+ /* @GPIO(62) 2p5gbe_led1 */
+ PINCTRL_PIN_GROUP("2p5gbe_led1", mt7988_2p5gbe_led1),
+ /* @GPIO(64,65,66,67) gbe_led0 */
+ PINCTRL_PIN_GROUP("gbe0_led0", mt7988_gbe0_led0),
+ PINCTRL_PIN_GROUP("gbe1_led0", mt7988_gbe1_led0),
+ PINCTRL_PIN_GROUP("gbe2_led0", mt7988_gbe2_led0),
+ PINCTRL_PIN_GROUP("gbe3_led0", mt7988_gbe3_led0),
+ /* @GPIO(68) 2p5gbe_led0 */
+ PINCTRL_PIN_GROUP("2p5gbe_led0", mt7988_2p5gbe_led0),
+ /* @GPIO(63) drv_vbus_p1 */
+ PINCTRL_PIN_GROUP("drv_vbus_p1", mt7988_drv_vbus_p1),
+ /* @GPIO(63) pcie_clk_req_n2_1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n2_1", mt7988_pcie_clk_req_n2_1),
+ /* @GPIO(69, 70) mdc_mdio1 */
+ PINCTRL_PIN_GROUP("mdc_mdio1", mt7988_mdc_mdio1),
+ /* @GPIO(69, 70) i2c1_2 */
+ PINCTRL_PIN_GROUP("i2c1_2", mt7988_i2c1_2),
+ /* @GPIO(69) pwm6 */
+ PINCTRL_PIN_GROUP("pwm6", mt7988_pwm6),
+ /* @GPIO(70) pwm7 */
+ PINCTRL_PIN_GROUP("pwm7", mt7988_pwm7),
+ /* @GPIO(69,70) i2c2_0 */
+ PINCTRL_PIN_GROUP("i2c2_0", mt7988_i2c2_0),
+ /* @GPIO(71,72) i2c2_1 */
+ PINCTRL_PIN_GROUP("i2c2_1", mt7988_i2c2_1),
+ /* @GPIO(73) pcie_2l_1_pereset */
+ PINCTRL_PIN_GROUP("pcie_2l_1_pereset", mt7988_pcie_2l_1_pereset),
+ /* @GPIO(74) pcie_1l_0_pereset */
+ PINCTRL_PIN_GROUP("pcie_1l_0_pereset", mt7988_pcie_1l_0_pereset),
+ /* @GPIO(75) pcie_wake_n1_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n1_0", mt7988_pcie_wake_n1_0),
+ /* @GPIO(76) pcie_clk_req_n1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n1", mt7988_pcie_clk_req_n1),
+ /* @GPIO(77) pcie_wake_n2_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n2_0", mt7988_pcie_wake_n2_0),
+ /* @GPIO(78) pcie_clk_req_n2_0 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n2_0", mt7988_pcie_clk_req_n2_0),
+ /* @GPIO(79) drv_vbus */
+ PINCTRL_PIN_GROUP("drv_vbus", mt7988_drv_vbus),
+ /* @GPIO(79) pcie_wake_n2_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n2_1", mt7988_pcie_wake_n2_1),
+ /* @GPIO(80,81,82,83) uart1_2 */
+ PINCTRL_PIN_GROUP("uart1_2", mt7988_uart1_2),
+ /* @GPIO(80,81) uart1_2_lite */
+ PINCTRL_PIN_GROUP("uart1_2_lite", mt7988_uart1_2_lite),
+ /* @GPIO(80) pwm2 */
+ PINCTRL_PIN_GROUP("pwm2", mt7988_pwm2),
+ /* @GPIO(81) pwm3 */
+ PINCTRL_PIN_GROUP("pwm3", mt7988_pwm3),
+ /* @GPIO(82) pwm4 */
+ PINCTRL_PIN_GROUP("pwm4", mt7988_pwm4),
+ /* @GPIO(83) pwm5 */
+ PINCTRL_PIN_GROUP("pwm5", mt7988_pwm5),
+ /* @GPIO(80,81) tops_uart1_2 */
+ PINCTRL_PIN_GROUP("tops_uart1_2", mt7988_tops_uart1_2),
+ /* @GPIO(80) net_wo0_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_1", mt7988_net_wo0_uart_txd_1),
+ /* @GPIO(81) net_wo1_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo1_uart_txd_1", mt7988_net_wo1_uart_txd_1),
+ /* @GPIO(82) net_wo2_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo2_uart_txd_1", mt7988_net_wo2_uart_txd_1),
+};
+
+/*
+ * Join the groups that provide the same capability from the user's point
+ * of view, so that they can be referenced together from the device tree.
+ */
+static const char * const mt7988_jtag_groups[] = {
+ "tops_jtag0_0", "wo0_jtag", "wo1_jtag",
+ "wo2_jtag", "jtag", "tops_jtag0_1",
+};
+static const char * const mt7988_int_usxgmii_groups[] = {
+ "int_usxgmii",
+};
+static const char * const mt7988_pwm_groups[] = {
+ "pwm0", "pwm1", "pwm2", "pwm2_0", "pwm3", "pwm3_0", "pwm4", "pwm4_0",
+	"pwm5", "pwm5_0", "pwm6", "pwm6_0", "pwm7", "pwm7_0",
+};
+static const char * const mt7988_dfd_groups[] = {
+ "dfd",
+};
+static const char * const mt7988_i2c_groups[] = {
+ "xfi_phy0_i2c0",
+ "xfi_phy1_i2c0",
+ "xfi_phy_pll_i2c0",
+ "xfi_phy_pll_i2c1",
+ "i2c0_0",
+ "i2c1_sfp",
+ "xfi_pextp_phy0_i2c",
+ "xfi_pextp_phy1_i2c",
+ "i2c0_1",
+ "u30_phy_i2c0",
+ "u32_phy_i2c0",
+ "xfi_phy0_i2c1",
+ "xfi_phy1_i2c1",
+ "xfi_phy_pll_i2c2",
+ "i2c1_0",
+ "u30_phy_i2c1",
+ "u32_phy_i2c1",
+ "xfi_phy_pll_i2c3",
+ "sgmii0_i2c",
+ "sgmii1_i2c",
+ "i2c1_2",
+ "i2c2_0",
+ "i2c2_1",
+};
+static const char * const mt7988_ethernet_groups[] = {
+ "mdc_mdio0",
+ "2p5g_ext_mdio",
+ "gbe_ext_mdio",
+ "mdc_mdio1",
+};
+static const char * const mt7988_pcie_groups[] = {
+ "pcie_wake_n0_0", "pcie_clk_req_n0_0", "pcie_wake_n3_0",
+ "pcie_clk_req_n3", "pcie_p0_phy_i2c", "pcie_p1_phy_i2c",
+ "pcie_p3_phy_i2c", "pcie_p2_phy_i2c", "ckm_phy_i2c",
+ "pcie_wake_n0_1", "pcie_wake_n3_1", "pcie_2l_0_pereset",
+ "pcie_1l_1_pereset", "pcie_clk_req_n2_1", "pcie_2l_1_pereset",
+ "pcie_1l_0_pereset", "pcie_wake_n1_0", "pcie_clk_req_n1",
+ "pcie_wake_n2_0", "pcie_clk_req_n2_0", "pcie_wake_n2_1",
+ "pcie_clk_req_n0_1"
+};
+static const char * const mt7988_pmic_groups[] = {
+ "pmic",
+};
+static const char * const mt7988_wdt_groups[] = {
+ "watchdog",
+};
+static const char * const mt7988_spi_groups[] = {
+ "spi0", "spi0_wp_hold", "spi1", "spi2", "spi2_wp_hold",
+};
+static const char * const mt7988_flash_groups[] = {
+	"emmc_45", "sdcard", "snfi", "emmc_51",
+};
+static const char * const mt7988_uart_groups[] = {
+ "uart2",
+ "tops_uart0_0",
+ "uart2_0",
+ "uart1_0",
+ "uart2_1",
+ "net_wo0_uart_txd_0",
+ "net_wo1_uart_txd_0",
+ "net_wo2_uart_txd_0",
+ "tops_uart1_0",
+	"tops_uart0_1",
+	"tops_uart1_1",
+ "uart0",
+ "tops_uart0_2",
+ "uart1_1",
+ "uart2_3",
+ "uart1_2",
+ "uart1_2_lite",
+ "tops_uart1_2",
+ "net_wo0_uart_txd_1",
+ "net_wo1_uart_txd_1",
+ "net_wo2_uart_txd_1",
+};
+static const char * const mt7988_udi_groups[] = {
+ "udi",
+};
+static const char * const mt7988_audio_groups[] = {
+ "i2s", "pcm",
+};
+static const char * const mt7988_led_groups[] = {
+ "gbe0_led1", "gbe1_led1", "gbe2_led1", "gbe3_led1", "2p5gbe_led1",
+ "gbe0_led0", "gbe1_led0", "gbe2_led0", "gbe3_led0", "2p5gbe_led0",
+};
+static const char * const mt7988_usb_groups[] = {
+ "drv_vbus",
+ "drv_vbus_p1",
+};
+
+static const struct function_desc mt7988_functions[] = {
+ { { "audio", mt7988_audio_groups, ARRAY_SIZE(mt7988_audio_groups) },
+ NULL },
+ { { "jtag", mt7988_jtag_groups, ARRAY_SIZE(mt7988_jtag_groups) },
+ NULL },
+ { { "int_usxgmii", mt7988_int_usxgmii_groups,
+ ARRAY_SIZE(mt7988_int_usxgmii_groups) },
+ NULL },
+ { { "pwm", mt7988_pwm_groups, ARRAY_SIZE(mt7988_pwm_groups) }, NULL },
+ { { "dfd", mt7988_dfd_groups, ARRAY_SIZE(mt7988_dfd_groups) }, NULL },
+ { { "i2c", mt7988_i2c_groups, ARRAY_SIZE(mt7988_i2c_groups) }, NULL },
+ { { "eth", mt7988_ethernet_groups, ARRAY_SIZE(mt7988_ethernet_groups) },
+ NULL },
+ { { "pcie", mt7988_pcie_groups, ARRAY_SIZE(mt7988_pcie_groups) },
+ NULL },
+ { { "pmic", mt7988_pmic_groups, ARRAY_SIZE(mt7988_pmic_groups) },
+ NULL },
+ { { "watchdog", mt7988_wdt_groups, ARRAY_SIZE(mt7988_wdt_groups) },
+ NULL },
+ { { "spi", mt7988_spi_groups, ARRAY_SIZE(mt7988_spi_groups) }, NULL },
+ { { "flash", mt7988_flash_groups, ARRAY_SIZE(mt7988_flash_groups) },
+ NULL },
+ { { "uart", mt7988_uart_groups, ARRAY_SIZE(mt7988_uart_groups) },
+ NULL },
+ { { "udi", mt7988_udi_groups, ARRAY_SIZE(mt7988_udi_groups) }, NULL },
+ { { "usb", mt7988_usb_groups, ARRAY_SIZE(mt7988_usb_groups) }, NULL },
+ { { "led", mt7988_led_groups, ARRAY_SIZE(mt7988_led_groups) }, NULL },
+};
+
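+/* One EINT per GPIO, with 16 debounce counters shared between them */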
+static const struct mtk_eint_hw mt7988_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = ARRAY_SIZE(mt7988_pins),
+ .db_cnt = 16,
+};
+
+static const char * const mt7988_pinctrl_register_base_names[] = {
+ "gpio", "iocfg_tr", "iocfg_br",
+ "iocfg_rb", "iocfg_lb", "iocfg_tl",
+};
+
+static const struct mtk_pin_soc mt7988_data = {
+ .reg_cal = mt7988_reg_cals,
+ .pins = mt7988_pins,
+ .npins = ARRAY_SIZE(mt7988_pins),
+ .grps = mt7988_groups,
+ .ngrps = ARRAY_SIZE(mt7988_groups),
+ .funcs = mt7988_functions,
+ .nfuncs = ARRAY_SIZE(mt7988_functions),
+ .eint_hw = &mt7988_eint_hw,
+ .gpio_m = 0,
+ .ies_present = false,
+ .base_names = mt7988_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt7988_pinctrl_register_base_names),
+ .bias_disable_set = mtk_pinconf_bias_disable_set,
+ .bias_disable_get = mtk_pinconf_bias_disable_get,
+ .bias_set = mtk_pinconf_bias_set,
+ .bias_get = mtk_pinconf_bias_get,
+ .pull_type = mt7988_pull_type,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt7988_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt7988-pinctrl" },
+ {}
+};
+
+static int mt7988_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_moore_pinctrl_probe(pdev, &mt7988_data);
+}
+
+static struct platform_driver mt7988_pinctrl_driver = {
+ .driver = {
+ .name = "mt7988-pinctrl",
+ .of_match_table = mt7988_pinctrl_of_match,
+ },
+ .probe = mt7988_pinctrl_probe,
+};
+
+static int __init mt7988_pinctrl_init(void)
+{
+ return platform_driver_register(&mt7988_pinctrl_driver);
+}
+arch_initcall(mt7988_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 54301fbba524..00e95682b9f8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_rev1);
*/
static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
- u32 pullup, u32 arg)
+ u32 pullup, u32 arg, bool pd_only)
{
int err, pu, pd;
@@ -587,18 +587,16 @@ static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
pu = 0;
pd = 1;
} else {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
- if (err)
- goto out;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
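+	/* MTK_PULL_PD_TYPE pins have no PU bit, so only program PD */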
+ if (!pd_only) {
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
+ if (err)
+ return err;
+ }
-out:
- return err;
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
}
static int mtk_pinconf_bias_set_pullsel_pullen(struct mtk_pinctrl *hw,
@@ -737,7 +735,7 @@ static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw,
return err;
}
- return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable);
+ return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable, false);
}
int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
@@ -758,8 +756,14 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
return 0;
}
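+	/* MTK_PULL_PD_TYPE pins only implement a pull-down enable bit */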
+ if (try_all_type & MTK_PULL_PD_TYPE) {
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, true);
+ if (!err)
+			return 0;
+ }
+
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
- err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, false);
if (!err)
return 0;
}
@@ -878,6 +882,29 @@ out:
return err;
}
+static int mtk_pinconf_bias_get_pd(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err, pd;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd);
+ if (err)
+ goto out;
+
+ if (pd == 0) {
+ *pullup = 0;
+ *enable = MTK_DISABLE;
+ } else if (pd == 1) {
+ *pullup = 0;
+ *enable = MTK_ENABLE;
+	} else {
+		err = -EINVAL;
+	}
+
+out:
+ return err;
+}
+
static int mtk_pinconf_bias_get_pullsel_pullen(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
u32 *pullup, u32 *enable)
@@ -947,6 +974,12 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
return 0;
}
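+	/* Read back the PD-only bias used by MTK_PULL_PD_TYPE pins */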
+ if (try_all_type & MTK_PULL_PD_TYPE) {
+ err = mtk_pinconf_bias_get_pd(hw, desc, pullup, enable);
+ if (!err)
+			return 0;
+ }
+
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
if (!err)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 23688ca6d04e..9c271dc2b521 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -24,6 +24,7 @@
* turned on/off itself. But it can't be selected pull up/down
*/
#define MTK_PULL_RSEL_TYPE BIT(3)
+#define MTK_PULL_PD_TYPE BIT(4)
/* MTK_PULL_PU_PD_RSEL_TYPE is a type which is controlled by
* MTK_PULL_PU_PD_TYPE and MTK_PULL_RSEL_TYPE.
*/
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 4ce2e35a6373..8cd4ba5cf0bd 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -22,6 +22,7 @@
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/mfd/abx500.h>
@@ -496,7 +497,7 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s,
seq_printf(s, " %-9s", pull_up_down[pd]);
} else
- seq_printf(s, " %-9s", chip->get(chip, offset) ? "hi" : "lo");
+ seq_printf(s, " %-9s", str_hi_lo(chip->get(chip, offset)));
mode = abx500_get_mode(pctldev, chip, offset);
@@ -865,7 +866,7 @@ static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
pin, configs[i],
(param == PIN_CONFIG_OUTPUT) ? "output " : "input",
(param == PIN_CONFIG_OUTPUT) ?
- (argument ? "high" : "low") :
+ str_high_low(argument) :
(argument ? "pull up" : "pull down"));
/* on ABx500, there is no GPIO0, so adjust the offset */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index f4f10c60c1d2..8940e04fcf4c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -28,6 +28,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
/* Since we request GPIOs from ourself */
@@ -438,9 +439,9 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
* - Any spurious wake up event during switch sequence to be ignored and
* cleared
*/
-static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+static int nmk_gpio_glitch_slpm_init(unsigned int *slpm)
{
- int i;
+ int i, j, ret;
for (i = 0; i < NMK_MAX_BANKS; i++) {
struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
@@ -449,11 +450,21 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
if (!chip)
break;
- clk_enable(chip->clk);
+ ret = clk_enable(chip->clk);
+ if (ret) {
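+			/* Unwind the banks whose clock was already enabled */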
+ for (j = 0; j < i; j++) {
+ chip = nmk_gpio_chips[j];
+ clk_disable(chip->clk);
+ }
+
+ return ret;
+ }
slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
writel(temp, chip->addr + NMK_GPIO_SLPC);
}
+
+ return 0;
}
static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
@@ -923,7 +934,9 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
slpm[nmk_chip->bank] &= ~BIT(bit);
}
- nmk_gpio_glitch_slpm_init(slpm);
+ ret = nmk_gpio_glitch_slpm_init(slpm);
+ if (ret)
+ goto out_pre_slpm_init;
}
for (i = 0; i < g->grp.npins; i++) {
@@ -940,7 +953,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
dev_dbg(npct->dev, "setting pin %d to altsetting %d\n",
g->grp.pins[i], g->altsetting);
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ goto out_glitch;
+
/*
* If the pin is switching to altfunc, and there was an
* interrupt installed on it which has been lazy disabled,
@@ -988,6 +1004,7 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
unsigned int bit;
+ int ret;
if (!range) {
dev_err(npct->dev, "invalid range\n");
@@ -1004,7 +1021,9 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
find_nmk_gpio_from_pin(pin, &bit);
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ return ret;
/* There is no glitch when converting any pin to GPIO */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
clk_disable(nmk_chip->clk);
@@ -1058,6 +1077,7 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long cfg;
int pull, slpm, output, val, i;
bool lowemi, gpiomode, sleep;
+ int ret;
nmk_chip = find_nmk_gpio_from_pin(pin, &bit);
if (!nmk_chip) {
@@ -1106,17 +1126,19 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
slpm_pull ? pullnames[pull] : "same",
slpm_output ? (output ? "output" : "input")
: "same",
- slpm_val ? (val ? "high" : "low") : "same");
+ slpm_val ? str_high_low(val) : "same");
}
dev_dbg(nmk_chip->chip.parent,
"pin %d [%#lx]: pull %s, slpm %s (%s%s), lowemi %s\n",
pin, cfg, pullnames[pull], slpmnames[slpm],
output ? "output " : "input",
- output ? (val ? "high" : "low") : "",
- lowemi ? "on" : "off");
+ output ? str_high_low(val) : "",
+ str_on_off(lowemi));
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ return ret;
if (gpiomode)
/* No glitch when going to GPIO mode */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index fff6d4209ad5..1d7fdcdec4c8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -30,6 +30,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include "core.h"
@@ -458,7 +459,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
if (err)
dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
- on ? "enable" : "disable");
+ str_enable_disable(on));
return 0;
}
@@ -908,12 +909,13 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
return false;
}
-static int amd_gpio_suspend(struct device *dev)
+static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
int i;
+ u32 wake_mask = is_suspend ? WAKE_SOURCE_SUSPEND : WAKE_SOURCE_HIBERNATE;
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
@@ -925,11 +927,11 @@ static int amd_gpio_suspend(struct device *dev)
gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
/* mask any interrupts not intended to be a wake source */
- if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
+ if (!(gpio_dev->saved_regs[i] & wake_mask)) {
writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
gpio_dev->base + pin * 4);
- pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
- pin);
+ pm_pr_dbg("Disabling GPIO #%d interrupt for %s.\n",
+ pin, is_suspend ? "suspend" : "hibernate");
}
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -938,6 +940,16 @@ static int amd_gpio_suspend(struct device *dev)
return 0;
}
+static int amd_gpio_suspend(struct device *dev)
+{
+ return amd_gpio_suspend_hibernate_common(dev, true);
+}
+
+static int amd_gpio_hibernate(struct device *dev)
+{
+ return amd_gpio_suspend_hibernate_common(dev, false);
+}
+
static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
@@ -961,8 +973,12 @@ static int amd_gpio_resume(struct device *dev)
}
static const struct dev_pm_ops amd_gpio_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
- amd_gpio_resume)
+ .suspend_late = amd_gpio_suspend,
+ .resume_early = amd_gpio_resume,
+ .freeze_late = amd_gpio_hibernate,
+ .thaw_early = amd_gpio_resume,
+ .poweroff_late = amd_gpio_hibernate,
+ .restore_early = amd_gpio_resume,
};
#endif
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 667be49c3f48..3a1e5bffaf6e 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -80,10 +80,9 @@
#define FUNCTION_MASK GENMASK(1, 0)
#define FUNCTION_INVALID GENMASK(7, 0)
-#define WAKE_SOURCE (BIT(WAKE_CNTRL_OFF_S0I3) | \
- BIT(WAKE_CNTRL_OFF_S3) | \
- BIT(WAKE_CNTRL_OFF_S4) | \
- BIT(WAKECNTRL_Z_OFF))
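+/* Wake bits honoured when entering suspend (S0i3/S3) vs. hibernate (S4) */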
+#define WAKE_SOURCE_SUSPEND (BIT(WAKE_CNTRL_OFF_S0I3) | \
+ BIT(WAKE_CNTRL_OFF_S3))
+#define WAKE_SOURCE_HIBERNATE BIT(WAKE_CNTRL_OFF_S4)
struct amd_function {
const char *name;
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 631612539af7..e9f61927858d 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -2237,7 +2238,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"pin group %s could not be %s: "
"probably a hardware limitation\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
dev_err(pmx->dev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
@@ -2245,7 +2246,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
dev_dbg(pmx->dev,
"padgroup %s %s\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
}
}
@@ -2259,7 +2260,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"pin group %s could not be %s: "
"probably a hardware limitation\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
dev_err(pmx->dev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
@@ -2267,7 +2268,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
dev_dbg(pmx->dev,
"padgroup %s %s\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
}
}
@@ -2588,7 +2589,7 @@ static int gemini_pmx_probe(struct platform_device *pdev)
tmp = val;
for_each_set_bit(i, &tmp, PADS_MAXBIT) {
dev_dbg(dev, "pad group %s %s\n", gemini_padgroups[i],
- (val & BIT(i)) ? "enabled" : "disabled");
+ str_enabled_disabled(val & BIT(i)));
}
/* Check if flash pin is set */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 31703737731b..bc7ee54e062b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3699,7 +3699,7 @@ static void ingenic_gpio_irq_print_chip(struct irq_data *data, struct seq_file *
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
- seq_printf(p, "%s", gpio_chip->label);
+ seq_puts(p, gpio_chip->label);
}
static const struct irq_chip ingenic_gpio_irqchip = {
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 61532a7a612a..329d54b11529 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1777,7 +1777,7 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct ocelot_match_data luton_desc = {
+static const struct ocelot_match_data luton_desc = {
.desc = {
.name = "luton-pinctrl",
.pins = luton_pins,
@@ -1788,7 +1788,7 @@ static struct ocelot_match_data luton_desc = {
},
};
-static struct ocelot_match_data serval_desc = {
+static const struct ocelot_match_data serval_desc = {
.desc = {
.name = "serval-pinctrl",
.pins = serval_pins,
@@ -1799,7 +1799,7 @@ static struct ocelot_match_data serval_desc = {
},
};
-static struct ocelot_match_data ocelot_desc = {
+static const struct ocelot_match_data ocelot_desc = {
.desc = {
.name = "ocelot-pinctrl",
.pins = ocelot_pins,
@@ -1810,7 +1810,7 @@ static struct ocelot_match_data ocelot_desc = {
},
};
-static struct ocelot_match_data jaguar2_desc = {
+static const struct ocelot_match_data jaguar2_desc = {
.desc = {
.name = "jaguar2-pinctrl",
.pins = jaguar2_pins,
@@ -1821,7 +1821,7 @@ static struct ocelot_match_data jaguar2_desc = {
},
};
-static struct ocelot_match_data servalt_desc = {
+static const struct ocelot_match_data servalt_desc = {
.desc = {
.name = "servalt-pinctrl",
.pins = servalt_pins,
@@ -1832,7 +1832,7 @@ static struct ocelot_match_data servalt_desc = {
},
};
-static struct ocelot_match_data sparx5_desc = {
+static const struct ocelot_match_data sparx5_desc = {
.desc = {
.name = "sparx5-pinctrl",
.pins = sparx5_pins,
@@ -1850,7 +1850,7 @@ static struct ocelot_match_data sparx5_desc = {
},
};
-static struct ocelot_match_data lan966x_desc = {
+static const struct ocelot_match_data lan966x_desc = {
.desc = {
.name = "lan966x-pinctrl",
.pins = lan966x_pins,
@@ -1867,7 +1867,7 @@ static struct ocelot_match_data lan966x_desc = {
},
};
-static struct ocelot_match_data lan969x_desc = {
+static const struct ocelot_match_data lan969x_desc = {
.desc = {
.name = "lan969x-pinctrl",
.pins = lan969x_pins,
@@ -2116,7 +2116,7 @@ static void ocelot_irq_ack(struct irq_data *data)
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_level_irqchip = {
+static const struct irq_chip ocelot_level_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
.irq_ack = ocelot_irq_ack,
@@ -2126,7 +2126,7 @@ static struct irq_chip ocelot_level_irqchip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS
};
-static struct irq_chip ocelot_irqchip = {
+static const struct irq_chip ocelot_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
.irq_ack = ocelot_irq_ack,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 36d4eaf0ebd1..15145882950f 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Pinctrl driver for Rockchip SoCs
- *
+ * Copyright (c) 2020-2024 Rockchip Electronics Co., Ltd.
* Copyright (c) 2013 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
*
@@ -2003,6 +2003,151 @@ static int rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3562_DRV_BITS_PER_PIN 8
+#define RK3562_DRV_PINS_PER_REG 2
+#define RK3562_DRV_GPIO0_OFFSET 0x20070
+#define RK3562_DRV_GPIO1_OFFSET 0x200
+#define RK3562_DRV_GPIO2_OFFSET 0x240
+#define RK3562_DRV_GPIO3_OFFSET 0x10280
+#define RK3562_DRV_GPIO4_OFFSET 0x102C0
+
+static int rk3562_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_DRV_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_DRV_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_DRV_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_DRV_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_DRV_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3562_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_DRV_PINS_PER_REG;
+ *bit *= RK3562_DRV_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3562_PULL_BITS_PER_PIN 2
+#define RK3562_PULL_PINS_PER_REG 8
+#define RK3562_PULL_GPIO0_OFFSET 0x20020
+#define RK3562_PULL_GPIO1_OFFSET 0x80
+#define RK3562_PULL_GPIO2_OFFSET 0x90
+#define RK3562_PULL_GPIO3_OFFSET 0x100A0
+#define RK3562_PULL_GPIO4_OFFSET 0x100B0
+
+static int rk3562_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_PULL_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_PULL_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_PULL_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_PULL_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_PULL_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3562_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_PULL_PINS_PER_REG;
+ *bit *= RK3562_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3562_SMT_BITS_PER_PIN 2
+#define RK3562_SMT_PINS_PER_REG 8
+#define RK3562_SMT_GPIO0_OFFSET 0x20030
+#define RK3562_SMT_GPIO1_OFFSET 0xC0
+#define RK3562_SMT_GPIO2_OFFSET 0xD0
+#define RK3562_SMT_GPIO3_OFFSET 0x100E0
+#define RK3562_SMT_GPIO4_OFFSET 0x100F0
+
+static int rk3562_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_SMT_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_SMT_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_SMT_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_SMT_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_SMT_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3562_SMT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_SMT_PINS_PER_REG;
+ *bit *= RK3562_SMT_BITS_PER_PIN;
+
+ return 0;
+}
+
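
All three rk3562 helpers above share one addressing scheme: register = bank base offset + (pin / pins-per-reg) * 4, and field start = (pin % pins-per-reg) * bits-per-pin. A standalone userspace sketch of the arithmetic for the drive-strength case, pin 11 (GPIO2_B3):

	#include <stdio.h>

	#define RK3562_DRV_BITS_PER_PIN	8
	#define RK3562_DRV_PINS_PER_REG	2
	#define RK3562_DRV_GPIO2_OFFSET	0x240

	int main(void)
	{
		unsigned int pin = 11;	/* GPIO2_B3 */
		unsigned int reg = RK3562_DRV_GPIO2_OFFSET +
				   (pin / RK3562_DRV_PINS_PER_REG) * 4;
		unsigned int bit = (pin % RK3562_DRV_PINS_PER_REG) *
				   RK3562_DRV_BITS_PER_PIN;

		/* Two pins per 32-bit register, an 8-bit drive field each. */
		printf("reg=0x%x bit=%u\n", reg, bit);	/* reg=0x254 bit=8 */
		return 0;
	}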
#define RK3568_PULL_PMU_OFFSET 0x20
#define RK3568_PULL_GRF_OFFSET 0x80
#define RK3568_PULL_BITS_PER_PIN 2
@@ -2495,7 +2640,8 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3588_DRV_BITS_PER_PIN;
ret = strength;
goto config;
- } else if (ctrl->type == RK3568) {
+ } else if (ctrl->type == RK3562 ||
+ ctrl->type == RK3568) {
rmask_bits = RK3568_DRV_BITS_PER_PIN;
ret = (1 << (strength + 1)) - 1;
goto config;
@@ -2639,6 +2785,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -2699,6 +2846,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -2810,6 +2958,7 @@ static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num)
data >>= bit;
switch (ctrl->type) {
+ case RK3562:
case RK3568:
return data & ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1);
default:
@@ -2839,6 +2988,7 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
/* enable the write to the equivalent lower bits */
switch (ctrl->type) {
+ case RK3562:
case RK3568:
data = ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1) << (bit + 16);
rmask = data | (data >> 16);
@@ -2965,6 +3115,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -4086,6 +4237,49 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
.drv_calc_reg = rk3399_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3562_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS_OFFSET(0, 32, "gpio0",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20000, 0x20008, 0x20010, 0x20018),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(1, 32, "gpio1",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0, 0x08, 0x10, 0x18),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(2, 32, "gpio2",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(3, 32, "gpio3",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x10040, 0x10048, 0x10050, 0x10058),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(4, 16, "gpio4",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0,
+ 0,
+ 0x10060, 0x10068, 0, 0),
+};
+
+static struct rockchip_pin_ctrl rk3562_pin_ctrl __maybe_unused = {
+ .pin_banks = rk3562_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3562_pin_banks),
+ .label = "RK3562-GPIO",
+ .type = RK3562,
+ .pull_calc_reg = rk3562_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3562_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3562_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3568_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
@@ -4210,6 +4404,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3368_pin_ctrl },
{ .compatible = "rockchip,rk3399-pinctrl",
.data = &rk3399_pin_ctrl },
+ { .compatible = "rockchip,rk3562-pinctrl",
+ .data = &rk3562_pin_ctrl },
{ .compatible = "rockchip,rk3568-pinctrl",
.data = &rk3568_pin_ctrl },
{ .compatible = "rockchip,rk3576-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 6ebbb0a88ce7..87a20cec8e21 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2020-2021 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2020-2024 Rockchip Electronics Co., Ltd.
*
* Copyright (c) 2013 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
@@ -196,6 +196,7 @@ enum rockchip_pinctrl_type {
RK3328,
RK3368,
RK3399,
+ RK3562,
RK3568,
RK3576,
RK3588,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 521f6fef0b9f..aae01120dc52 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -380,7 +380,7 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "input %s ", str_high_low(val));
if (type)
seq_printf(s, "with internal pull-%s ",
- pupd ? "up" : "down");
+ str_up_down(pupd));
else
seq_printf(s, "%s ", pupd ? "floating" : "analog");
}
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 206226318e45..35f47660a56b 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -137,6 +137,12 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8917
+ tristate "Qualcomm 8917 pin controller driver"
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8917 platform.
+
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 9a23d41d801c..5c4100925cf9 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8909) += pinctrl-msm8909.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8917) += pinctrl-msm8917.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 796299cd2e4e..0d610b076da3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -233,7 +233,10 @@ enum ipq5424_functions {
msm_mux_sdc_clk,
msm_mux_sdc_cmd,
msm_mux_sdc_data,
- msm_mux_spi0,
+ msm_mux_spi0_clk,
+ msm_mux_spi0_cs,
+ msm_mux_spi0_miso,
+ msm_mux_spi0_mosi,
msm_mux_spi1,
msm_mux_spi10,
msm_mux_spi11,
@@ -297,8 +300,8 @@ static const char * const qspi_clk_groups[] = {
"gpio5",
};
-static const char * const spi0_groups[] = {
- "gpio6", "gpio7", "gpio8", "gpio9",
+static const char * const spi0_clk_groups[] = {
+ "gpio6",
};
static const char * const pwm1_groups[] = {
@@ -315,14 +318,26 @@ static const char * const qdss_tracedata_a_groups[] = {
"gpio38", "gpio39",
};
+static const char * const spi0_cs_groups[] = {
+ "gpio7",
+};
+
static const char * const cri_trng1_groups[] = {
"gpio7",
};
+static const char * const spi0_miso_groups[] = {
+ "gpio8",
+};
+
static const char * const cri_trng2_groups[] = {
"gpio8",
};
+static const char * const spi0_mosi_groups[] = {
+ "gpio9",
+};
+
static const char * const cri_trng3_groups[] = {
"gpio9",
};
@@ -680,7 +695,10 @@ static const struct pinfunction ipq5424_functions[] = {
MSM_PIN_FUNCTION(sdc_clk),
MSM_PIN_FUNCTION(sdc_cmd),
MSM_PIN_FUNCTION(sdc_data),
- MSM_PIN_FUNCTION(spi0),
+ MSM_PIN_FUNCTION(spi0_clk),
+ MSM_PIN_FUNCTION(spi0_cs),
+ MSM_PIN_FUNCTION(spi0_miso),
+ MSM_PIN_FUNCTION(spi0_mosi),
MSM_PIN_FUNCTION(spi1),
MSM_PIN_FUNCTION(spi10),
MSM_PIN_FUNCTION(spi11),
@@ -700,10 +718,10 @@ static const struct msm_pingroup ipq5424_groups[] = {
PINGROUP(3, sdc_data, qspi_data, pwm2, _, _, _, _, _, _),
PINGROUP(4, sdc_cmd, qspi_cs, _, _, _, _, _, _, _),
PINGROUP(5, sdc_clk, qspi_clk, _, _, _, _, _, _, _),
- PINGROUP(6, spi0, pwm1, _, cri_trng0, qdss_tracedata_a, _, _, _, _),
- PINGROUP(7, spi0, pwm1, _, cri_trng1, qdss_tracedata_a, _, _, _, _),
- PINGROUP(8, spi0, pwm1, wci_txd, wci_rxd, _, cri_trng2, qdss_tracedata_a, _, _),
- PINGROUP(9, spi0, pwm1, _, cri_trng3, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(6, spi0_clk, pwm1, _, cri_trng0, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(7, spi0_cs, pwm1, _, cri_trng1, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(8, spi0_miso, pwm1, wci_txd, wci_rxd, _, cri_trng2, qdss_tracedata_a, _, _),
+ PINGROUP(9, spi0_mosi, pwm1, _, cri_trng3, qdss_tracedata_a, _, _, _, _),
PINGROUP(10, uart0, pwm0, spi11, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _),
PINGROUP(11, uart0, pwm0, spi1, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _),
PINGROUP(12, uart0, pwm0, spi11, _, prng_rosc0, qdss_tracedata_a, _, _, _),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index ec913c2e200f..47daa47153c9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -714,7 +715,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
}
seq_printf(s, " %-8s: %-3s", g->grp.name, is_out ? "out" : "in");
- seq_printf(s, " %-4s func%d", val ? "high" : "low", func);
+ seq_printf(s, " %-4s func%d", str_high_low(val), func);
seq_printf(s, " %dmA", msm_regval_to_drive(drive));
if (pctrl->soc->pull_no_keeper)
seq_printf(s, " %s", pulls_no_keeper[pull]);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
new file mode 100644
index 000000000000..cff137bb3b23
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc msm8917_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "SDC1_CLK"),
+ PINCTRL_PIN(135, "SDC1_CMD"),
+ PINCTRL_PIN(136, "SDC1_DATA"),
+ PINCTRL_PIN(137, "SDC1_RCLK"),
+ PINCTRL_PIN(138, "SDC2_CLK"),
+ PINCTRL_PIN(139, "SDC2_CMD"),
+ PINCTRL_PIN(140, "SDC2_DATA"),
+ PINCTRL_PIN(141, "QDSD_CLK"),
+ PINCTRL_PIN(142, "QDSD_CMD"),
+ PINCTRL_PIN(143, "QDSD_DATA0"),
+ PINCTRL_PIN(144, "QDSD_DATA1"),
+ PINCTRL_PIN(145, "QDSD_DATA2"),
+ PINCTRL_PIN(146, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+
+static const unsigned int sdc1_clk_pins[] = { 134 };
+static const unsigned int sdc1_cmd_pins[] = { 135 };
+static const unsigned int sdc1_data_pins[] = { 136 };
+static const unsigned int sdc1_rclk_pins[] = { 137 };
+static const unsigned int sdc2_clk_pins[] = { 138 };
+static const unsigned int sdc2_cmd_pins[] = { 139 };
+static const unsigned int sdc2_data_pins[] = { 140 };
+static const unsigned int qdsd_clk_pins[] = { 141 };
+static const unsigned int qdsd_cmd_pins[] = { 142 };
+static const unsigned int qdsd_data0_pins[] = { 143 };
+static const unsigned int qdsd_data1_pins[] = { 144 };
+static const unsigned int qdsd_data2_pins[] = { 145 };
+static const unsigned int qdsd_data3_pins[] = { 146 };
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
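
One thing the PINGROUP() initializers above encode: the MSM8917 TLMM gives each GPIO its own 0x1000-byte register tile, with the ctl/io/intr registers at fixed offsets inside it. A quick standalone check of the offsets computed for gpio10:

	#include <stdio.h>

	int main(void)
	{
		unsigned int id = 10;	/* gpio10 */

		/* Offsets exactly as the PINGROUP() macro computes them. */
		printf("ctl=0x%x io=0x%x intr_cfg=0x%x intr_status=0x%x\n",
		       0x1000 * id, 0x4 + 0x1000 * id,
		       0x8 + 0x1000 * id, 0xc + 0x1000 * id);
		/* -> ctl=0xa000 io=0xa004 intr_cfg=0xa008 intr_status=0xa00c */
		return 0;
	}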
+enum msm8917_functions {
+ msm_mux_accel_int,
+ msm_mux_adsp_ext,
+ msm_mux_alsp_int,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac_to_gpio_native,
+ msm_mux_atest_gpsadc_dtest0_native,
+ msm_mux_atest_gpsadc_dtest1_native,
+ msm_mux_atest_tsens,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_audio_ref,
+ msm_mux_audio_reset,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp6_spi,
+ msm_mux_blsp8_spi,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_i2c7,
+ msm_mux_blsp_i2c8,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_spi7,
+ msm_mux_blsp_spi8,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_uart4,
+ msm_mux_blsp_uart5,
+ msm_mux_blsp_uart6,
+ msm_mux_blsp_uart7,
+ msm_mux_blsp_uart8,
+ msm_mux_cam0_ldo,
+ msm_mux_cam1_rst,
+ msm_mux_cam1_standby,
+ msm_mux_cam2_rst,
+ msm_mux_cam2_standby,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cdc_pdm0,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_codec_mad,
+ msm_mux_coex_uart,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi_cdc,
+ msm_mux_ebi_ch0,
+ msm_mux_ext_lpass,
+ msm_mux_forced_usb,
+ msm_mux_fp_gpio,
+ msm_mux_fp_int,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_gsm0_tx,
+ msm_mux_key_focus,
+ msm_mux_key_snapshot,
+ msm_mux_key_volp,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_m_voc,
+ msm_mux_mag_int,
+ msm_mux_mdp_vsync,
+ msm_mux_mipi_dsi0,
+ msm_mux_modem_tsync,
+ msm_mux_nav_pps,
+ msm_mux_nav_pps_in_a,
+ msm_mux_nav_pps_in_b,
+ msm_mux_nav_tsync,
+ msm_mux_nfc_pwr,
+ msm_mux_ov_ldo,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_sensor_rst,
+ msm_mux_smb_int,
+ msm_mux_ssbi_wtr1,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim_batt,
+ msm_mux_us_emitter,
+ msm_mux_us_euro,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan2,
+ msm_mux_webcam_rst,
+ msm_mux_webcam_standby,
+ msm_mux_wsa_io,
+ msm_mux_wsa_irq,
+ msm_mux__,
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio0", "gpio1", "gpio6", "gpio7", "gpio12", "gpio13", "gpio23",
+ "gpio42", "gpio43", "gpio44", "gpio47", "gpio66", "gpio86", "gpio87",
+ "gpio88", "gpio92",
+};
+
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133",
+};
+
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio1",
+};
+
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio2",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio2",
+};
+
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const pbs0_groups[] = {
+ "gpio8",
+};
+
+static const char * const pbs1_groups[] = {
+ "gpio9",
+};
+
+static const char * const pwr_modem_enabled_b_groups[] = {
+ "gpio9",
+};
+
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio10",
+};
+
+static const char * const ldo_update_groups[] = {
+ "gpio4",
+};
+
+static const char * const atest_combodac_to_gpio_native_groups[] = {
+ "gpio4", "gpio12", "gpio13", "gpio20", "gpio21", "gpio28", "gpio29",
+ "gpio30", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43", "gpio44",
+ "gpio45", "gpio46", "gpio47", "gpio48", "gpio67", "gpio115",
+};
+
+static const char * const ldo_en_groups[] = {
+ "gpio5",
+};
+
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio6",
+};
+
+static const char * const pbs2_groups[] = {
+ "gpio7",
+};
+
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+ "gpio7",
+};
+
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio11",
+};
+
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const blsp_uart4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio12", "gpio13", "gpio94", "gpio95",
+};
+
+static const char * const pwr_nav_enabled_b_groups[] = {
+ "gpio12",
+};
+
+static const char * const codec_mad_groups[] = {
+ "gpio13",
+};
+
+static const char * const pwr_crypto_enabled_b_groups[] = {
+ "gpio13",
+};
+
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const blsp_uart5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio16",
+};
+
+static const char * const atest_bbrx1_groups[] = {
+ "gpio16",
+};
+
+static const char * const m_voc_groups[] = {
+ "gpio17", "gpio21",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio17",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio21",
+};
+
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio22",
+};
+
+static const char * const atest_wlan0_groups[] = {
+ "gpio22",
+};
+
+static const char * const atest_bbrx0_groups[] = {
+ "gpio17",
+};
+
+static const char * const blsp_i2c5_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio18",
+};
+
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+ "gpio18",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio19", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", "gpio38", "gpio39",
+ "gpio40", "gpio50",
+};
+
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio20",
+};
+
+static const char * const atest_wlan1_groups[] = {
+ "gpio23",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const pwr_modem_enabled_a_groups[] = {
+ "gpio29",
+};
+
+static const char * const cci_timer0_groups[] = {
+ "gpio33",
+};
+
+static const char * const cci_timer1_groups[] = {
+ "gpio34",
+};
+
+static const char * const cam1_standby_groups[] = {
+ "gpio35",
+};
+
+static const char * const pwr_nav_enabled_a_groups[] = {
+ "gpio35",
+};
+
+static const char * const cam1_rst_groups[] = {
+ "gpio36",
+};
+
+static const char * const pwr_crypto_enabled_a_groups[] = {
+ "gpio36",
+};
+
+static const char * const forced_usb_groups[] = {
+ "gpio37",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio37",
+};
+
+static const char * const cam2_rst_groups[] = {
+ "gpio38",
+};
+
+static const char * const webcam_standby_groups[] = {
+ "gpio39",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio39",
+};
+
+static const char * const webcam_rst_groups[] = {
+ "gpio40",
+};
+
+static const char * const ov_ldo_groups[] = {
+ "gpio41",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio41",
+};
+
+static const char * const accel_int_groups[] = {
+ "gpio42",
+};
+
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio42",
+};
+
+static const char * const alsp_int_groups[] = {
+ "gpio43",
+};
+
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio43",
+};
+
+static const char * const mag_int_groups[] = {
+ "gpio44",
+};
+
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio44",
+};
+
+static const char * const blsp6_spi_groups[] = {
+ "gpio47",
+};
+
+static const char * const fp_int_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio48",
+};
+
+static const char * const uim_batt_groups[] = {
+ "gpio49",
+};
+
+static const char * const cam2_standby_groups[] = {
+ "gpio50",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio51",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio52",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio53",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio54",
+};
+
+static const char * const uim2_data_groups[] = {
+ "gpio55",
+};
+
+static const char * const uim2_clk_groups[] = {
+ "gpio56",
+};
+
+static const char * const uim2_reset_groups[] = {
+ "gpio57",
+};
+
+static const char * const uim2_present_groups[] = {
+ "gpio58",
+};
+
+static const char * const sensor_rst_groups[] = {
+ "gpio59",
+};
+
+static const char * const mipi_dsi0_groups[] = {
+ "gpio60",
+};
+
+static const char * const smb_int_groups[] = {
+ "gpio61",
+};
+
+static const char * const cam0_ldo_groups[] = {
+ "gpio62",
+};
+
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio63",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio63",
+};
+
+static const char * const bimc_dte0_groups[] = {
+ "gpio63", "gpio65",
+};
+
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio66", "gpio85", "gpio86", "gpio88", "gpio94", "gpio95",
+};
+
+static const char * const sdcard_det_groups[] = {
+ "gpio67",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio67",
+};
+
+static const char * const ebi_cdc_groups[] = {
+ "gpio67", "gpio69", "gpio118", "gpio119", "gpio120", "gpio123",
+};
+
+static const char * const audio_reset_groups[] = {
+ "gpio68",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio68",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio69",
+};
+
+static const char * const cdc_pdm0_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74",
+};
+
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio69",
+};
+
+static const char * const lpass_slimbus_groups[] = {
+ "gpio70",
+};
+
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio71",
+};
+
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio72",
+};
+
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+
+static const char * const wcss_bt_groups[] = {
+ "gpio75", "gpio83", "gpio84",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio75",
+};
+
+static const char * const ebi_ch0_groups[] = {
+ "gpio75",
+};
+
+static const char * const wcss_wlan2_groups[] = {
+ "gpio76",
+};
+
+static const char * const wcss_wlan1_groups[] = {
+ "gpio77",
+};
+
+static const char * const wcss_wlan0_groups[] = {
+ "gpio78",
+};
+
+static const char * const wcss_wlan_groups[] = {
+ "gpio79", "gpio80",
+};
+
+static const char * const wcss_fm_groups[] = {
+ "gpio81", "gpio82",
+};
+
+static const char * const ext_lpass_groups[] = {
+ "gpio81",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio82",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio83",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio84",
+};
+
+static const char * const blsp_spi7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const blsp_uart7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio87",
+};
+
+static const char * const blsp_i2c7_groups[] = {
+ "gpio87", "gpio88",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio87",
+};
+
+static const char * const dmic0_clk_groups[] = {
+ "gpio89",
+};
+
+static const char * const dmic0_data_groups[] = {
+ "gpio90",
+};
+
+static const char * const key_volp_groups[] = {
+ "gpio91",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio91",
+};
+
+static const char * const us_emitter_groups[] = {
+ "gpio92",
+};
+
+static const char * const wsa_irq_groups[] = {
+ "gpio93",
+};
+
+static const char * const wsa_io_groups[] = {
+ "gpio94", "gpio95",
+};
+
+static const char * const blsp_spi8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const blsp_uart8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const blsp_i2c8_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const nav_pps_in_a_groups[] = {
+ "gpio115",
+};
+
+static const char * const pa_indicator_groups[] = {
+ "gpio116",
+};
+
+static const char * const modem_tsync_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_tsync_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_pps_in_b_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_pps_groups[] = {
+ "gpio117",
+};
+
+static const char * const gsm0_tx_groups[] = {
+ "gpio119",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio120",
+};
+
+static const char * const atest_tsens_groups[] = {
+ "gpio120",
+};
+
+static const char * const bimc_dte1_groups[] = {
+ "gpio121", "gpio122",
+};
+
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio122", "gpio123",
+};
+
+static const char * const fp_gpio_groups[] = {
+ "gpio124",
+};
+
+static const char * const coex_uart_groups[] = {
+ "gpio124", "gpio127",
+};
+
+static const char * const key_snapshot_groups[] = {
+ "gpio127",
+};
+
+static const char * const key_focus_groups[] = {
+ "gpio128",
+};
+
+static const char * const nfc_pwr_groups[] = {
+ "gpio129",
+};
+
+static const char * const blsp8_spi_groups[] = {
+ "gpio130",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio132",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio133",
+};
+
+static const struct pinfunction msm8917_functions[] = {
+ MSM_PIN_FUNCTION(accel_int),
+ MSM_PIN_FUNCTION(adsp_ext),
+ MSM_PIN_FUNCTION(alsp_int),
+ MSM_PIN_FUNCTION(atest_bbrx0),
+ MSM_PIN_FUNCTION(atest_bbrx1),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_char0),
+ MSM_PIN_FUNCTION(atest_char1),
+ MSM_PIN_FUNCTION(atest_char2),
+ MSM_PIN_FUNCTION(atest_char3),
+ MSM_PIN_FUNCTION(atest_combodac_to_gpio_native),
+ MSM_PIN_FUNCTION(atest_gpsadc_dtest0_native),
+ MSM_PIN_FUNCTION(atest_gpsadc_dtest1_native),
+ MSM_PIN_FUNCTION(atest_tsens),
+ MSM_PIN_FUNCTION(atest_wlan0),
+ MSM_PIN_FUNCTION(atest_wlan1),
+ MSM_PIN_FUNCTION(audio_ref),
+ MSM_PIN_FUNCTION(audio_reset),
+ MSM_PIN_FUNCTION(bimc_dte0),
+ MSM_PIN_FUNCTION(bimc_dte1),
+ MSM_PIN_FUNCTION(blsp6_spi),
+ MSM_PIN_FUNCTION(blsp8_spi),
+ MSM_PIN_FUNCTION(blsp_i2c1),
+ MSM_PIN_FUNCTION(blsp_i2c2),
+ MSM_PIN_FUNCTION(blsp_i2c3),
+ MSM_PIN_FUNCTION(blsp_i2c4),
+ MSM_PIN_FUNCTION(blsp_i2c5),
+ MSM_PIN_FUNCTION(blsp_i2c6),
+ MSM_PIN_FUNCTION(blsp_i2c7),
+ MSM_PIN_FUNCTION(blsp_i2c8),
+ MSM_PIN_FUNCTION(blsp_spi1),
+ MSM_PIN_FUNCTION(blsp_spi2),
+ MSM_PIN_FUNCTION(blsp_spi3),
+ MSM_PIN_FUNCTION(blsp_spi4),
+ MSM_PIN_FUNCTION(blsp_spi5),
+ MSM_PIN_FUNCTION(blsp_spi6),
+ MSM_PIN_FUNCTION(blsp_spi7),
+ MSM_PIN_FUNCTION(blsp_spi8),
+ MSM_PIN_FUNCTION(blsp_uart1),
+ MSM_PIN_FUNCTION(blsp_uart2),
+ MSM_PIN_FUNCTION(blsp_uart3),
+ MSM_PIN_FUNCTION(blsp_uart4),
+ MSM_PIN_FUNCTION(blsp_uart5),
+ MSM_PIN_FUNCTION(blsp_uart6),
+ MSM_PIN_FUNCTION(blsp_uart7),
+ MSM_PIN_FUNCTION(blsp_uart8),
+ MSM_PIN_FUNCTION(cam0_ldo),
+ MSM_PIN_FUNCTION(cam1_rst),
+ MSM_PIN_FUNCTION(cam1_standby),
+ MSM_PIN_FUNCTION(cam2_rst),
+ MSM_PIN_FUNCTION(cam2_standby),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async),
+ MSM_PIN_FUNCTION(cci_i2c),
+ MSM_PIN_FUNCTION(cci_timer0),
+ MSM_PIN_FUNCTION(cci_timer1),
+ MSM_PIN_FUNCTION(cdc_pdm0),
+ MSM_PIN_FUNCTION(codec_int1),
+ MSM_PIN_FUNCTION(codec_int2),
+ MSM_PIN_FUNCTION(codec_mad),
+ MSM_PIN_FUNCTION(coex_uart),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(cri_trng0),
+ MSM_PIN_FUNCTION(cri_trng1),
+ MSM_PIN_FUNCTION(dbg_out),
+ MSM_PIN_FUNCTION(dmic0_clk),
+ MSM_PIN_FUNCTION(dmic0_data),
+ MSM_PIN_FUNCTION(ebi_cdc),
+ MSM_PIN_FUNCTION(ebi_ch0),
+ MSM_PIN_FUNCTION(ext_lpass),
+ MSM_PIN_FUNCTION(forced_usb),
+ MSM_PIN_FUNCTION(fp_gpio),
+ MSM_PIN_FUNCTION(fp_int),
+ MSM_PIN_FUNCTION(gcc_gp1_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp1_clk_b),
+ MSM_PIN_FUNCTION(gcc_gp2_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp2_clk_b),
+ MSM_PIN_FUNCTION(gcc_gp3_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp3_clk_b),
+ MSM_PIN_FUNCTION(gcc_plltest),
+ MSM_PIN_FUNCTION(gcc_tlmm),
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(gsm0_tx),
+ MSM_PIN_FUNCTION(key_focus),
+ MSM_PIN_FUNCTION(key_snapshot),
+ MSM_PIN_FUNCTION(key_volp),
+ MSM_PIN_FUNCTION(ldo_en),
+ MSM_PIN_FUNCTION(ldo_update),
+ MSM_PIN_FUNCTION(lpass_slimbus),
+ MSM_PIN_FUNCTION(lpass_slimbus0),
+ MSM_PIN_FUNCTION(lpass_slimbus1),
+ MSM_PIN_FUNCTION(m_voc),
+ MSM_PIN_FUNCTION(mag_int),
+ MSM_PIN_FUNCTION(mdp_vsync),
+ MSM_PIN_FUNCTION(mipi_dsi0),
+ MSM_PIN_FUNCTION(modem_tsync),
+ MSM_PIN_FUNCTION(nav_pps),
+ MSM_PIN_FUNCTION(nav_pps_in_a),
+ MSM_PIN_FUNCTION(nav_pps_in_b),
+ MSM_PIN_FUNCTION(nav_tsync),
+ MSM_PIN_FUNCTION(nfc_pwr),
+ MSM_PIN_FUNCTION(ov_ldo),
+ MSM_PIN_FUNCTION(pa_indicator),
+ MSM_PIN_FUNCTION(pbs0),
+ MSM_PIN_FUNCTION(pbs1),
+ MSM_PIN_FUNCTION(pbs2),
+ MSM_PIN_FUNCTION(pri_mi2s),
+ MSM_PIN_FUNCTION(pri_mi2s_mclk_a),
+ MSM_PIN_FUNCTION(pri_mi2s_mclk_b),
+ MSM_PIN_FUNCTION(pri_mi2s_ws),
+ MSM_PIN_FUNCTION(prng_rosc),
+ MSM_PIN_FUNCTION(pwr_crypto_enabled_a),
+ MSM_PIN_FUNCTION(pwr_crypto_enabled_b),
+ MSM_PIN_FUNCTION(pwr_modem_enabled_a),
+ MSM_PIN_FUNCTION(pwr_modem_enabled_b),
+ MSM_PIN_FUNCTION(pwr_nav_enabled_a),
+ MSM_PIN_FUNCTION(pwr_nav_enabled_b),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_a0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_a1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_b0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_b1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_a0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_a1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_b0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_b1),
+ MSM_PIN_FUNCTION(qdss_traceclk_a),
+ MSM_PIN_FUNCTION(qdss_traceclk_b),
+ MSM_PIN_FUNCTION(qdss_tracectl_a),
+ MSM_PIN_FUNCTION(qdss_tracectl_b),
+ MSM_PIN_FUNCTION(qdss_tracedata_a),
+ MSM_PIN_FUNCTION(qdss_tracedata_b),
+ MSM_PIN_FUNCTION(sd_write),
+ MSM_PIN_FUNCTION(sdcard_det),
+ MSM_PIN_FUNCTION(sec_mi2s),
+ MSM_PIN_FUNCTION(sec_mi2s_mclk_a),
+ MSM_PIN_FUNCTION(sec_mi2s_mclk_b),
+ MSM_PIN_FUNCTION(sensor_rst),
+ MSM_PIN_FUNCTION(smb_int),
+ MSM_PIN_FUNCTION(ssbi_wtr1),
+ MSM_PIN_FUNCTION(ts_resout),
+ MSM_PIN_FUNCTION(ts_sample),
+ MSM_PIN_FUNCTION(uim1_clk),
+ MSM_PIN_FUNCTION(uim1_data),
+ MSM_PIN_FUNCTION(uim1_present),
+ MSM_PIN_FUNCTION(uim1_reset),
+ MSM_PIN_FUNCTION(uim2_clk),
+ MSM_PIN_FUNCTION(uim2_data),
+ MSM_PIN_FUNCTION(uim2_present),
+ MSM_PIN_FUNCTION(uim2_reset),
+ MSM_PIN_FUNCTION(uim_batt),
+ MSM_PIN_FUNCTION(us_emitter),
+ MSM_PIN_FUNCTION(us_euro),
+ MSM_PIN_FUNCTION(wcss_bt),
+ MSM_PIN_FUNCTION(wcss_fm),
+ MSM_PIN_FUNCTION(wcss_wlan),
+ MSM_PIN_FUNCTION(wcss_wlan0),
+ MSM_PIN_FUNCTION(wcss_wlan1),
+ MSM_PIN_FUNCTION(wcss_wlan2),
+ MSM_PIN_FUNCTION(webcam_rst),
+ MSM_PIN_FUNCTION(webcam_standby),
+ MSM_PIN_FUNCTION(wsa_io),
+ MSM_PIN_FUNCTION(wsa_irq),
+};
+
+static const struct msm_pingroup msm8917_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, qdss_tracedata_b, _, _, _, _,
+ _, _),
+ PINGROUP(1, blsp_spi1, blsp_uart1, adsp_ext, _, _, _, _, _,
+ qdss_tracedata_b),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, prng_rosc, _, _, _,
+ _, _),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi2, blsp_uart2, ldo_update, _,
+ atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(5, blsp_spi2, blsp_uart2, ldo_en, _, _, _, _, _, _),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, gcc_gp1_clk_b,
+ qdss_tracedata_b, _, _, _, _),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, pbs2, _,
+ qdss_tracedata_b, _, atest_gpsadc_dtest0_native, _),
+ PINGROUP(8, blsp_spi3, blsp_uart3, pbs0, _, _, _, _, _, _),
+ PINGROUP(9, blsp_spi3, blsp_uart3, pbs1, pwr_modem_enabled_b, _, _,
+ _, _, _),
+ PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp2_clk_b, _, _,
+ _, _, _),
+ PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp3_clk_b, _, _,
+ _, _, _),
+ PINGROUP(12, blsp_spi4, blsp_uart4, sec_mi2s, pwr_nav_enabled_b, _,
+ _, _, _, _),
+ PINGROUP(13, blsp_spi4, blsp_uart4, sec_mi2s, pwr_crypto_enabled_b, _,
+ _, _, _, _),
+ PINGROUP(14, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(15, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, blsp_uart5, _, _, _, _, qdss_traceclk_a,
+ _, atest_bbrx1),
+ PINGROUP(17, blsp_spi5, blsp_uart5, m_voc, qdss_cti_trig_in_a0, _,
+ atest_bbrx0, _, _, _),
+ PINGROUP(18, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracectl_a, _,
+ atest_gpsadc_dtest1_native, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracedata_a, _,
+ _, _, _, _),
+ PINGROUP(20, blsp_spi6, blsp_uart6, _, _, _, _, _, _,
+ qdss_tracectl_b),
+ PINGROUP(21, blsp_spi6, blsp_uart6, m_voc, _, _, _, _, _,
+ qdss_cti_trig_in_b0),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_traceclk_b, _,
+ atest_wlan0, _, _, _),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_tracedata_b, _,
+ atest_wlan1, _, _, _),
+ PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, pri_mi2s_mclk_a, sec_mi2s_mclk_a, _, _, _,
+ _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(28, cam_mclk, _, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(29, cci_i2c, pwr_modem_enabled_a, _, _, _, _, _,
+ qdss_tracedata_a, _),
+ PINGROUP(30, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(31, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(32, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(33, cci_timer0, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(34, cci_timer1, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(35, pwr_nav_enabled_a, _, _, _, _, _, _, _,
+ qdss_tracedata_a),
+ PINGROUP(36, pwr_crypto_enabled_a, _, _, _, _, _, _, _,
+ qdss_tracedata_a),
+ PINGROUP(37, _, _, _, _, _, qdss_cti_trig_out_b1, _, _, _),
+ PINGROUP(38, _, qdss_tracedata_a, _, _, _, _, _, _, _),
+ PINGROUP(39, cci_async, _, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(40, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native, _, _),
+ PINGROUP(41, sd_write, _, _, _, _, _, _, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(42, gcc_gp1_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(43, gcc_gp2_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(44, gcc_gp3_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(45, _, _, atest_combodac_to_gpio_native, _, _, _, _, _,
+ _),
+ PINGROUP(46, _, _, atest_combodac_to_gpio_native, _, _, _, _, _,
+ _),
+ PINGROUP(47, blsp6_spi, _, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(48, _, qdss_cti_trig_in_b1, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(49, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(50, qdss_tracedata_a, _, _, _, _, _, _, _, _),
+ PINGROUP(51, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(52, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim2_data, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim2_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim2_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(58, uim2_present, _, _, _, _, _, _, _, _),
+ PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ PINGROUP(60, _, _, _, _, _, _, _, _, _),
+ PINGROUP(61, _, _, _, _, _, _, _, _, _),
+ PINGROUP(62, _, _, _, _, _, _, _, _, _),
+ PINGROUP(63, atest_char3, dbg_out, bimc_dte0, _, _, _, _, _, _),
+ PINGROUP(64, _, _, _, _, _, _, _, _, _),
+ PINGROUP(65, bimc_dte0, _, _, _, _, _, _, _, _),
+ PINGROUP(66, sec_mi2s_mclk_b, pri_mi2s, _, qdss_tracedata_b, _, _,
+ _, _, _),
+ PINGROUP(67, atest_char1, ebi_cdc, _, atest_combodac_to_gpio_native,
+ _, _, _, _, _),
+ PINGROUP(68, atest_char0, _, _, _, _, _, _, _, _),
+ PINGROUP(69, audio_ref, cdc_pdm0, pri_mi2s_mclk_b, ebi_cdc, _, _, _,
+ _, _),
+ PINGROUP(70, lpass_slimbus, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(71, lpass_slimbus0, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(72, lpass_slimbus1, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(73, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(74, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(75, wcss_bt, atest_char2, _, ebi_ch0, _, _, _, _, _),
+ PINGROUP(76, wcss_wlan2, _, _, _, _, _, _, _, _),
+ PINGROUP(77, wcss_wlan1, _, _, _, _, _, _, _, _),
+ PINGROUP(78, wcss_wlan0, _, _, _, _, _, _, _, _),
+ PINGROUP(79, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(80, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(81, wcss_fm, ext_lpass, _, _, _, _, _, _, _),
+ PINGROUP(82, wcss_fm, cri_trng, _, _, _, _, _, _, _),
+ PINGROUP(83, wcss_bt, cri_trng1, _, _, _, _, _, _, _),
+ PINGROUP(84, wcss_bt, cri_trng0, _, _, _, _, _, _, _),
+ PINGROUP(85, pri_mi2s, blsp_spi7, blsp_uart7, _, _, _, _, _, _),
+ PINGROUP(86, pri_mi2s, blsp_spi7, blsp_uart7, qdss_tracedata_b, _, _,
+ _, _, _),
+ PINGROUP(87, pri_mi2s_ws, blsp_spi7, blsp_uart7, blsp_i2c7,
+ qdss_tracedata_b, gcc_tlmm, _, _, _),
+ PINGROUP(88, pri_mi2s, blsp_spi7, blsp_uart7, blsp_i2c7, _, _, _,
+ _, _),
+ PINGROUP(89, dmic0_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(90, dmic0_data, _, _, _, _, _, _, _, _),
+ PINGROUP(91, _, _, _, _, _, qdss_cti_trig_in_a1, _, _, _),
+ PINGROUP(92, _, _, _, _, _, qdss_tracedata_b, _, _, _),
+ PINGROUP(93, _, _, _, _, _, _, _, _, _),
+ PINGROUP(94, wsa_io, sec_mi2s, pri_mi2s, _, _, _, _, _, _),
+ PINGROUP(95, wsa_io, sec_mi2s, pri_mi2s, _, _, _, _, _, _),
+ PINGROUP(96, blsp_spi8, blsp_uart8, _, _, _, _, _, _, _),
+ PINGROUP(97, blsp_spi8, blsp_uart8, _, _, _, _, _, _, _),
+ PINGROUP(98, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, _, _, _,
+ _, _),
+ PINGROUP(99, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, _, _, _,
+ _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ PINGROUP(115, _, _, nav_pps_in_a, _, atest_combodac_to_gpio_native,
+ _, _, _, _),
+ PINGROUP(116, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(117, _, modem_tsync, nav_tsync, nav_pps_in_b, nav_pps, _,
+ _, _, _),
+ PINGROUP(118, _, ebi_cdc, _, _, _, _, _, _, _),
+ PINGROUP(119, gsm0_tx, _, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(120, _, atest_char, ebi_cdc, _, atest_tsens, _, _, _, _),
+ PINGROUP(121, _, _, _, bimc_dte1, _, _, _, _, _),
+ PINGROUP(122, _, ssbi_wtr1, _, _, bimc_dte1, _, _, _, _),
+ PINGROUP(123, _, ssbi_wtr1, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(124, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ PINGROUP(127, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ PINGROUP(130, blsp8_spi, _, _, _, _, _, _, _, _),
+ PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ PINGROUP(132, qdss_cti_trig_out_a0, _, _, _, _, _, _, _, _),
+ PINGROUP(133, qdss_cti_trig_out_a1, _, _, _, _, _, _, _, _),
+ SDC_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_pinctrl_soc_data msm8917_pinctrl = {
+ .pins = msm8917_pins,
+ .npins = ARRAY_SIZE(msm8917_pins),
+ .functions = msm8917_functions,
+ .nfunctions = ARRAY_SIZE(msm8917_functions),
+ .groups = msm8917_groups,
+ .ngroups = ARRAY_SIZE(msm8917_groups),
+ .ngpios = 134,
+};
+
+static int msm8917_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8917_pinctrl);
+}
+
+static const struct of_device_id msm8917_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8917-pinctrl", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8917_pinctrl_of_match);
+
+static struct platform_driver msm8917_pinctrl_driver = {
+ .driver = {
+ .name = "msm8917-pinctrl",
+ .of_match_table = msm8917_pinctrl_of_match,
+ },
+ .probe = msm8917_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8917_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8917_pinctrl_driver);
+}
+arch_initcall(msm8917_pinctrl_init);
+
+static void __exit msm8917_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8917_pinctrl_driver);
+}
+module_exit(msm8917_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8917 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 0c806b8128b6..c8ce61066070 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -14,6 +14,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spmi.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -702,7 +703,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
else
seq_printf(s, " %-4s",
pad->output_enabled ? "out" : "in");
- seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pad->out_value));
seq_printf(s, " %-7s", pmic_gpio_functions[function]);
seq_printf(s, " vin-%d", pad->power_source);
seq_printf(s, " %-27s", biases[pad->pullup]);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 84de584cf7eb..7b28c5fb2402 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -544,7 +545,7 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " %d", pad->aout_level);
if (pad->has_pullup)
seq_printf(s, " %-8s", biases[pad->pullup]);
- seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pad->out_value));
if (pad->dtest)
seq_printf(s, " dtest%d", pad->dtest);
if (pad->paired)
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 2225dc49d477..82679417e25f 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@@ -569,7 +570,7 @@ static void pm8xxx_gpio_dbg_show_one(struct seq_file *s,
seq_printf(s, " VIN%d", pin->power_source);
seq_printf(s, " %-27s", biases[pin->bias]);
seq_printf(s, " %-10s", buffer_types[pin->open_drain]);
- seq_printf(s, " %-4s", pin->output_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pin->output_value));
seq_printf(s, " %-7s", strengths[pin->output_strength]);
if (pin->inverted)
seq_puts(s, " inverted");
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 9b1039c08aa6..4841bbfe4864 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@@ -576,8 +577,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
seq_puts(s, "out ");
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
@@ -589,8 +589,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
if (pin->output) {
seq_printf(s, "out %s ", aout_lvls[pin->aout_level]);
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
@@ -605,8 +604,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
seq_printf(s, "dtest%d", pin->dtest);
} else {
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 7f3f41c7fe54..3c18d908b21e 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -41,6 +41,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A779H0 if ARCH_R8A779H0
select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_RZV2M if ARCH_R9A09G011
+ select PINCTRL_RZG2L if ARCH_R9A09G047
select PINCTRL_RZG2L if ARCH_R9A09G057
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 1df9cec2873f..ce4a07a3df49 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -26,6 +26,8 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h>
+#include <dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h>
#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
#include "../core.h"
@@ -157,7 +159,7 @@
#define PWPR_REGWE_B BIT(5) /* OEN Register Write Enable, known only in RZ/V2H(P) */
#define PM_MASK 0x03
-#define PFC_MASK 0x07
+#define PFC_MASK 0x0f
#define IEN_MASK 0x01
#define IOLH_MASK 0x03
#define SR_MASK 0x01
@@ -381,13 +383,51 @@ static u64 rzg2l_pinctrl_get_variable_pin_cfg(struct rzg2l_pinctrl *pctrl,
return 0;
}
+static const u64 r9a09g047_variable_pin_cfg[] = {
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 4, RZV2H_MPXED_PIN_FUNCS),
+};
+
static const u64 r9a09g057_variable_pin_cfg[] = {
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 0, RZV2H_MPXED_PIN_FUNCS),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
};
#ifdef CONFIG_RISCV
@@ -1962,6 +2002,73 @@ static const u64 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(6, 0x2a, RZG3S_MPXED_PIN_FUNCS(A)), /* P18 */
};
+static const char * const rzg3e_gpio_names[] = {
+ "P00", "P01", "P02", "P03", "P04", "P05", "P06", "P07",
+ "P10", "P11", "P12", "P13", "P14", "P15", "P16", "P17",
+ "P20", "P21", "P22", "P23", "P24", "P25", "P26", "P27",
+ "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37",
+ "P40", "P41", "P42", "P43", "P44", "P45", "P46", "P47",
+ "P50", "P51", "P52", "P53", "P54", "P55", "P56", "P57",
+ "P60", "P61", "P62", "P63", "P64", "P65", "P66", "P67",
+ "P70", "P71", "P72", "P73", "P74", "P75", "P76", "P77",
+ "P80", "P81", "P82", "P83", "P84", "P85", "P86", "P87",
+ "", "", "", "", "", "", "", "",
+ "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7",
+ "PB0", "PB1", "PB2", "PB3", "PB4", "PB5", "PB6", "PB7",
+ "PC0", "PC1", "PC2", "PC3", "PC4", "PC5", "PC6", "PC7",
+ "PD0", "PD1", "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
+ "PE0", "PE1", "PE2", "PE3", "PE4", "PE5", "PE6", "PE7",
+ "PF0", "PF1", "PF2", "PF3", "PF4", "PF5", "PF6", "PF7",
+ "PG0", "PG1", "PG2", "PG3", "PG4", "PG5", "PG6", "PG7",
+ "PH0", "PH1", "PH2", "PH3", "PH4", "PH5", "PH6", "PH7",
+ "", "", "", "", "", "", "", "",
+ "PJ0", "PJ1", "PJ2", "PJ3", "PJ4", "PJ5", "PJ6", "PJ7",
+ "PK0", "PK1", "PK2", "PK3", "PK4", "PK5", "PK6", "PK7",
+ "PL0", "PL1", "PL2", "PL3", "PL4", "PL5", "PL6", "PL7",
+ "PM0", "PM1", "PM2", "PM3", "PM4", "PM5", "PM6", "PM7",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "PS0", "PS1", "PS2", "PS3", "PS4", "PS5", "PS6", "PS7",
+};
+
+static const u64 r9a09g047_gpio_configs[] = {
+ RZG2L_GPIO_PORT_PACK(8, 0x20, RZV2H_MPXED_PIN_FUNCS), /* P0 */
+ RZG2L_GPIO_PORT_PACK(8, 0x21, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P1 */
+ RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_COMMON_PIN_FUNCS(RZV2H) |
+ PIN_CFG_NOD), /* P2 */
+ RZG2L_GPIO_PORT_PACK(8, 0x23, RZV2H_MPXED_PIN_FUNCS), /* P3 */
+ RZG2L_GPIO_PORT_PACK(6, 0x24, RZV2H_MPXED_PIN_FUNCS), /* P4 */
+ RZG2L_GPIO_PORT_PACK(7, 0x25, RZV2H_MPXED_PIN_FUNCS), /* P5 */
+ RZG2L_GPIO_PORT_PACK(7, 0x26, RZV2H_MPXED_PIN_FUNCS), /* P6 */
+ RZG2L_GPIO_PORT_PACK(8, 0x27, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P7 */
+ RZG2L_GPIO_PORT_PACK(6, 0x28, RZV2H_MPXED_PIN_FUNCS), /* P8 */
+ 0x0,
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2a), /* PA */
+ RZG2L_GPIO_PORT_PACK(8, 0x2b, RZV2H_MPXED_PIN_FUNCS), /* PB */
+ RZG2L_GPIO_PORT_PACK(3, 0x2c, RZV2H_MPXED_PIN_FUNCS), /* PC */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2d), /* PD */
+ RZG2L_GPIO_PORT_PACK(8, 0x2e, RZV2H_MPXED_PIN_FUNCS), /* PE */
+ RZG2L_GPIO_PORT_PACK(3, 0x2f, RZV2H_MPXED_PIN_FUNCS), /* PF */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x30), /* PG */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x31), /* PH */
+ 0x0,
+ RZG2L_GPIO_PORT_PACK_VARIABLE(5, 0x33), /* PJ */
+ RZG2L_GPIO_PORT_PACK(4, 0x34, RZV2H_MPXED_PIN_FUNCS), /* PK */
+ RZG2L_GPIO_PORT_PACK(8, 0x35, RZV2H_MPXED_PIN_FUNCS), /* PL */
+ RZG2L_GPIO_PORT_PACK(8, 0x36, RZV2H_MPXED_PIN_FUNCS), /* PM */
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ RZG2L_GPIO_PORT_PACK(4, 0x3c, RZV2H_MPXED_PIN_FUNCS), /* PS */
+};
+
static const char * const rzv2h_gpio_names[] = {
"P00", "P01", "P02", "P03", "P04", "P05", "P06", "P07",
"P10", "P11", "P12", "P13", "P14", "P15", "P16", "P17",
@@ -2085,6 +2192,8 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_IOLH_A | PIN_CFG_IEN |
PIN_CFG_SOFT_PS)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x1, 1, (PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS)) },
+ { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x2, 0, PIN_CFG_IEN) },
+ { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x2, 1, PIN_CFG_IEN) },
{ "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0x6, 0, PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS) },
{ "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_IOLH_B | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
@@ -2250,6 +2359,43 @@ static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
{ "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
};
+static struct rzg2l_dedicated_configs rzg3e_dedicated_pins[] = {
+ { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD)) },
+ { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD)) },
+ { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0PWEN", RZG2L_SINGLE_PIN_PACK(0x9, 3,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0IOVS", RZG2L_SINGLE_PIN_PACK(0x9, 4,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+};
+
static int rzg2l_gpio_get_gpioint(unsigned int virq, struct rzg2l_pinctrl *pctrl)
{
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[virq];
@@ -2760,6 +2906,9 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
BUILD_BUG_ON(ARRAY_SIZE(r9a08g045_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzg2l_gpio_names));
+ BUILD_BUG_ON(ARRAY_SIZE(r9a09g047_gpio_configs) * RZG2L_PINS_PER_PORT >
+ ARRAY_SIZE(rzg3e_gpio_names));
+
BUILD_BUG_ON(ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzv2h_gpio_names));
@@ -3158,6 +3307,29 @@ static struct rzg2l_pinctrl_data r9a08g045_data = {
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
+static struct rzg2l_pinctrl_data r9a09g047_data = {
+ .port_pins = rzg3e_gpio_names,
+ .port_pin_configs = r9a09g047_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g047_gpio_configs),
+ .dedicated_pins = rzg3e_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(r9a09g047_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzg3e_dedicated_pins),
+ .hwcfg = &rzv2h_hwcfg,
+ .variable_pin_cfg = r9a09g047_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a09g047_variable_pin_cfg),
+ .num_custom_params = ARRAY_SIZE(renesas_rzv2h_custom_bindings),
+ .custom_params = renesas_rzv2h_custom_bindings,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = renesas_rzv2h_conf_items,
+#endif
+ .pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzv2h_pmc_writeb,
+ .oen_read = &rzv2h_oen_read,
+ .oen_write = &rzv2h_oen_write,
+ .hw_to_bias_param = &rzv2h_hw_to_bias_param,
+ .bias_param_to_hw = &rzv2h_bias_param_to_hw,
+};
+
static struct rzg2l_pinctrl_data r9a09g057_data = {
.port_pins = rzv2h_gpio_names,
.port_pin_configs = r9a09g057_gpio_configs,
@@ -3195,6 +3367,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
.data = &r9a08g045_data,
},
{
+ .compatible = "renesas,r9a09g047-pinctrl",
+ .data = &r9a09g047_data,
+ },
+ {
.compatible = "renesas,r9a09g057-pinctrl",
.data = &r9a09g057_data,
},
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b79c211c0374..42093bae8bb7 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -23,6 +23,7 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/soc/samsung/exynos-pmu.h>
@@ -442,7 +443,7 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
- pr_info("wake %s for irq %u (%s-%lu)\n", on ? "enabled" : "disabled",
+ pr_info("wake %s for irq %u (%s-%lu)\n", str_enabled_disabled(on),
irqd->irq, bank->name, irqd->hwirq);
if (!on)
@@ -636,7 +637,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
if (clk_enable(b->drvdata->pclk)) {
dev_err(b->gpio_chip.parent,
"unable to enable clock for pending IRQs\n");
- return;
+ goto out;
}
}
@@ -652,6 +653,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
if (eintd->nr_banks)
clk_disable(eintd->banks[0]->drvdata->pclk);
+out:
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index bbedd980ec67..cfced7afd4ca 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1172,7 +1172,7 @@ static void samsung_banks_node_get(struct device *dev, struct samsung_pinctrl_dr
else
dev_warn(dev, "Missing node for bank %s - invalid DTB\n",
bank->name);
- /* child reference dropped in samsung_drop_banks_of_node() */
+ /* child reference dropped in samsung_banks_node_put() */
}
}
@@ -1272,7 +1272,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
ret = platform_get_irq_optional(pdev, 0);
if (ret < 0 && ret != -ENXIO)
- return ret;
+ goto err_put_banks;
if (ret > 0)
drvdata->irq = ret;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 5b7fa77c1184..cc0b4d1d7cff 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -22,6 +22,7 @@
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
@@ -86,7 +87,6 @@ struct stm32_pinctrl_group {
struct stm32_gpio_bank {
void __iomem *base;
- struct clk *clk;
struct reset_control *rstc;
spinlock_t lock;
struct gpio_chip gpio_chip;
@@ -108,6 +108,7 @@ struct stm32_pinctrl {
unsigned ngroups;
const char **grp_names;
struct stm32_gpio_bank *banks;
+ struct clk_bulk_data *clks;
unsigned nbanks;
const struct stm32_pinctrl_match_data *match_data;
struct irq_domain *domain;
@@ -1217,7 +1218,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
case 0:
val = stm32_pconf_get(bank, offset, true);
seq_printf(s, "- %s - %s",
- val ? "high" : "low",
+ str_high_low(val),
biasing[bias]);
break;
@@ -1227,7 +1228,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
speed = stm32_pconf_get_speed(bank, offset);
val = stm32_pconf_get(bank, offset, false);
seq_printf(s, "- %s - %s - %s - %s %s",
- val ? "high" : "low",
+ str_high_low(val),
drive ? "open drain" : "push pull",
biasing[bias],
speeds[speed], "speed");
@@ -1308,12 +1309,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
if (IS_ERR(bank->base))
return PTR_ERR(bank->base);
- err = clk_prepare_enable(bank->clk);
- if (err) {
- dev_err(dev, "failed to prepare_enable clk (%d)\n", err);
- return err;
- }
-
bank->gpio_chip = stm32_gpio_template;
fwnode_property_read_string(fwnode, "st,bank-name", &bank->gpio_chip.label);
@@ -1360,26 +1355,20 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
bank->fwnode, &stm32_gpio_domain_ops,
bank);
- if (!bank->domain) {
- err = -ENODEV;
- goto err_clk;
- }
+ if (!bank->domain)
+ return -ENODEV;
}
names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
- if (!names) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!names)
+ return -ENOMEM;
for (i = 0; i < npins; i++) {
stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
if (stm32_pin && stm32_pin->pin.name) {
names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
- if (!names[i]) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!names[i])
+ return -ENOMEM;
} else {
names[i] = NULL;
}
@@ -1390,15 +1379,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
err = gpiochip_add_data(&bank->gpio_chip, bank);
if (err) {
dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
- goto err_clk;
+ return err;
}
dev_info(dev, "%s bank added\n", bank->gpio_chip.label);
return 0;
-
-err_clk:
- clk_disable_unprepare(bank->clk);
- return err;
}
static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev)
@@ -1621,6 +1606,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
if (!pctl->banks)
return -ENOMEM;
+ pctl->clks = devm_kcalloc(dev, banks, sizeof(*pctl->clks),
+ GFP_KERNEL);
+ if (!pctl->clks)
+ return -ENOMEM;
+
i = 0;
for_each_gpiochip_node(dev, child) {
struct stm32_gpio_bank *bank = &pctl->banks[i];
@@ -1632,24 +1622,27 @@ int stm32_pctl_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- bank->clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(bank->clk)) {
+ pctl->clks[i].clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(pctl->clks[i].clk)) {
fwnode_handle_put(child);
- return dev_err_probe(dev, PTR_ERR(bank->clk),
+ return dev_err_probe(dev, PTR_ERR(pctl->clks[i].clk),
"failed to get clk\n");
}
+ pctl->clks[i].id = "pctl";
i++;
}
+ ret = clk_bulk_prepare_enable(banks, pctl->clks);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clk (%d)\n", ret);
+ return ret;
+ }
+
for_each_gpiochip_node(dev, child) {
ret = stm32_gpiolib_register_bank(pctl, child);
if (ret) {
fwnode_handle_put(child);
-
- for (i = 0; i < pctl->nbanks; i++)
- clk_disable_unprepare(pctl->banks[i].clk);
-
- return ret;
+ goto err_register;
}
pctl->nbanks++;
@@ -1658,6 +1651,15 @@ int stm32_pctl_probe(struct platform_device *pdev)
dev_info(dev, "Pinctrl STM32 initialized\n");
return 0;
+err_register:
+ for (i = 0; i < pctl->nbanks; i++) {
+ struct stm32_gpio_bank *bank = &pctl->banks[i];
+
+ gpiochip_remove(&bank->gpio_chip);
+ }
+
+ clk_bulk_disable_unprepare(banks, pctl->clks);
+ return ret;
}
static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
@@ -1726,10 +1728,8 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
int __maybe_unused stm32_pinctrl_suspend(struct device *dev)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
- int i;
- for (i = 0; i < pctl->nbanks; i++)
- clk_disable(pctl->banks[i].clk);
+ clk_bulk_disable(pctl->nbanks, pctl->clks);
return 0;
}
@@ -1738,10 +1738,11 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
struct stm32_pinctrl_group *g = pctl->groups;
- int i;
+ int i, ret;
- for (i = 0; i < pctl->nbanks; i++)
- clk_enable(pctl->banks[i].clk);
+ ret = clk_bulk_enable(pctl->nbanks, pctl->clks);
+ if (ret)
+ return ret;
for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index df90c75fb3c5..b97de80ae2f3 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -256,72 +256,84 @@ static const struct sunxi_desc_pin a100_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D3P */
SUNXI_FUNCTION(0x4, "dsi0"), /* DP3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D3N */
SUNXI_FUNCTION(0x4, "dsi0"), /* DM3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D0P */
SUNXI_FUNCTION(0x4, "spi1"), /* CS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D0N */
SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D1P */
SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D1N */
SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D2P */
SUNXI_FUNCTION(0x4, "uart3"), /* TX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D2N */
SUNXI_FUNCTION(0x4, "uart3"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */
SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */
SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 17)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D3P */
SUNXI_FUNCTION(0x4, "uart4"), /* TX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 18)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D3N */
SUNXI_FUNCTION(0x4, "uart4"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 19)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index e821b3d39590..110771a8645e 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -204,6 +204,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
mutex_init(&ec_dev->lock);
lockdep_set_class(&ec_dev->lock, &ec_dev->lockdep_key);
+ /* Send RWSIG continue to jump to RW for devices using RWSIG. */
+ err = cros_ec_rwsig_continue(ec_dev);
+ if (err)
+ dev_info(dev, "Failed to continue RWSIG: %d\n", err);
+
err = cros_ec_query_all(ec_dev);
if (err) {
dev_err(dev, "Cannot identify the EC: error %d\n", err);
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 62662ba5bf6e..38af97cdaab2 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -305,7 +305,8 @@ static int cros_ec_i2c_probe(struct i2c_client *client)
ec_dev->phys_name = client->adapter->name;
ec_dev->din_size = sizeof(struct ec_host_response_i2c) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request_i2c);
+ ec_dev->dout_size = sizeof(struct ec_host_request_i2c) +
+ sizeof(struct ec_params_rwsig_action);
err = cros_ec_register(ec_dev);
if (err) {
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 5ac37bd024c8..7e7190b30cbb 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -557,7 +557,7 @@ static int cros_ec_dev_init(struct ishtp_cl_data *client_data)
ec_dev->phys_name = dev_name(dev);
ec_dev->din_size = sizeof(struct cros_ish_in_msg) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct cros_ish_out_msg);
+ ec_dev->dout_size = sizeof(struct cros_ish_out_msg) + sizeof(struct ec_params_rwsig_action);
return cros_ec_register(ec_dev);
}
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 8470b7f2b135..5a2f1d98b350 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -70,13 +70,8 @@ struct lpc_driver_data {
/**
* struct cros_ec_lpc - LPC device-specific data
* @mmio_memory_base: The first I/O port addressing EC mapped memory.
- */
-struct cros_ec_lpc {
- u16 mmio_memory_base;
-};
-
-/**
- * struct lpc_driver_ops - LPC driver operations
+ * @base: For EC supporting memory mapping, base address of the mapped region.
+ * @mem32: Information about the memory mapped register region, if present.
* @read: Copy length bytes from EC address offset into buffer dest.
* Returns a negative error code on error, or the 8-bit checksum
* of all bytes read.
@@ -84,18 +79,21 @@ struct cros_ec_lpc {
* Returns a negative error code on error, or the 8-bit checksum
* of all bytes written.
*/
-struct lpc_driver_ops {
- int (*read)(unsigned int offset, unsigned int length, u8 *dest);
- int (*write)(unsigned int offset, unsigned int length, const u8 *msg);
+struct cros_ec_lpc {
+ u16 mmio_memory_base;
+ void __iomem *base;
+ struct acpi_resource_fixed_memory32 mem32;
+ int (*read)(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, u8 *dest);
+ int (*write)(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, const u8 *msg);
};
-static struct lpc_driver_ops cros_ec_lpc_ops = { };
-
/*
* A generic instance of the read function of struct lpc_driver_ops, used for
* the LPC EC.
*/
-static int cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
+static int cros_ec_lpc_read_bytes(struct cros_ec_lpc *_, unsigned int offset, unsigned int length,
u8 *dest)
{
u8 sum = 0;
@@ -114,7 +112,7 @@ static int cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
* A generic instance of the write function of struct lpc_driver_ops, used for
* the LPC EC.
*/
-static int cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
+static int cros_ec_lpc_write_bytes(struct cros_ec_lpc *_, unsigned int offset, unsigned int length,
const u8 *msg)
{
u8 sum = 0;
@@ -133,8 +131,8 @@ static int cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
* An instance of the read function of struct lpc_driver_ops, used for the
* MEC variant of LPC EC.
*/
-static int cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
- u8 *dest)
+static int cros_ec_lpc_mec_read_bytes(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, u8 *dest)
{
int in_range = cros_ec_lpc_mec_in_range(offset, length);
@@ -145,15 +143,15 @@ static int cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
cros_ec_lpc_io_bytes_mec(MEC_IO_READ,
offset - EC_HOST_CMD_REGION0,
length, dest) :
- cros_ec_lpc_read_bytes(offset, length, dest);
+ cros_ec_lpc_read_bytes(ec_lpc, offset, length, dest);
}
/*
* An instance of the write function of struct lpc_driver_ops, used for the
* MEC variant of LPC EC.
*/
-static int cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
- const u8 *msg)
+static int cros_ec_lpc_mec_write_bytes(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, const u8 *msg)
{
int in_range = cros_ec_lpc_mec_in_range(offset, length);
@@ -164,10 +162,50 @@ static int cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE,
offset - EC_HOST_CMD_REGION0,
length, (u8 *)msg) :
- cros_ec_lpc_write_bytes(offset, length, msg);
+ cros_ec_lpc_write_bytes(ec_lpc, offset, length, msg);
+}
+
+static int cros_ec_lpc_direct_read(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, u8 *dest)
+{
+ int sum = 0;
+ int i;
+
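+	/* Offsets outside the memory-mapped window still go through port I/O. */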
+ if (offset < EC_HOST_CMD_REGION0 || offset > EC_LPC_ADDR_MEMMAP +
+ EC_MEMMAP_SIZE) {
+ return cros_ec_lpc_read_bytes(ec_lpc, offset, length, dest);
+ }
+
+ for (i = 0; i < length; ++i) {
+ dest[i] = readb(ec_lpc->base + offset - EC_HOST_CMD_REGION0 + i);
+ sum += dest[i];
+ }
+
+ /* Return checksum of all bytes read */
+ return sum;
}
-static int ec_response_timed_out(void)
+static int cros_ec_lpc_direct_write(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, const u8 *msg)
+{
+ int sum = 0;
+ int i;
+
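+	/* As in the read path, fall back to port I/O outside the mapped window. */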
+ if (offset < EC_HOST_CMD_REGION0 || offset > EC_LPC_ADDR_MEMMAP +
+ EC_MEMMAP_SIZE) {
+ return cros_ec_lpc_write_bytes(ec_lpc, offset, length, msg);
+ }
+
+ for (i = 0; i < length; ++i) {
+ writeb(msg[i], ec_lpc->base + offset - EC_HOST_CMD_REGION0 + i);
+ sum += msg[i];
+ }
+
+ /* Return checksum of all bytes written */
+ return sum;
+}
+
+static int ec_response_timed_out(struct cros_ec_lpc *ec_lpc)
{
unsigned long one_second = jiffies + HZ;
u8 data;
@@ -175,7 +213,7 @@ static int ec_response_timed_out(void)
usleep_range(200, 300);
do {
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_CMD, 1, &data);
if (ret < 0)
return ret;
if (!(data & EC_LPC_STATUS_BUSY_MASK))
@@ -189,6 +227,7 @@ static int ec_response_timed_out(void)
static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
struct cros_ec_command *msg)
{
+ struct cros_ec_lpc *ec_lpc = ec->priv;
struct ec_host_response response;
u8 sum;
int ret = 0;
@@ -199,17 +238,17 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
goto done;
/* Write buffer */
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
if (ret < 0)
goto done;
/* Here we go */
sum = EC_COMMAND_PROTOCOL_3;
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_CMD, 1, &sum);
if (ret < 0)
goto done;
- ret = ec_response_timed_out();
+ ret = ec_response_timed_out(ec_lpc);
if (ret < 0)
goto done;
if (ret) {
@@ -219,7 +258,7 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
}
/* Check result */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_DATA, 1, &sum);
if (ret < 0)
goto done;
msg->result = ret;
@@ -229,7 +268,7 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
/* Read back response */
dout = (u8 *)&response;
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response),
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_PACKET, sizeof(response),
dout);
if (ret < 0)
goto done;
@@ -246,7 +285,7 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
}
/* Read response and process checksum */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET +
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_PACKET +
sizeof(response), response.data_len,
msg->data);
if (ret < 0)
@@ -270,6 +309,7 @@ done:
static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
struct cros_ec_command *msg)
{
+ struct cros_ec_lpc *ec_lpc = ec->priv;
struct ec_lpc_host_args args;
u8 sum;
int ret = 0;
@@ -291,7 +331,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
sum = msg->command + args.flags + args.command_version + args.data_size;
/* Copy data and update checksum */
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize,
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_PARAM, msg->outsize,
msg->data);
if (ret < 0)
goto done;
@@ -299,18 +339,18 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
/* Finalize checksum and write args */
args.checksum = sum;
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args),
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_ARGS, sizeof(args),
(u8 *)&args);
if (ret < 0)
goto done;
/* Here we go */
sum = msg->command;
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_CMD, 1, &sum);
if (ret < 0)
goto done;
- ret = ec_response_timed_out();
+ ret = ec_response_timed_out(ec_lpc);
if (ret < 0)
goto done;
if (ret) {
@@ -320,7 +360,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
}
/* Check result */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_DATA, 1, &sum);
if (ret < 0)
goto done;
msg->result = ret;
@@ -329,7 +369,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
goto done;
/* Read back args */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
if (ret < 0)
goto done;
@@ -345,7 +385,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
sum = msg->command + args.flags + args.command_version + args.data_size;
/* Read response and update checksum */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size,
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_PARAM, args.data_size,
msg->data);
if (ret < 0)
goto done;
@@ -381,7 +421,7 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
/* fixed length */
if (bytes) {
- ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s);
+ ret = ec_lpc->read(ec_lpc, ec_lpc->mmio_memory_base + offset, bytes, s);
if (ret < 0)
return ret;
return bytes;
@@ -389,7 +429,7 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
/* string */
for (; i < EC_MEMMAP_SIZE; i++, s++) {
- ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s);
+ ret = ec_lpc->read(ec_lpc, ec_lpc->mmio_memory_base + i, 1, s);
if (ret < 0)
return ret;
cnt++;
@@ -419,7 +459,7 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
return;
}
- if (ec_dev->mkbp_event_supported)
+ if (value == ACPI_NOTIFY_CROS_EC_MKBP && ec_dev->mkbp_event_supported)
do {
ret = cros_ec_get_next_event(ec_dev, NULL,
&ec_has_more_events);
@@ -453,6 +493,20 @@ static struct acpi_device *cros_ec_lpc_get_device(const char *id)
return adev;
}
+static acpi_status cros_ec_lpc_resources(struct acpi_resource *res, void *data)
+{
+ struct cros_ec_lpc *ec_lpc = data;
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ ec_lpc->mem32 = res->data.fixed_memory32;
+ break;
+ default:
+ break;
+ }
+ return AE_OK;
+}
+
static int cros_ec_lpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -492,8 +546,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
}
if (quirks & CROS_EC_LPC_QUIRK_AML_MUTEX) {
- const char *name
- = driver_data->quirk_aml_mutex_name;
+ const char *name = driver_data->quirk_aml_mutex_name;
ret = cros_ec_lpc_mec_acpi_mutex(ACPI_COMPANION(dev), name);
if (ret) {
dev_err(dev, "failed to get AML mutex '%s'", name);
@@ -502,30 +555,49 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
dev_info(dev, "got AML mutex '%s'", name);
}
}
-
- /*
- * The Framework Laptop (and possibly other non-ChromeOS devices)
- * only exposes the eight I/O ports that are required for the Microchip EC.
- * Requesting a larger reservation will fail.
- */
- if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
- EC_HOST_CMD_MEC_REGION_SIZE, dev_name(dev))) {
- dev_err(dev, "couldn't reserve MEC region\n");
- return -EBUSY;
+ adev = ACPI_COMPANION(dev);
+ if (adev) {
+ /*
+		 * Retrieve the resource information from the _CRS object, if available.
+ */
+ status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
+ cros_ec_lpc_resources, ec_lpc);
+ if (ACPI_SUCCESS(status) && ec_lpc->mem32.address_length) {
+ ec_lpc->base = devm_ioremap(dev,
+ ec_lpc->mem32.address,
+ ec_lpc->mem32.address_length);
+ if (!ec_lpc->base)
+ return -EINVAL;
+
+ ec_lpc->read = cros_ec_lpc_direct_read;
+ ec_lpc->write = cros_ec_lpc_direct_write;
+ }
}
+ if (!ec_lpc->read) {
+ /*
+ * The Framework Laptop (and possibly other non-ChromeOS devices)
+ * only exposes the eight I/O ports that are required for the Microchip EC.
+ * Requesting a larger reservation will fail.
+ */
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
+ EC_HOST_CMD_MEC_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve MEC region\n");
+ return -EBUSY;
+ }
- cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0,
- EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE);
+ cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0,
+ EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE);
- /*
- * Read the mapped ID twice, the first one is assuming the
- * EC is a Microchip Embedded Controller (MEC) variant, if the
- * protocol fails, fallback to the non MEC variant and try to
- * read again the ID.
- */
- cros_ec_lpc_ops.read = cros_ec_lpc_mec_read_bytes;
- cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes;
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
+ /*
+		 * Read the mapped ID twice: the first read assumes the EC is a
+		 * Microchip Embedded Controller (MEC) variant; if that protocol
+		 * fails, fall back to the non-MEC variant and read the ID again.
+ */
+ ec_lpc->read = cros_ec_lpc_mec_read_bytes;
+ ec_lpc->write = cros_ec_lpc_mec_write_bytes;
+ }
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
if (ret < 0)
return ret;
if (buf[0] != 'E' || buf[1] != 'C') {
@@ -536,9 +608,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
}
/* Re-assign read/write operations for the non MEC variant */
- cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes;
- cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes;
- ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
+ ec_lpc->read = cros_ec_lpc_read_bytes;
+ ec_lpc->write = cros_ec_lpc_write_bytes;
+ ret = ec_lpc->read(ec_lpc, ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
buf);
if (ret < 0)
return ret;
@@ -573,7 +645,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
ec_dev->cmd_readmem = cros_ec_lpc_readmem;
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
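+	/* Leave room in the outgoing buffer for the RWSIG continue request. */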
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
ec_dev->priv = ec_lpc;
/*
@@ -598,7 +670,6 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
* Connect a notify handler to process MKBP messages if we have a
* companion ACPI device.
*/
- adev = ACPI_COMPANION(dev);
if (adev) {
status = acpi_install_notify_handler(adev->handle,
ACPI_ALL_NOTIFY,
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 5c9a53dffcf9..877b107fee4b 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -15,6 +15,8 @@
#include "cros_ec_trace.h"
#define EC_COMMAND_RETRIES 50
+#define RWSIG_CONTINUE_RETRIES 8
+#define RWSIG_CONTINUE_MAX_ERRORS_IN_ROW 3
static const int cros_ec_error_map[] = {
[EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
@@ -288,6 +290,64 @@ exit:
return ret;
}
+int cros_ec_rwsig_continue(struct cros_ec_device *ec_dev)
+{
+ struct cros_ec_command *msg;
+ struct ec_params_rwsig_action *rwsig_action;
+ int ret = 0;
+ int error_count = 0;
+
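+	/*
+	 * This runs before the host command protocol has been negotiated,
+	 * so assume the v3 framing for this request.
+	 */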
+ ec_dev->proto_version = 3;
+
+ msg = kmalloc(sizeof(*msg) + sizeof(*rwsig_action), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 0;
+ msg->command = EC_CMD_RWSIG_ACTION;
+ msg->insize = 0;
+ msg->outsize = sizeof(*rwsig_action);
+
+ rwsig_action = (struct ec_params_rwsig_action *)msg->data;
+ rwsig_action->action = RWSIG_ACTION_CONTINUE;
+
+ for (int i = 0; i < RWSIG_CONTINUE_RETRIES; i++) {
+ ret = cros_ec_send_command(ec_dev, msg);
+
+ if (ret < 0) {
+ if (++error_count >= RWSIG_CONTINUE_MAX_ERRORS_IN_ROW)
+ break;
+ } else if (msg->result == EC_RES_INVALID_COMMAND) {
+ /*
+			/*
+			 * If EC_RES_INVALID_COMMAND is returned, it means RWSIG
+			 * is not supported or the EC is already in RW, so there
+			 * is nothing left to do.
+ */
+ break;
+ } else if (msg->result != EC_RES_SUCCESS) {
+ /* Unexpected command error. */
+ ret = cros_ec_map_error(msg->result);
+ break;
+ } else {
+ /*
+			 * The EC_CMD_RWSIG_ACTION succeeded. Send the command a
+			 * few more times to make sure the EC is in RW. A following
+			 * command can time out, because the EC may need some time
+			 * to initialize after the jump to RW.
+ */
+ error_count = 0;
+ }
+
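+		/* No extra delay is needed after a timeout; the transfer already waited. */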
+ if (ret != -ETIMEDOUT)
+ usleep_range(90000, 100000);
+ }
+
+ kfree(msg);
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_rwsig_continue);
+
static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
{
struct cros_ec_command *msg;
@@ -306,15 +366,6 @@ static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
msg->insize = sizeof(*info);
ret = cros_ec_send_command(ec_dev, msg);
- /*
- * Send command once again when timeout occurred.
- * Fingerprint MCU (FPMCU) is restarted during system boot which
- * introduces small window in which FPMCU won't respond for any
- * messages sent by kernel. There is no need to wait before next
- * attempt because we waited at least EC_MSG_DEADLINE_MS.
- */
- if (ret == -ETIMEDOUT)
- ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 39d3b50a7c09..bc2666491db1 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -231,7 +231,7 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
ec_dev->phys_name = dev_name(&rpdev->dev);
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
dev_set_drvdata(dev, ec_dev);
ec_rpmsg->rpdev = rpdev;
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 86a3d32a7763..8ca0f854e7ac 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -715,7 +715,7 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
int err;
ec_spi->high_pri_worker =
- kthread_create_worker(0, "cros_ec_spi_high_pri");
+ kthread_run_worker(0, "cros_ec_spi_high_pri");
if (IS_ERR(ec_spi->high_pri_worker)) {
err = PTR_ERR(ec_spi->high_pri_worker);
@@ -766,7 +766,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
ec_dev->din_size = EC_MSG_PREAMBLE_COUNT +
sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
ec_spi->last_transfer_ns = ktime_get_ns();
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 425e9441b7ca..9827b3117597 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -122,8 +122,10 @@
TRACE_SYMBOL(EC_CMD_ENTERING_MODE), \
TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU_PROTECT), \
TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+ TRACE_SYMBOL(EC_CMD_CEC_READ_MSG), \
TRACE_SYMBOL(EC_CMD_CEC_SET), \
TRACE_SYMBOL(EC_CMD_CEC_GET), \
+ TRACE_SYMBOL(EC_CMD_CEC_PORT_COUNT), \
TRACE_SYMBOL(EC_CMD_EC_CODEC), \
TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
@@ -161,11 +163,18 @@
TRACE_SYMBOL(EC_CMD_ADC_READ), \
TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
TRACE_SYMBOL(EC_CMD_AP_RESET), \
+ TRACE_SYMBOL(EC_CMD_PCHG_COUNT), \
+ TRACE_SYMBOL(EC_CMD_PCHG), \
+ TRACE_SYMBOL(EC_CMD_PCHG_UPDATE), \
TRACE_SYMBOL(EC_CMD_REGULATOR_GET_INFO), \
TRACE_SYMBOL(EC_CMD_REGULATOR_ENABLE), \
TRACE_SYMBOL(EC_CMD_REGULATOR_IS_ENABLED), \
TRACE_SYMBOL(EC_CMD_REGULATOR_SET_VOLTAGE), \
TRACE_SYMBOL(EC_CMD_REGULATOR_GET_VOLTAGE), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_DISCOVERY), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_STATUS), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_VDM_RESPONSE), \
TRACE_SYMBOL(EC_CMD_CR51_BASE), \
TRACE_SYMBOL(EC_CMD_CR51_LAST), \
TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
@@ -184,6 +193,7 @@
TRACE_SYMBOL(EC_CMD_BATTERY_GET_STATIC), \
TRACE_SYMBOL(EC_CMD_BATTERY_GET_DYNAMIC), \
TRACE_SYMBOL(EC_CMD_CHARGER_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_MUX_ACK), \
TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_BASE), \
TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_LAST)
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 62bc24f6dcc7..19c179d49c90 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -283,7 +283,7 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
ec_dev->pkt_xfer = cros_ec_uart_pkt_xfer;
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 7bdb489354c5..963c4db23055 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -15,7 +15,7 @@
#define DRV_NAME "cros-ec-vbc"
static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *att, char *buf,
+ const struct bin_attribute *att, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -59,7 +59,7 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -99,16 +99,16 @@ static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
return data_sz;
}
-static BIN_ATTR_RW(vboot_context, 16);
+static const BIN_ATTR_RW(vboot_context, 16);
-static struct bin_attribute *cros_ec_vbc_bin_attrs[] = {
+static const struct bin_attribute *const cros_ec_vbc_bin_attrs[] = {
&bin_attr_vboot_context,
NULL
};
static const struct attribute_group cros_ec_vbc_attr_group = {
.name = "vbc",
- .bin_attrs = cros_ec_vbc_bin_attrs,
+ .bin_attrs_new = cros_ec_vbc_bin_attrs,
};
static int cros_ec_vbc_probe(struct platform_device *pd)
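
The cros_ec_vbc conversion follows the ongoing sysfs transition: const-qualified bin_attributes are wired through the temporary .read_new and .bin_attrs_new slots until the non-const variants go away. A minimal sketch of the same pattern on a hypothetical attribute (the name, size, and empty read body are made up here):

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static ssize_t example_read(struct file *filp, struct kobject *kobj,
                            const struct bin_attribute *attr, char *buf,
                            loff_t pos, size_t count)
{
        return 0;       /* no data in this sketch */
}

static const struct bin_attribute bin_attr_example = {
        .attr = { .name = "example", .mode = 0444 },
        .size = 16,
        .read_new = example_read,       /* const-taking callback slot */
};

static const struct bin_attribute *const example_bin_attrs[] = {
        &bin_attr_example,
        NULL
};

static const struct attribute_group example_group = {
        .bin_attrs_new = example_bin_attrs,
};
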
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index 78097c8a4966..fc27bd7fc4b9 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -121,7 +121,17 @@ static const struct keyboard_led_drvdata keyboard_led_drvdata_acpi = {
#endif /* CONFIG_ACPI */
-#if IS_ENABLED(CONFIG_CROS_EC)
+#if IS_ENABLED(CONFIG_MFD_CROS_EC_DEV)
+static int keyboard_led_init_ec_pwm_mfd(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
+
+ keyboard_led->ec = cros_ec;
+
+ return 0;
+}
static int
keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
@@ -169,44 +179,6 @@ keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
return resp->percent;
}
-static int keyboard_led_init_ec_pwm(struct platform_device *pdev)
-{
- struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
-
- keyboard_led->ec = dev_get_drvdata(pdev->dev.parent);
- if (!keyboard_led->ec) {
- dev_err(&pdev->dev, "no parent EC device\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {
- .init = keyboard_led_init_ec_pwm,
- .brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
- .brightness_get = keyboard_led_get_brightness_ec_pwm,
- .max_brightness = KEYBOARD_BACKLIGHT_MAX,
-};
-
-#else /* IS_ENABLED(CONFIG_CROS_EC) */
-
-static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {};
-
-#endif /* IS_ENABLED(CONFIG_CROS_EC) */
-
-#if IS_ENABLED(CONFIG_MFD_CROS_EC_DEV)
-static int keyboard_led_init_ec_pwm_mfd(struct platform_device *pdev)
-{
- struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
- struct cros_ec_device *cros_ec = ec_dev->ec_dev;
- struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
-
- keyboard_led->ec = cros_ec;
-
- return 0;
-}
-
static const struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm_mfd = {
.init = keyboard_led_init_ec_pwm_mfd,
.brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
@@ -229,7 +201,7 @@ static int keyboard_led_probe(struct platform_device *pdev)
{
const struct keyboard_led_drvdata *drvdata;
struct keyboard_led *keyboard_led;
- int error;
+ int err;
if (keyboard_led_is_mfd_device(pdev))
drvdata = &keyboard_led_drvdata_ec_pwm_mfd;
@@ -244,9 +216,9 @@ static int keyboard_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keyboard_led);
if (drvdata->init) {
- error = drvdata->init(pdev);
- if (error)
- return error;
+ err = drvdata->init(pdev);
+ if (err)
+ return err;
}
keyboard_led->cdev.name = "chromeos::kbd_backlight";
@@ -256,13 +228,10 @@ static int keyboard_led_probe(struct platform_device *pdev)
keyboard_led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
keyboard_led->cdev.brightness_get = drvdata->brightness_get;
- error = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
- if (error == -EEXIST) /* Already bound via other mechanism */
+ err = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
+ if (err == -EEXIST) /* Already bound via other mechanism */
return -ENODEV;
- if (error)
- return error;
-
- return 0;
+ return err;
}
#ifdef CONFIG_ACPI
@@ -273,17 +242,6 @@ static const struct acpi_device_id keyboard_led_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, keyboard_led_acpi_match);
#endif
-#ifdef CONFIG_OF
-static const struct of_device_id keyboard_led_of_match[] = {
- {
- .compatible = "google,cros-kbd-led-backlight",
- .data = &keyboard_led_drvdata_ec_pwm,
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
-#endif
-
static const struct platform_device_id keyboard_led_id[] = {
{ "cros-keyboard-leds", 0 },
{}
@@ -294,7 +252,6 @@ static struct platform_driver keyboard_led_driver = {
.driver = {
.name = "cros-keyboard-leds",
.acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
- .of_match_table = of_match_ptr(keyboard_led_of_match),
},
.probe = keyboard_led_probe,
.id_table = keyboard_led_id,
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index cd71f1caea81..7ce75e2e039e 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -13,6 +13,7 @@
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
+#include <linux/string_choices.h>
#define DRV_NAME "cros-usbpd-logger"
@@ -135,8 +136,8 @@ static void cros_usbpd_print_log_entry(struct ec_response_pd_log *r,
len += append_str(buf, len, "Power supply fault: %s", fault);
break;
case PD_EVENT_VIDEO_DP_MODE:
- len += append_str(buf, len, "DP mode %sabled", r->data == 1 ?
- "en" : "dis");
+ len += append_str(buf, len, "DP mode %s",
+ str_enabled_disabled(r->data == 1));
break;
case PD_EVENT_VIDEO_CODEC:
minfo = (struct mcdp_info *)r->payload;
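
str_enabled_disabled() from <linux/string_choices.h> maps a boolean straight to "enabled"/"disabled", so the grep-hostile "%sabled" plus "en"/"dis" construction can go. A minimal sketch (the helper name is hypothetical):

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void example_log_dp_mode(bool active)
{
        pr_info("DP mode %s\n", str_enabled_disabled(active));
}
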
diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c
index 58f9afae2867..770e680b96f9 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-base.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-base.c
@@ -52,6 +52,7 @@ int omnia_cmd_write_read(const struct i2c_client *client,
return 0;
}
+EXPORT_SYMBOL_GPL(omnia_cmd_write_read);
static int omnia_get_version_hash(struct omnia_mcu *mcu, bool bootloader,
char version[static OMNIA_FW_VERSION_HEX_LEN])
@@ -257,6 +258,8 @@ static int omnia_mcu_read_features(struct omnia_mcu *mcu)
_DEF_FEAT(NEW_INT_API, "new interrupt API"),
_DEF_FEAT(POWEROFF_WAKEUP, "poweroff and wakeup"),
_DEF_FEAT(TRNG, "true random number generator"),
+ _DEF_FEAT(BRIGHTNESS_INT, "LED panel brightness change interrupt"),
+ _DEF_FEAT(LED_GAMMA_CORRECTION, "LED gamma correction"),
#undef _DEF_FEAT
};
struct i2c_client *client = mcu->client;
diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h
index 2b13e28ee323..088541be3f4c 100644
--- a/drivers/platform/cznic/turris-omnia-mcu.h
+++ b/drivers/platform/cznic/turris-omnia-mcu.h
@@ -8,7 +8,6 @@
#ifndef __TURRIS_OMNIA_MCU_H
#define __TURRIS_OMNIA_MCU_H
-#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/gpio/driver.h>
#include <linux/hw_random.h>
@@ -17,8 +16,6 @@
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/workqueue.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
struct i2c_client;
struct rtc_device;
@@ -93,133 +90,6 @@ struct omnia_mcu {
#endif
};
-int omnia_cmd_write_read(const struct i2c_client *client,
- void *cmd, unsigned int cmd_len,
- void *reply, unsigned int reply_len);
-
-static inline int omnia_cmd_write(const struct i2c_client *client, void *cmd,
- unsigned int len)
-{
- return omnia_cmd_write_read(client, cmd, len, NULL, 0);
-}
-
-static inline int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd,
- u8 val)
-{
- u8 buf[2] = { cmd, val };
-
- return omnia_cmd_write(client, buf, sizeof(buf));
-}
-
-static inline int omnia_cmd_write_u16(const struct i2c_client *client, u8 cmd,
- u16 val)
-{
- u8 buf[3];
-
- buf[0] = cmd;
- put_unaligned_le16(val, &buf[1]);
-
- return omnia_cmd_write(client, buf, sizeof(buf));
-}
-
-static inline int omnia_cmd_write_u32(const struct i2c_client *client, u8 cmd,
- u32 val)
-{
- u8 buf[5];
-
- buf[0] = cmd;
- put_unaligned_le32(val, &buf[1]);
-
- return omnia_cmd_write(client, buf, sizeof(buf));
-}
-
-static inline int omnia_cmd_read(const struct i2c_client *client, u8 cmd,
- void *reply, unsigned int len)
-{
- return omnia_cmd_write_read(client, &cmd, 1, reply, len);
-}
-
-static inline unsigned int
-omnia_compute_reply_length(unsigned long mask, bool interleaved,
- unsigned int offset)
-{
- if (!mask)
- return 0;
-
- return ((__fls(mask) >> 3) << interleaved) + 1 + offset;
-}
-
-/* Returns 0 on success */
-static inline int omnia_cmd_read_bits(const struct i2c_client *client, u8 cmd,
- unsigned long bits, unsigned long *dst)
-{
- __le32 reply;
- int err;
-
- if (!bits) {
- *dst = 0;
- return 0;
- }
-
- err = omnia_cmd_read(client, cmd, &reply,
- omnia_compute_reply_length(bits, false, 0));
- if (err)
- return err;
-
- *dst = le32_to_cpu(reply) & bits;
-
- return 0;
-}
-
-static inline int omnia_cmd_read_bit(const struct i2c_client *client, u8 cmd,
- unsigned long bit)
-{
- unsigned long reply;
- int err;
-
- err = omnia_cmd_read_bits(client, cmd, bit, &reply);
- if (err)
- return err;
-
- return !!reply;
-}
-
-static inline int omnia_cmd_read_u32(const struct i2c_client *client, u8 cmd,
- u32 *dst)
-{
- __le32 reply;
- int err;
-
- err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
- if (err)
- return err;
-
- *dst = le32_to_cpu(reply);
-
- return 0;
-}
-
-static inline int omnia_cmd_read_u16(const struct i2c_client *client, u8 cmd,
- u16 *dst)
-{
- __le16 reply;
- int err;
-
- err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
- if (err)
- return err;
-
- *dst = le16_to_cpu(reply);
-
- return 0;
-}
-
-static inline int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd,
- u8 *reply)
-{
- return omnia_cmd_read(client, cmd, reply, sizeof(*reply));
-}
-
#ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO
extern const u8 omnia_int_to_gpio_idx[32];
extern const struct attribute_group omnia_mcu_gpio_group;
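
With omnia_cmd_write_read() now exported from the base driver, the private header drops its inline command helpers (presumably in favor of a shared location, though that hunk is not shown here). The reply-length rule they encoded is worth restating: the reply must reach the byte holding the highest requested bit, doubled for interleaved layouts, plus one byte and any fixed offset. A standalone sketch, with __fls() open-coded for a userspace build:

#include <stdio.h>

static unsigned int fls_ul(unsigned long x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;
}

static unsigned int compute_reply_length(unsigned long mask, int interleaved,
                                         unsigned int offset)
{
        if (!mask)
                return 0;

        return ((fls_ul(mask) >> 3) << interleaved) + 1 + offset;
}

int main(void)
{
        /* Bit 9 lives in byte 1, so a non-interleaved read needs 2 bytes. */
        printf("%u\n", compute_reply_length(1UL << 9, 0, 0));
        return 0;
}
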
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index c5b36837e694..9cae07348d5e 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -177,7 +177,7 @@ static ssize_t post_reset_wdog_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t post_reset_wdog_store(struct device *dev,
@@ -206,7 +206,7 @@ static ssize_t mlxbf_bootctl_show(int smc_op, char *buf)
if (action < 0)
return action;
- return sprintf(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
+ return sysfs_emit(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
}
static int mlxbf_bootctl_store(int smc_op, const char *buf, size_t count)
@@ -274,14 +274,14 @@ static ssize_t lifecycle_state_show(struct device *dev,
* due to using the test bits.
*/
if (test_state) {
- return sprintf(buf, "%s(test)\n",
+ return sysfs_emit(buf, "%s(test)\n",
mlxbf_bootctl_lifecycle_states[lc_state]);
} else if (use_dev_key &&
(lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
- return sprintf(buf, "Secured (development)\n");
+ return sysfs_emit(buf, "Secured (development)\n");
}
- return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
+ return sysfs_emit(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
}
static ssize_t secure_boot_fuse_state_show(struct device *dev,
@@ -332,9 +332,9 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev,
else
status = valid ? "Invalid" : "Free";
}
- buf_len += sprintf(buf + buf_len, "%d:%s ", key, status);
+ buf_len += sysfs_emit_at(buf, buf_len, "%d:%s ", key, status);
}
- buf_len += sprintf(buf + buf_len, "\n");
+ buf_len += sysfs_emit_at(buf, buf_len, "\n");
return buf_len;
}
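
sysfs_emit() warns and returns 0 unless buf points at the start of the sysfs page, so the appending loop above has to go through sysfs_emit_at(), which takes the offset explicitly. A minimal sketch of the idiom (the attribute and item list are illustrative):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        static const char *const items[] = { "a", "b", "c" };
        int len = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(items); i++)
                len += sysfs_emit_at(buf, len, "%s ", items[i]);

        len += sysfs_emit_at(buf, len, "\n");

        return len;
}
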
@@ -939,7 +939,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_bootctl_acpi_ids);
static ssize_t mlxbf_bootctl_bootfifo_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos,
size_t count)
{
@@ -971,9 +971,9 @@ static ssize_t mlxbf_bootctl_bootfifo_read(struct file *filp,
return p - buf;
}
-static struct bin_attribute mlxbf_bootctl_bootfifo_sysfs_attr = {
+static const struct bin_attribute mlxbf_bootctl_bootfifo_sysfs_attr = {
.attr = { .name = "bootfifo", .mode = 0400 },
- .read = mlxbf_bootctl_bootfifo_read,
+ .read_new = mlxbf_bootctl_bootfifo_read,
};
static bool mlxbf_bootctl_guid_match(const guid_t *guid,
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 9d18dfca6a67..36a00692347d 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -88,6 +88,7 @@
#define MLXBF_PMC_CRSPACE_PERFMON_CTL(n) (n * MLXBF_PMC_CRSPACE_PERFMON_REG0_SZ)
#define MLXBF_PMC_CRSPACE_PERFMON_EN BIT(30)
#define MLXBF_PMC_CRSPACE_PERFMON_CLR BIT(28)
+#define MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(n) (MLXBF_PMC_CRSPACE_PERFMON_CTL(n) + 0x4)
#define MLXBF_PMC_CRSPACE_PERFMON_VAL0(n) (MLXBF_PMC_CRSPACE_PERFMON_CTL(n) + 0xc)
/**
@@ -114,6 +115,7 @@ struct mlxbf_pmc_attribute {
* @attr_event: Attributes for "event" sysfs files
* @attr_event_list: Attributes for "event_list" sysfs files
* @attr_enable: Attributes for "enable" sysfs files
+ * @attr_count_clock: Attributes for "count_clock" sysfs files
* @block_attr: All attributes needed for the block
* @block_attr_grp: Attribute group for the block
*/
@@ -126,6 +128,7 @@ struct mlxbf_pmc_block_info {
struct mlxbf_pmc_attribute *attr_event;
struct mlxbf_pmc_attribute attr_event_list;
struct mlxbf_pmc_attribute attr_enable;
+ struct mlxbf_pmc_attribute attr_count_clock;
struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
struct attribute_group block_attr_grp;
};
@@ -859,6 +862,37 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_miss_events[] = {
{75, "HISTOGRAM_HISTOGRAM_BIN9"},
};
+static const struct mlxbf_pmc_events mlxbf_pmc_clock_events[] = {
+ { 0x0, "FMON_CLK_LAST_COUNT_PLL_D1_INST0" },
+ { 0x4, "REFERENCE_WINDOW_WIDTH_PLL_D1_INST0" },
+ { 0x8, "FMON_CLK_LAST_COUNT_PLL_D1_INST1" },
+ { 0xc, "REFERENCE_WINDOW_WIDTH_PLL_D1_INST1" },
+ { 0x10, "FMON_CLK_LAST_COUNT_PLL_G1" },
+ { 0x14, "REFERENCE_WINDOW_WIDTH_PLL_G1" },
+ { 0x18, "FMON_CLK_LAST_COUNT_PLL_W1" },
+ { 0x1c, "REFERENCE_WINDOW_WIDTH_PLL_W1" },
+ { 0x20, "FMON_CLK_LAST_COUNT_PLL_T1" },
+ { 0x24, "REFERENCE_WINDOW_WIDTH_PLL_T1" },
+ { 0x28, "FMON_CLK_LAST_COUNT_PLL_A0" },
+ { 0x2c, "REFERENCE_WINDOW_WIDTH_PLL_A0" },
+ { 0x30, "FMON_CLK_LAST_COUNT_PLL_C0" },
+ { 0x34, "REFERENCE_WINDOW_WIDTH_PLL_C0" },
+ { 0x38, "FMON_CLK_LAST_COUNT_PLL_N1" },
+ { 0x3c, "REFERENCE_WINDOW_WIDTH_PLL_N1" },
+ { 0x40, "FMON_CLK_LAST_COUNT_PLL_I1" },
+ { 0x44, "REFERENCE_WINDOW_WIDTH_PLL_I1" },
+ { 0x48, "FMON_CLK_LAST_COUNT_PLL_R1" },
+ { 0x4c, "REFERENCE_WINDOW_WIDTH_PLL_R1" },
+ { 0x50, "FMON_CLK_LAST_COUNT_PLL_P1" },
+ { 0x54, "REFERENCE_WINDOW_WIDTH_PLL_P1" },
+ { 0x58, "FMON_CLK_LAST_COUNT_REF_100_INST0" },
+ { 0x5c, "REFERENCE_WINDOW_WIDTH_REF_100_INST0" },
+ { 0x60, "FMON_CLK_LAST_COUNT_REF_100_INST1" },
+ { 0x64, "REFERENCE_WINDOW_WIDTH_REF_100_INST1" },
+ { 0x68, "FMON_CLK_LAST_COUNT_REF_156" },
+ { 0x6c, "REFERENCE_WINDOW_WIDTH_REF_156" },
+};
+
static struct mlxbf_pmc_context *pmc;
/* UUID used to probe ATF service. */
@@ -1032,6 +1066,9 @@ static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk, size
} else if (strstr(blk, "llt")) {
events = mlxbf_pmc_llt_events;
size = ARRAY_SIZE(mlxbf_pmc_llt_events);
+ } else if (strstr(blk, "clock_measure")) {
+ events = mlxbf_pmc_clock_events;
+ size = ARRAY_SIZE(mlxbf_pmc_clock_events);
} else {
events = NULL;
size = 0;
@@ -1168,7 +1205,7 @@ static int mlxbf_pmc_program_l3_counter(unsigned int blk_num, u32 cnt_num, u32 e
/* Method to handle crspace counter programming */
static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
{
- void *addr;
+ void __iomem *addr;
u32 word;
int ret;
@@ -1192,7 +1229,7 @@ static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num,
/* Method to clear crspace counter value */
static int mlxbf_pmc_clear_crspace_counter(unsigned int blk_num, u32 cnt_num)
{
- void *addr;
+ void __iomem *addr;
addr = pmc->block[blk_num].mmio_base +
MLXBF_PMC_CRSPACE_PERFMON_VAL0(pmc->block[blk_num].counters) +
@@ -1405,7 +1442,7 @@ static int mlxbf_pmc_read_l3_event(unsigned int blk_num, u32 cnt_num, u64 *resul
static int mlxbf_pmc_read_crspace_event(unsigned int blk_num, u32 cnt_num, u64 *result)
{
u32 word, evt;
- void *addr;
+ void __iomem *addr;
int ret;
addr = pmc->block[blk_num].mmio_base +
@@ -1466,14 +1503,15 @@ static int mlxbf_pmc_read_event(unsigned int blk_num, u32 cnt_num, bool is_l3, u
/* Method to read a register */
static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
{
- u32 ecc_out;
+ u32 reg;
- if (strstr(pmc->block_name[blk_num], "ecc")) {
+ if ((strstr(pmc->block_name[blk_num], "ecc")) ||
+ (strstr(pmc->block_name[blk_num], "clock_measure"))) {
if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
- &ecc_out))
+ &reg))
return -EFAULT;
- *result = ecc_out;
+ *result = reg;
return 0;
}
@@ -1487,6 +1525,9 @@ static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
/* Method to write to a register */
static int mlxbf_pmc_write_reg(unsigned int blk_num, u32 offset, u64 data)
{
+ if (strstr(pmc->block_name[blk_num], "clock_measure"))
+ return -EINVAL;
+
if (strstr(pmc->block_name[blk_num], "ecc")) {
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
MLXBF_PMC_WRITE_REG_32, data);
@@ -1763,6 +1804,49 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
return count;
}
+/* Show function for "count_clock" sysfs files - only for crspace */
+static ssize_t mlxbf_pmc_count_clock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlxbf_pmc_attribute *attr_count_clock = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ unsigned int blk_num;
+ u32 reg;
+
+ blk_num = attr_count_clock->nr;
+
+ if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(pmc->block[blk_num].counters),
+ &reg))
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%u\n", reg);
+}
+
+/* Store function for "count_clock" sysfs files - only for crspace */
+static ssize_t mlxbf_pmc_count_clock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlxbf_pmc_attribute *attr_count_clock = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ unsigned int blk_num;
+ u32 reg;
+ int err;
+
+ blk_num = attr_count_clock->nr;
+
+ err = kstrtouint(buf, 0, &reg);
+ if (err < 0)
+ return err;
+
+ mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(pmc->block[blk_num].counters),
+ MLXBF_PMC_WRITE_REG_32, reg);
+
+ return count;
+}
+
/* Populate attributes for blocks with counters to monitor performance */
static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_num)
{
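
The new count_clock callbacks reuse the driver's established trick for carrying per-block context through sysfs: embed the generic device_attribute in a wrapper and recover it with container_of(). A condensed sketch with illustrative names:

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/sysfs.h>

struct example_attribute {
        struct device_attribute dev_attr;
        unsigned int nr;                /* block number */
};

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct example_attribute *ea =
                container_of(attr, struct example_attribute, dev_attr);

        return sysfs_emit(buf, "block %u\n", ea->nr);
}
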
@@ -1801,6 +1885,21 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
attr = NULL;
}
+ if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
+ /* Program crspace counters to count clock cycles using "count_clock" sysfs */
+ attr = &pmc->block[blk_num].attr_count_clock;
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = mlxbf_pmc_count_clock_show;
+ attr->dev_attr.store = mlxbf_pmc_count_clock_store;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "count_clock");
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+
pmc->block[blk_num].attr_counter = devm_kcalloc(
dev, pmc->block[blk_num].counters,
sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 6aa2a4650367..b347000e4329 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -232,7 +232,7 @@ static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
regval = !!(regval & data->mask);
}
- return sprintf(buf, "%u\n", regval);
+ return sysfs_emit(buf, "%u\n", regval);
}
#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 595276206baf..97fefe6c38d1 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -126,7 +126,7 @@ mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
mutex_unlock(&priv->io_lock);
- return sprintf(buf, "%u\n", regval);
+ return sysfs_emit(buf, "%u\n", regval);
access_error:
mutex_unlock(&priv->io_lock);
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index 08db878f1d7d..0e479e35e66e 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -40,7 +40,7 @@ struct ssam_tmp_profile_info {
struct ssam_platform_profile_device {
struct ssam_device *sdev;
- struct platform_profile_handler handler;
+ struct device *ppdev;
bool has_fan;
};
@@ -154,14 +154,14 @@ static int convert_profile_to_ssam_fan(struct ssam_device *sdev, enum platform_p
}
}
-static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+static int ssam_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
struct ssam_platform_profile_device *tpd;
enum ssam_tmp_profile tp;
int status;
- tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
+ tpd = dev_get_drvdata(dev);
status = ssam_tmp_profile_get(tpd->sdev, &tp);
if (status)
@@ -175,13 +175,13 @@ static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+static int ssam_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
struct ssam_platform_profile_device *tpd;
int tp;
- tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
+ tpd = dev_get_drvdata(dev);
tp = convert_profile_to_ssam_tmp(tpd->sdev, profile);
if (tp < 0)
@@ -201,6 +201,22 @@ static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
return tp;
}
+static int ssam_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops ssam_platform_profile_ops = {
+ .probe = ssam_platform_profile_probe,
+ .profile_get = ssam_platform_profile_get,
+ .profile_set = ssam_platform_profile_set,
+};
+
static int surface_platform_profile_probe(struct ssam_device *sdev)
{
struct ssam_platform_profile_device *tpd;
@@ -210,23 +226,14 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
return -ENOMEM;
tpd->sdev = sdev;
-
- tpd->handler.profile_get = ssam_platform_profile_get;
- tpd->handler.profile_set = ssam_platform_profile_set;
+ ssam_device_set_drvdata(sdev, tpd);
tpd->has_fan = device_property_read_bool(&sdev->dev, "has_fan");
- set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+ tpd->ppdev = devm_platform_profile_register(&sdev->dev, "Surface Platform Profile",
+ tpd, &ssam_platform_profile_ops);
- return platform_profile_register(&tpd->handler);
-}
-
-static void surface_platform_profile_remove(struct ssam_device *sdev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(tpd->ppdev);
}
static const struct ssam_device_id ssam_platform_profile_match[] = {
@@ -237,7 +244,6 @@ MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
static struct ssam_device_driver surface_platform_profile = {
.probe = surface_platform_profile_probe,
- .remove = surface_platform_profile_remove,
.match_table = ssam_platform_profile_match,
.driver = {
.name = "surface_platform_profile",
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index d09baa3d3d90..69336bd778ee 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -30,7 +30,10 @@
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
#include <linux/hwmon.h>
+#include <linux/units.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
@@ -67,10 +70,16 @@ MODULE_LICENSE("GPL");
#define ACER_WMID_GET_GAMING_SYS_INFO_METHODID 5
#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
+#define ACER_WMID_GET_GAMING_MISC_SETTING_METHODID 23
-#define ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET 0x54
+#define ACER_GAMING_MISC_SETTING_STATUS_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_MISC_SETTING_INDEX_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_MISC_SETTING_VALUE_MASK GENMASK_ULL(15, 8)
-#define ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK GENMASK(20, 8)
+#define ACER_PREDATOR_V4_RETURN_STATUS_BIT_MASK GENMASK_ULL(7, 0)
+#define ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK GENMASK_ULL(15, 8)
+#define ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK GENMASK_ULL(23, 8)
+#define ACER_PREDATOR_V4_SUPPORTED_SENSORS_BIT_MASK GENMASK_ULL(39, 24)
/*
* Acer ACPI method GUIDs
@@ -95,12 +104,33 @@ enum acer_wmi_event_ids {
WMID_HOTKEY_EVENT = 0x1,
WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
WMID_GAMING_TURBO_KEY_EVENT = 0x7,
+ WMID_AC_EVENT = 0x8,
};
enum acer_wmi_predator_v4_sys_info_command {
- ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS = 0x02,
- ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED = 0x0201,
- ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED = 0x0601,
+ ACER_WMID_CMD_GET_PREDATOR_V4_SUPPORTED_SENSORS = 0x0000,
+ ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING = 0x0001,
+ ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS = 0x0002,
+};
+
+enum acer_wmi_predator_v4_sensor_id {
+ ACER_WMID_SENSOR_CPU_TEMPERATURE = 0x01,
+ ACER_WMID_SENSOR_CPU_FAN_SPEED = 0x02,
+ ACER_WMID_SENSOR_EXTERNAL_TEMPERATURE_2 = 0x03,
+ ACER_WMID_SENSOR_GPU_FAN_SPEED = 0x06,
+ ACER_WMID_SENSOR_GPU_TEMPERATURE = 0x0A,
+};
+
+enum acer_wmi_predator_v4_oc {
+ ACER_WMID_OC_NORMAL = 0x0000,
+ ACER_WMID_OC_TURBO = 0x0002,
+};
+
+enum acer_wmi_gaming_misc_setting {
+ ACER_WMID_MISC_SETTING_OC_1 = 0x0005,
+ ACER_WMID_MISC_SETTING_OC_2 = 0x0007,
+ ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES = 0x000A,
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE = 0x000B,
};
static const struct key_entry acer_wmi_keymap[] __initconst = {
@@ -246,7 +276,7 @@ struct hotkey_function_type_aa {
#define ACER_CAP_TURBO_LED BIT(8)
#define ACER_CAP_TURBO_FAN BIT(9)
#define ACER_CAP_PLATFORM_PROFILE BIT(10)
-#define ACER_CAP_FAN_SPEED_READ BIT(11)
+#define ACER_CAP_HWMON BIT(11)
/*
* Interface type flags
@@ -271,6 +301,7 @@ static u16 commun_func_bitmap;
static u8 commun_fn_key_number;
static bool cycle_gaming_thermal_profile = true;
static bool predator_v4;
+static u64 supported_sensors;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
@@ -358,7 +389,7 @@ static void __init set_quirks(void)
if (quirks->predator_v4)
interface->capability |= ACER_CAP_PLATFORM_PROFILE |
- ACER_CAP_FAN_SPEED_READ;
+ ACER_CAP_HWMON;
}
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -393,6 +424,20 @@ static struct quirk_entry quirk_acer_predator_ph315_53 = {
.gpu_fans = 1,
};
+static struct quirk_entry quirk_acer_predator_ph16_72 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+ .predator_v4 = 1,
+};
+
+static struct quirk_entry quirk_acer_predator_pt14_51 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+ .predator_v4 = 1,
+};
+
static struct quirk_entry quirk_acer_predator_v4 = {
.predator_v4 = 1,
};
@@ -566,6 +611,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Nitro AN515-58",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Nitro AN515-58"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH315-53",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -593,6 +647,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Predator PH16-72",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-72"),
+ },
+ .driver_data = &quirk_acer_predator_ph16_72,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH18-71",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -601,6 +664,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PT14-51",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PT14-51"),
+ },
+ .driver_data = &quirk_acer_predator_pt14_51,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
@@ -713,29 +785,24 @@ static const struct dmi_system_id non_acer_quirks[] __initconst = {
{}
};
-static struct platform_profile_handler platform_profile_handler;
+static struct device *platform_profile_device;
static bool platform_profile_support;
/*
* The profile used before turbo mode. This variable is needed for
* returning from turbo mode when the mode key is in toggle mode.
*/
-static int last_non_turbo_profile;
-
-enum acer_predator_v4_thermal_profile_ec {
- ACER_PREDATOR_V4_THERMAL_PROFILE_ECO = 0x04,
- ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO = 0x03,
- ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE = 0x02,
- ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x01,
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x00,
-};
+static int last_non_turbo_profile = INT_MIN;
+
+/* The most performant supported profile */
+static int acer_predator_v4_max_perf;
-enum acer_predator_v4_thermal_profile_wmi {
- ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI = 0x060B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI = 0x050B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI = 0x040B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI = 0x0B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI = 0x010B,
+enum acer_predator_v4_thermal_profile {
+ ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x00,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x01,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE = 0x04,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO = 0x05,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_ECO = 0x06,
};
/* Find which quirks are needed for a particular vendor/ model pair */
@@ -1448,6 +1515,45 @@ WMI_gaming_execute_u64(u32 method_id, u64 in, u64 *out)
return status;
}
+static int WMI_gaming_execute_u32_u64(u32 method_id, u32 in, u64 *out)
+{
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input = {
+ .length = sizeof(in),
+ .pointer = &in,
+ };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ status = wmi_evaluate_method(WMID_GUID4, 0, method_id, &input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = result.pointer;
+ if (obj && out) {
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ *out = obj->integer.value;
+ break;
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length < sizeof(*out))
+ ret = -ENOMSG;
+ else
+ *out = get_unaligned_le64(obj->buffer.pointer);
+
+ break;
+ default:
+ ret = -ENOMSG;
+ break;
+ }
+ }
+
+ kfree(obj);
+
+ return ret;
+}
+
static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
{
u32 method_id = 0;
@@ -1462,9 +1568,6 @@ static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
case ACER_CAP_TURBO_FAN:
method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR;
break;
- case ACER_CAP_TURBO_OC:
- method_id = ACER_WMID_SET_GAMING_MISC_SETTING_METHODID;
- break;
default:
return AE_BAD_PARAMETER;
}
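
The misc-setting calls pack the setting index into bits 7:0 and the value into bits 15:8 of the input word, and treat a non-zero status byte in the result as failure. A condensed sketch of the packing with FIELD_PREP()/FIELD_GET(), mirroring the ACER_GAMING_MISC_SETTING_* masks above:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_STATUS_MASK     GENMASK_ULL(7, 0)
#define EXAMPLE_INDEX_MASK      GENMASK_ULL(7, 0)
#define EXAMPLE_VALUE_MASK      GENMASK_ULL(15, 8)

static u64 example_pack(u8 setting, u8 value)
{
        return FIELD_PREP(EXAMPLE_INDEX_MASK, setting) |
               FIELD_PREP(EXAMPLE_VALUE_MASK, value);
}

static int example_unpack(u64 result, u8 *value)
{
        /* A non-zero status byte means the firmware rejected the call. */
        if (FIELD_GET(EXAMPLE_STATUS_MASK, result))
                return -EIO;

        *value = FIELD_GET(EXAMPLE_VALUE_MASK, result);
        return 0;
}
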
@@ -1497,6 +1600,24 @@ static acpi_status WMID_gaming_get_u64(u64 *value, u32 cap)
return status;
}
+static int WMID_gaming_get_sys_info(u32 command, u64 *out)
+{
+ acpi_status status;
+ u64 result;
+
+ status = WMI_gaming_execute_u64(ACER_WMID_GET_GAMING_SYS_INFO_METHODID, command, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_PREDATOR_V4_RETURN_STATUS_BIT_MASK, result))
+ return -EIO;
+
+ *out = result;
+
+ return 0;
+}
+
static void WMID_gaming_set_fan_mode(u8 fan_mode)
{
/* fan_mode = 1 is used for auto, fan_mode = 2 used for turbo*/
@@ -1518,6 +1639,48 @@ static void WMID_gaming_set_fan_mode(u8 fan_mode)
WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN);
}
+static int WMID_gaming_set_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 value)
+{
+ acpi_status status;
+ u64 input = 0;
+ u64 result;
+
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_INDEX_MASK, setting);
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_VALUE_MASK, value);
+
+ status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_MISC_SETTING_STATUS_MASK, result))
+ return -EIO;
+
+ return 0;
+}
+
+static int WMID_gaming_get_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 *value)
+{
+ u64 input = 0;
+ u64 result;
+ int ret;
+
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_INDEX_MASK, setting);
+
+ ret = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_MISC_SETTING_METHODID, input,
+ &result);
+ if (ret < 0)
+ return ret;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_MISC_SETTING_STATUS_MASK, result))
+ return -EIO;
+
+ *value = FIELD_GET(ACER_GAMING_MISC_SETTING_VALUE_MASK, result);
+
+ return 0;
+}
+
/*
* Generic Device (interface-independent)
*/
@@ -1744,26 +1907,6 @@ static int acer_gsensor_event(void)
return 0;
}
-static int acer_get_fan_speed(int fan)
-{
- if (quirks->predator_v4) {
- acpi_status status;
- u64 fanspeed;
-
- status = WMI_gaming_execute_u64(
- ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
- fan == 0 ? ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED :
- ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED,
- &fanspeed);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- return FIELD_GET(ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK, fanspeed);
- }
- return -EOPNOTSUPP;
-}
-
/*
* Predator series turbo button
*/
@@ -1783,8 +1926,12 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_fan_mode(0x1);
/* Set OC to normal */
- WMID_gaming_set_u64(0x5, ACER_CAP_TURBO_OC);
- WMID_gaming_set_u64(0x7, ACER_CAP_TURBO_OC);
+ if (has_cap(ACER_CAP_TURBO_OC)) {
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_1,
+ ACER_WMID_OC_NORMAL);
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_2,
+ ACER_WMID_OC_NORMAL);
+ }
} else {
/* Turn on turbo led */
WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED);
@@ -1793,22 +1940,25 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_fan_mode(0x2);
/* Set OC to turbo mode */
- WMID_gaming_set_u64(0x205, ACER_CAP_TURBO_OC);
- WMID_gaming_set_u64(0x207, ACER_CAP_TURBO_OC);
+ if (has_cap(ACER_CAP_TURBO_OC)) {
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_1,
+ ACER_WMID_OC_TURBO);
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_2,
+ ACER_WMID_OC_TURBO);
+ }
}
return turbo_led_state;
}
static int
-acer_predator_v4_platform_profile_get(struct platform_profile_handler *pprof,
+acer_predator_v4_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
u8 tp;
int err;
- err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET, &tp);
-
- if (err < 0)
+ err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &tp);
+ if (err)
return err;
switch (tp) {
@@ -1835,74 +1985,112 @@ acer_predator_v4_platform_profile_get(struct platform_profile_handler *pprof,
}
static int
-acer_predator_v4_platform_profile_set(struct platform_profile_handler *pprof,
+acer_predator_v4_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- int tp;
- acpi_status status;
+ int err, tp;
switch (profile) {
case PLATFORM_PROFILE_PERFORMANCE:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
break;
case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
break;
case PLATFORM_PROFILE_QUIET:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
break;
case PLATFORM_PROFILE_LOW_POWER:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
break;
default:
return -EOPNOTSUPP;
}
- status = WMI_gaming_execute_u64(
- ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
-
- if (ACPI_FAILURE(status))
- return -EIO;
+ err = WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
+ if (err)
+ return err;
- if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
+ if (tp != acer_predator_v4_max_perf)
last_non_turbo_profile = tp;
return 0;
}
-static int acer_platform_profile_setup(void)
+static int
+acer_predator_v4_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ unsigned long supported_profiles;
+ int err;
+
+ err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES,
+ (u8 *)&supported_profiles);
+ if (err)
+ return err;
+
+ /* Iterate through supported profiles in order of increasing performance */
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_ECO, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
+
+ /* We only use this profile as a fallback option in case no prior
+ * profile is supported.
+ */
+ if (last_non_turbo_profile < 0)
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
+
+ /* We need to handle the hypothetical case where only the turbo profile
+ * is supported. In this case the turbo toggle will essentially be a
+ * no-op.
+ */
+ if (last_non_turbo_profile < 0)
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
+ }
+
+ return 0;
+}
+
+static const struct platform_profile_ops acer_predator_v4_platform_profile_ops = {
+ .probe = acer_predator_v4_platform_profile_probe,
+ .profile_get = acer_predator_v4_platform_profile_get,
+ .profile_set = acer_predator_v4_platform_profile_set,
+};
+
+static int acer_platform_profile_setup(struct platform_device *device)
{
if (quirks->predator_v4) {
- int err;
-
- platform_profile_handler.profile_get =
- acer_predator_v4_platform_profile_get;
- platform_profile_handler.profile_set =
- acer_predator_v4_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_PERFORMANCE,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_QUIET,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_LOW_POWER,
- platform_profile_handler.choices);
-
- err = platform_profile_register(&platform_profile_handler);
- if (err)
- return err;
+ platform_profile_device = devm_platform_profile_register(
+ &device->dev, "acer-wmi", NULL, &acer_predator_v4_platform_profile_ops);
+ if (IS_ERR(platform_profile_device))
+ return PTR_ERR(platform_profile_device);
platform_profile_support = true;
-
- /* Set default non-turbo profile */
- last_non_turbo_profile =
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
}
return 0;
}
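
The probe above leans on iteration order: every supported profile overwrites acer_predator_v4_max_perf, so after walking the list from weakest to strongest the variable holds the most performant option. The same logic condensed into a table-driven sketch (reusing the enum from this patch; names otherwise illustrative):

#include <linux/bitops.h>
#include <linux/kernel.h>

static const int example_profiles_by_perf[] = {
        ACER_PREDATOR_V4_THERMAL_PROFILE_ECO,
        ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET,
        ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED,
        ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE,
        ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO,
};

static int example_max_perf(unsigned long supported)
{
        int max_perf = -1;
        int i;

        for (i = 0; i < ARRAY_SIZE(example_profiles_by_perf); i++)
                if (test_bit(example_profiles_by_perf[i], &supported))
                        max_perf = example_profiles_by_perf[i];

        return max_perf;
}
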
@@ -1910,83 +2098,41 @@ static int acer_platform_profile_setup(void)
static int acer_thermal_profile_change(void)
{
/*
- * This mode key can rotate each mode or toggle turbo mode.
- * On battery, only ECO and BALANCED mode are available.
+ * This mode key will either cycle through each mode or toggle the
+ * most performant profile.
*/
if (quirks->predator_v4) {
u8 current_tp;
- int tp, err;
- u64 on_AC;
- acpi_status status;
-
- err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET,
- &current_tp);
+ int err, tp;
- if (err < 0)
- return err;
+ if (cycle_gaming_thermal_profile) {
+ platform_profile_cycle();
+ } else {
+ /* Do nothing if no suitable platform profiles were found */
+ if (last_non_turbo_profile < 0)
+ return 0;
- /* Check power source */
- status = WMI_gaming_execute_u64(
- ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
- ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS, &on_AC);
+ err = WMID_gaming_get_misc_setting(
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &current_tp);
+ if (err)
+ return err;
- if (ACPI_FAILURE(status))
- return -EIO;
-
- switch (current_tp) {
- case ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
- else
+ if (current_tp == acer_predator_v4_max_perf)
tp = last_non_turbo_profile;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_ECO:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- default:
- return -EOPNOTSUPP;
- }
+ tp = acer_predator_v4_max_perf;
- status = WMI_gaming_execute_u64(
- ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
-
- if (ACPI_FAILURE(status))
- return -EIO;
+ err = WMID_gaming_set_misc_setting(
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
+ if (err)
+ return err;
- /* Store non-turbo profile for turbo mode toggle*/
- if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
- last_non_turbo_profile = tp;
+ /* Store last profile for toggle */
+ if (current_tp != acer_predator_v4_max_perf)
+ last_non_turbo_profile = current_tp;
- platform_profile_notify();
+ platform_profile_notify(platform_profile_device);
+ }
}
return 0;
@@ -2280,6 +2426,9 @@ static void acer_wmi_notify(union acpi_object *obj, void *context)
if (return_value.key_num == 0x5 && has_cap(ACER_CAP_PLATFORM_PROFILE))
acer_thermal_profile_change();
break;
+ case WMID_AC_EVENT:
+ /* We ignore AC events here */
+ break;
default:
pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
@@ -2530,12 +2679,12 @@ static int acer_platform_probe(struct platform_device *device)
goto error_rfkill;
if (has_cap(ACER_CAP_PLATFORM_PROFILE)) {
- err = acer_platform_profile_setup();
+ err = acer_platform_profile_setup(device);
if (err)
goto error_platform_profile;
}
- if (has_cap(ACER_CAP_FAN_SPEED_READ)) {
+ if (has_cap(ACER_CAP_HWMON)) {
err = acer_wmi_hwmon_init();
if (err)
goto error_hwmon;
@@ -2544,8 +2693,6 @@ static int acer_platform_probe(struct platform_device *device)
return 0;
error_hwmon:
- if (platform_profile_support)
- platform_profile_remove();
error_platform_profile:
acer_rfkill_exit();
error_rfkill:
@@ -2566,9 +2713,6 @@ static void acer_platform_remove(struct platform_device *device)
acer_backlight_exit();
acer_rfkill_exit();
-
- if (platform_profile_support)
- platform_profile_remove();
}
#ifdef CONFIG_PM_SLEEP
@@ -2655,43 +2799,86 @@ static void __init create_debugfs(void)
&interface->debug.wmid_devices);
}
+static const enum acer_wmi_predator_v4_sensor_id acer_wmi_temp_channel_to_sensor_id[] = {
+ [0] = ACER_WMID_SENSOR_CPU_TEMPERATURE,
+ [1] = ACER_WMID_SENSOR_GPU_TEMPERATURE,
+ [2] = ACER_WMID_SENSOR_EXTERNAL_TEMPERATURE_2,
+};
+
+static const enum acer_wmi_predator_v4_sensor_id acer_wmi_fan_channel_to_sensor_id[] = {
+ [0] = ACER_WMID_SENSOR_CPU_FAN_SPEED,
+ [1] = ACER_WMID_SENSOR_GPU_FAN_SPEED,
+};
+
static umode_t acer_wmi_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type, u32 attr,
int channel)
{
+ enum acer_wmi_predator_v4_sensor_id sensor_id;
+ const u64 *supported_sensors = data;
+
switch (type) {
+ case hwmon_temp:
+ sensor_id = acer_wmi_temp_channel_to_sensor_id[channel];
+ break;
case hwmon_fan:
- if (acer_get_fan_speed(channel) >= 0)
- return 0444;
+ sensor_id = acer_wmi_fan_channel_to_sensor_id[channel];
break;
default:
return 0;
}
+ if (*supported_sensors & BIT(sensor_id - 1))
+ return 0444;
+
return 0;
}
static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
+ u64 command = ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING;
+ u64 result;
int ret;
switch (type) {
+ case hwmon_temp:
+ command |= FIELD_PREP(ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK,
+ acer_wmi_temp_channel_to_sensor_id[channel]);
+
+ ret = WMID_gaming_get_sys_info(command, &result);
+ if (ret < 0)
+ return ret;
+
+ result = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
+ *val = result * MILLIDEGREE_PER_DEGREE;
+ return 0;
case hwmon_fan:
- ret = acer_get_fan_speed(channel);
+ command |= FIELD_PREP(ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK,
+ acer_wmi_fan_channel_to_sensor_id[channel]);
+
+ ret = WMID_gaming_get_sys_info(command, &result);
if (ret < 0)
return ret;
- *val = ret;
- break;
+
+ *val = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
+ return 0;
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
static const struct hwmon_channel_info *const acer_wmi_hwmon_info[] = {
- HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT), NULL
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT
+ ),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT
+ ),
+ NULL
};
static const struct hwmon_ops acer_wmi_hwmon_ops = {
@@ -2708,9 +2895,20 @@ static int acer_wmi_hwmon_init(void)
{
struct device *dev = &acer_platform_device->dev;
struct device *hwmon;
+ u64 result;
+ int ret;
+
+ ret = WMID_gaming_get_sys_info(ACER_WMID_CMD_GET_PREDATOR_V4_SUPPORTED_SENSORS, &result);
+ if (ret < 0)
+ return ret;
+
+ /* Return early if no sensors are available */
+ supported_sensors = FIELD_GET(ACER_PREDATOR_V4_SUPPORTED_SENSORS_BIT_MASK, result);
+ if (!supported_sensors)
+ return 0;
hwmon = devm_hwmon_device_register_with_info(dev, "acer",
- &acer_platform_driver,
+ &supported_sensors,
&acer_wmi_hwmon_chip_info,
NULL);
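
Sensor visibility now comes from the firmware-reported bitmap, where sensor ID n occupies bit n-1; passing &supported_sensors as the hwmon driver data lets is_visible() consult it per channel. The test in isolation:

#include <linux/bits.h>
#include <linux/types.h>

/* Sensor ID n maps to bit n-1 of the firmware bitmap. */
static bool example_sensor_present(u64 supported, unsigned int sensor_id)
{
        return supported & BIT(sensor_id - 1);
}
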
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index e981d45e1c12..444b43be35a2 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -226,7 +226,7 @@ static int hsmp_parse_acpi_table(struct device *dev, u16 sock_ind)
}
static ssize_t hsmp_metric_tbl_acpi_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -285,19 +285,19 @@ static int init_acpi(struct device *dev)
return ret;
}
-static struct bin_attribute hsmp_metric_tbl_attr = {
+static const struct bin_attribute hsmp_metric_tbl_attr = {
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444},
- .read = hsmp_metric_tbl_acpi_read,
+ .read_new = hsmp_metric_tbl_acpi_read,
.size = sizeof(struct hsmp_metric_table),
};
-static struct bin_attribute *hsmp_attr_list[] = {
+static const struct bin_attribute *hsmp_attr_list[] = {
&hsmp_metric_tbl_attr,
NULL
};
-static struct attribute_group hsmp_attr_grp = {
- .bin_attrs = hsmp_attr_list,
+static const struct attribute_group hsmp_attr_grp = {
+ .bin_attrs_new = hsmp_attr_list,
.is_bin_visible = hsmp_is_sock_attr_visible,
};
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 227b4ad4a51a..03164e30b3a5 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -33,7 +33,13 @@
#define HSMP_WR true
#define HSMP_RD false
-#define DRIVER_VERSION "2.3"
+#define DRIVER_VERSION "2.4"
+
+/*
+ * When the same message number is used for both GET and SET operations,
+ * bit 31 of the first argument indicates whether it is a SET or a GET.
+ */
+#define CHECK_GET_BIT BIT(31)
static struct hsmp_plat_device hsmp_pdev;
@@ -167,11 +173,28 @@ static int validate_message(struct hsmp_message *msg)
if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD)
return -ENOMSG;
- /* num_args and response_sz against the HSMP spec */
- if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args ||
- msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ /*
+ * num_args passed by user should match the num_args specified in
+ * message description table.
+ */
+ if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args)
return -EINVAL;
+ /*
+ * Some older HSMP SET messages have been extended to also support GET in
+ * the same message. For these messages, GET returns the current value and
+ * SET returns the value that was successfully set. To support GET and SET
+ * in the same message while maintaining backward compatibility for HSMP
+ * users, hsmp_msg_desc_table[] indicates only the maximum allowed
+ * response_sz.
+ */
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET) {
+ if (msg->response_sz > hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+ } else {
+ /* only HSMP_SET or HSMP_GET messages go through this strict check */
+ if (msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+ }
return 0;
}
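
The relaxed check only applies to HSMP_SET_GET messages, where the table records the maximum response size; plain SET and GET messages must still match exactly. The rule in isolation (names are illustrative):

#include <linux/types.h>

static bool example_response_sz_ok(u16 requested, u16 table_max, bool set_get)
{
        return set_get ? requested <= table_max : requested == table_max;
}
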
@@ -239,6 +262,18 @@ int hsmp_test(u16 sock_ind, u32 value)
}
EXPORT_SYMBOL_NS_GPL(hsmp_test, "AMD_HSMP");
+static bool is_get_msg(struct hsmp_message *msg)
+{
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_GET)
+ return true;
+
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET &&
+ (msg->args[0] & CHECK_GET_BIT))
+ return true;
+
+ return false;
+}
+
long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int __user *arguser = (int __user *)arg;
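
For HSMP_SET_GET messages the direction rides in bit 31 of the first argument, which is what is_get_msg() inspects before the mode checks below. A hedged sketch of how a caller flags each direction, assuming the uapi struct hsmp_message layout with a u32 args[] array:

#include <linux/bits.h>
#include <linux/types.h>

#define CHECK_GET_BIT BIT(31)

/* Caller-side view: set bit 31 to read back, clear it to program. */
static void example_mark_as_get(struct hsmp_message *msg)
{
        msg->args[0] |= CHECK_GET_BIT;
}

static void example_mark_as_set(struct hsmp_message *msg, u32 value)
{
        msg->args[0] = value & ~CHECK_GET_BIT;
}
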
@@ -261,7 +296,7 @@ long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
* Device is opened in O_WRONLY mode
* Execute only set/configure commands
*/
- if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_SET)
+ if (is_get_msg(&msg))
return -EPERM;
break;
case FMODE_READ:
@@ -269,7 +304,7 @@ long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
* Device is opened in O_RDONLY mode
* Execute only get/monitor commands
*/
- if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_GET)
+ if (!is_get_msg(&msg))
return -EPERM;
break;
case FMODE_READ | FMODE_WRITE:
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index a61f815c9f80..02ca85762b68 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -59,7 +59,7 @@ static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
}
static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct hsmp_socket *sock;
@@ -97,13 +97,13 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
* is_bin_visible function is used to show / hide the necessary groups.
*/
#define HSMP_BIN_ATTR(index, _list) \
-static struct bin_attribute attr##index = { \
+static const struct bin_attribute attr##index = { \
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \
.private = (void *)index, \
- .read = hsmp_metric_tbl_plat_read, \
+ .read_new = hsmp_metric_tbl_plat_read, \
.size = sizeof(struct hsmp_metric_table), \
}; \
-static struct bin_attribute _list[] = { \
+static const struct bin_attribute _list[] = { \
&attr##index, \
NULL \
}
@@ -118,8 +118,8 @@ HSMP_BIN_ATTR(6, *sock6_attr_list);
HSMP_BIN_ATTR(7, *sock7_attr_list);
#define HSMP_BIN_ATTR_GRP(index, _list, _name) \
-static struct attribute_group sock##index##_attr_grp = { \
- .bin_attrs = _list, \
+static const struct attribute_group sock##index##_attr_grp = { \
+ .bin_attrs_new = _list, \
.is_bin_visible = hsmp_is_sock_attr_visible, \
.name = #_name, \
}
diff --git a/drivers/platform/x86/amd/pmc/Kconfig b/drivers/platform/x86/amd/pmc/Kconfig
index 94f9563d8be7..eeffdafd686e 100644
--- a/drivers/platform/x86/amd/pmc/Kconfig
+++ b/drivers/platform/x86/amd/pmc/Kconfig
@@ -5,7 +5,7 @@
config AMD_PMC
tristate "AMD SoC PMC driver"
- depends on ACPI && PCI && RTC_CLASS && AMD_NB
+ depends on ACPI && PCI && RTC_CLASS && AMD_NODE
depends on SUSPEND
select SERIO
help
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
index f1d9ab19d24c..255d94ddf999 100644
--- a/drivers/platform/x86/amd/pmc/Makefile
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -4,6 +4,6 @@
# AMD Power Management Controller Driver
#
-amd-pmc-objs := pmc.o pmc-quirks.o
+amd-pmc-objs := pmc.o pmc-quirks.o mp1_stb.o
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
diff --git a/drivers/platform/x86/amd/pmc/mp1_stb.c b/drivers/platform/x86/amd/pmc/mp1_stb.c
new file mode 100644
index 000000000000..c005f00988f7
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/mp1_stb.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP1 Smart Trace Buffer (STB) Layer
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Sanket Goswami <Sanket.Goswami@amd.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_nb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "pmc.h"
+
+/* STB Spill to DRAM Parameters */
+#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
+#define S2D_TELEMETRY_BYTES_MAX 0x100000U
+#define S2D_RSVD_RAM_SPACE 0x100000
+
+/* STB Registers */
+#define AMD_STB_PMI_0 0x03E30600
+#define AMD_PMC_STB_DUMMY_PC 0xC6000007
+
+/* STB Spill to DRAM Message Definition */
+#define STB_FORCE_FLUSH_DATA 0xCF
+#define FIFO_SIZE 4096
+
+/* STB S2D (Spill to DRAM) has a different message port offset */
+#define AMD_S2D_REGISTER_MESSAGE 0xA20
+#define AMD_S2D_REGISTER_RESPONSE 0xA80
+#define AMD_S2D_REGISTER_ARGUMENT 0xA88
+
+/* STB S2D (Spill to DRAM) message port offset for 44h model */
+#define AMD_GNR_REGISTER_MESSAGE 0x524
+#define AMD_GNR_REGISTER_RESPONSE 0x570
+#define AMD_GNR_REGISTER_ARGUMENT 0xA40
+
+static bool enable_stb;
+module_param(enable_stb, bool, 0644);
+MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
+
+static bool dump_custom_stb;
+module_param(dump_custom_stb, bool, 0644);
+MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer");
+
+enum s2d_arg {
+ S2D_TELEMETRY_SIZE = 0x01,
+ S2D_PHYS_ADDR_LOW,
+ S2D_PHYS_ADDR_HIGH,
+ S2D_NUM_SAMPLES,
+ S2D_DRAM_SIZE,
+};
+
+struct amd_stb_v2_data {
+ size_t size;
+ u8 data[] __counted_by(size);
+};
+
+int amd_stb_write(struct amd_pmc_dev *dev, u32 data)
+{
+ int err;
+
+ err = amd_smn_write(0, AMD_STB_PMI_0, data);
+ if (err) {
+ dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+
+int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf)
+{
+ int i, err;
+
+ for (i = 0; i < FIFO_SIZE; i++) {
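+ /* Repeated reads of the same SMN address appear to pop successive FIFO entries */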
+ err = amd_smn_read(0, AMD_STB_PMI_0, buf++);
+ if (err) {
+ dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+ }
+
+ return 0;
+}
+
+static int amd_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 size = FIFO_SIZE * sizeof(u32);
+ u32 *buf;
+ int rc;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = amd_stb_read(dev, buf);
+ if (rc) {
+ kfree(buf);
+ return rc;
+ }
+
+ filp->private_data = buf;
+ return rc;
+}
+
+static ssize_t amd_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ FIFO_SIZE * sizeof(u32));
+}
+
+static int amd_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = amd_stb_debugfs_open,
+ .read = amd_stb_debugfs_read,
+ .release = amd_stb_debugfs_release,
+};
+
+/* Enhanced STB Firmware Reporting Mechanism */
+static int amd_stb_handle_efr(struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ struct amd_stb_v2_data *stb_data_arr;
+ u32 fsize;
+
+ fsize = dev->dram_size - S2D_RSVD_RAM_SPACE;
+ stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
+ if (!stb_data_arr)
+ return -ENOMEM;
+
+ stb_data_arr->size = fsize;
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
+ filp->private_data = stb_data_arr;
+
+ return 0;
+}
+
+static int amd_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 fsize, num_samples, val, stb_rdptr_offset = 0;
+ struct amd_stb_v2_data *stb_data_arr;
+ int ret;
+
+ /* Write dummy postcode while reading the STB buffer */
+ ret = amd_stb_write(dev, AMD_PMC_STB_DUMMY_PC);
+ if (ret)
+ dev_err(dev->dev, "error writing to STB: %d\n", ret);
+
+ /* The Spill to DRAM num_samples query uses a separate SMU message port */
+ dev->msg_port = MSG_PORT_S2D;
+
+ ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1);
+ if (ret)
+ dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret);
+
+ /*
+ * We have a custom STB size and the PMFW is supposed to give
+ * the enhanced DRAM size. Note that we land here only for
+ * platforms that support enhanced DRAM size reporting.
+ */
+ if (dump_custom_stb)
+ return amd_stb_handle_efr(filp);
+
+ /* Get the num_samples to calculate the last push location */
+ ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->stb_arg.s2d_msg_id, true);
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = MSG_PORT_PMC;
+ if (ret) {
+ dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
+ return ret;
+ }
+
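+ /* num_samples is treated as a byte count here; clamp the snapshot to the telemetry window */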
+ fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX);
+ stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
+ if (!stb_data_arr)
+ return -ENOMEM;
+
+ stb_data_arr->size = fsize;
+
+ /*
+ * Start capturing data from the last push location.
+ * This covers the general case, where the STB limits
+ * are sized for standard usage.
+ */
+ if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
+ /* First read the oldest data, starting one entry behind the last write, up to the end of the ring buffer */
+ stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX;
+ fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset;
+
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize);
+ /* Then copy the newer samples from offset 0 up to the last write */
+ memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset);
+ } else {
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
+ }
+
+ filp->private_data = stb_data_arr;
+
+ return 0;
+}
+
+static ssize_t amd_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amd_stb_v2_data *data = filp->private_data;
+
+ return simple_read_from_buffer(buf, size, pos, data->data, data->size);
+}
+
+static int amd_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_stb_debugfs_fops_v2 = {
+ .owner = THIS_MODULE,
+ .open = amd_stb_debugfs_open_v2,
+ .read = amd_stb_debugfs_read_v2,
+ .release = amd_stb_debugfs_release_v2,
+};
+
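+/* Model 44h (Zen5) uses the GNR mailbox offsets; all other parts fall back to the default S2D offsets */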
+static void amd_stb_update_args(struct amd_pmc_dev *dev)
+{
+ if (cpu_feature_enabled(X86_FEATURE_ZEN5))
+ switch (boot_cpu_data.x86_model) {
+ case 0x44:
+ dev->stb_arg.msg = AMD_GNR_REGISTER_MESSAGE;
+ dev->stb_arg.arg = AMD_GNR_REGISTER_ARGUMENT;
+ dev->stb_arg.resp = AMD_GNR_REGISTER_RESPONSE;
+ return;
+ default:
+ break;
+ }
+
+ dev->stb_arg.msg = AMD_S2D_REGISTER_MESSAGE;
+ dev->stb_arg.arg = AMD_S2D_REGISTER_ARGUMENT;
+ dev->stb_arg.resp = AMD_S2D_REGISTER_RESPONSE;
+}
+
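+/* Pick the per-SoC S2D message ID; returns false when STB is not supported on this SoC */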
+static bool amd_is_stb_supported(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ if (boot_cpu_data.x86_model == 0x44)
+ dev->stb_arg.s2d_msg_id = 0x9B;
+ else
+ dev->stb_arg.s2d_msg_id = 0xBE;
+ break;
+ case AMD_CPU_ID_PS:
+ dev->stb_arg.s2d_msg_id = 0x85;
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
+ if (boot_cpu_data.x86_model == 0x70)
+ dev->stb_arg.s2d_msg_id = 0xF1;
+ else
+ dev->stb_arg.s2d_msg_id = 0xDE;
+ break;
+ default:
+ return false;
+ }
+
+ amd_stb_update_args(dev);
+ return true;
+}
+
+int amd_stb_s2d_init(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 stb_phys_addr;
+ u32 size = 0;
+ int ret;
+
+ if (!enable_stb)
+ return 0;
+
+ if (amd_is_stb_supported(dev)) {
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_stb_debugfs_fops_v2);
+ } else {
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_stb_debugfs_fops);
+ return 0;
+ }
+
+ /* The Spill to DRAM feature uses a separate SMU message port */
+ dev->msg_port = MSG_PORT_S2D;
+
+ amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->stb_arg.s2d_msg_id, true);
+ if (size != S2D_TELEMETRY_BYTES_MAX)
+ return -EIO;
+
+ /* Get DRAM size */
+ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->stb_arg.s2d_msg_id, true);
+ if (ret || !dev->dram_size)
+ dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
+
+ /* Get STB DRAM address */
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->stb_arg.s2d_msg_id, true);
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->stb_arg.s2d_msg_id, true);
+
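+ /* Combine the two 32-bit halves into the 64-bit STB base address */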
+ stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = MSG_PORT_PMC;
+
+ dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
+ if (!dev->stb_virt_addr)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index a254debb9256..e6124498b195 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -10,8 +10,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/debugfs.h>
@@ -28,6 +28,8 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
+#include <asm/amd_node.h>
+
#include "pmc.h"
/* SMU communication registers */
@@ -40,24 +42,9 @@
#define AMD_PMC_SCRATCH_REG_1AH 0xF14
/* STB Registers */
-#define AMD_PMC_STB_PMI_0 0x03E30600
#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
-#define AMD_PMC_STB_DUMMY_PC 0xC6000007
-
-/* STB S2D(Spill to DRAM) has different message port offset */
-#define AMD_S2D_REGISTER_MESSAGE 0xA20
-#define AMD_S2D_REGISTER_RESPONSE 0xA80
-#define AMD_S2D_REGISTER_ARGUMENT 0xA88
-
-/* STB Spill to DRAM Parameters */
-#define S2D_TELEMETRY_BYTES_MAX 0x100000U
-#define S2D_RSVD_RAM_SPACE 0x100000
-#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
-
-/* STB Spill to DRAM Message Definition */
-#define STB_FORCE_FLUSH_DATA 0xCF
/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMC_MAPPING_SIZE 0x01000
@@ -97,7 +84,6 @@
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
-#define FIFO_SIZE 4096
enum amd_pmc_def {
MSG_TEST = 0x01,
@@ -105,24 +91,39 @@ enum amd_pmc_def {
MSG_OS_HINT_RN,
};
-enum s2d_arg {
- S2D_TELEMETRY_SIZE = 0x01,
- S2D_PHYS_ADDR_LOW,
- S2D_PHYS_ADDR_HIGH,
- S2D_NUM_SAMPLES,
- S2D_DRAM_SIZE,
-};
-
-struct amd_pmc_stb_v2_data {
- size_t size;
- u8 data[] __counted_by(size);
-};
-
struct amd_pmc_bit_map {
const char *name;
u32 bit_mask;
};
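+/* IP block bit layout reported by PMFW on family 1Ah model 70h parts */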
+static const struct amd_pmc_bit_map soc15_ip_blk_v2[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"VDD_CCX", BIT(4)},
+ {"ACP", BIT(5)},
+ {"VCN_0", BIT(6)},
+ {"VCN_1", BIT(7)},
+ {"ISP", BIT(8)},
+ {"NBIO", BIT(9)},
+ {"DF", BIT(10)},
+ {"USB3_0", BIT(11)},
+ {"USB3_1", BIT(12)},
+ {"LAPIC", BIT(13)},
+ {"USB3_2", BIT(14)},
+ {"USB4_RT0", BIT(15)},
+ {"USB4_RT1", BIT(16)},
+ {"USB4_0", BIT(17)},
+ {"USB4_1", BIT(18)},
+ {"MPM", BIT(19)},
+ {"JPEG_0", BIT(20)},
+ {"JPEG_1", BIT(21)},
+ {"IPU", BIT(22)},
+ {"UMSCH", BIT(23)},
+ {"VPE", BIT(24)},
+};
+
static const struct amd_pmc_bit_map soc15_ip_blk[] = {
{"DISPLAY", BIT(0)},
{"CPU", BIT(1)},
@@ -146,25 +147,13 @@ static const struct amd_pmc_bit_map soc15_ip_blk[] = {
{"IPU", BIT(19)},
{"UMSCH", BIT(20)},
{"VPE", BIT(21)},
- {}
};
-static bool enable_stb;
-module_param(enable_stb, bool, 0644);
-MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
-
static bool disable_workarounds;
module_param(disable_workarounds, bool, 0644);
MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
-static bool dump_custom_stb;
-module_param(dump_custom_stb, bool, 0644);
-MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer");
-
static struct amd_pmc_dev pmc;
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -193,155 +182,6 @@ struct smu_metrics {
u64 timecondition_notmet_totaltime[32];
} __packed;
-static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- u32 size = FIFO_SIZE * sizeof(u32);
- u32 *buf;
- int rc;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = amd_pmc_read_stb(dev, buf);
- if (rc) {
- kfree(buf);
- return rc;
- }
-
- filp->private_data = buf;
- return rc;
-}
-
-static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
- loff_t *pos)
-{
- if (!filp->private_data)
- return -EINVAL;
-
- return simple_read_from_buffer(buf, size, pos, filp->private_data,
- FIFO_SIZE * sizeof(u32));
-}
-
-static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
-{
- kfree(filp->private_data);
- return 0;
-}
-
-static const struct file_operations amd_pmc_stb_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = amd_pmc_stb_debugfs_open,
- .read = amd_pmc_stb_debugfs_read,
- .release = amd_pmc_stb_debugfs_release,
-};
-
-/* Enhanced STB Firmware Reporting Mechanism */
-static int amd_pmc_stb_handle_efr(struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- struct amd_pmc_stb_v2_data *stb_data_arr;
- u32 fsize;
-
- fsize = dev->dram_size - S2D_RSVD_RAM_SPACE;
- stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
- if (!stb_data_arr)
- return -ENOMEM;
-
- stb_data_arr->size = fsize;
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
- filp->private_data = stb_data_arr;
-
- return 0;
-}
-
-static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- u32 fsize, num_samples, val, stb_rdptr_offset = 0;
- struct amd_pmc_stb_v2_data *stb_data_arr;
- int ret;
-
- /* Write dummy postcode while reading the STB buffer */
- ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC);
- if (ret)
- dev_err(dev->dev, "error writing to STB: %d\n", ret);
-
- /* Spill to DRAM num_samples uses separate SMU message port */
- dev->msg_port = 1;
-
- ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1);
- if (ret)
- dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret);
-
- /*
- * We have a custom stb size and the PMFW is supposed to give
- * the enhanced dram size. Note that we land here only for the
- * platforms that support enhanced dram size reporting.
- */
- if (dump_custom_stb)
- return amd_pmc_stb_handle_efr(filp);
-
- /* Get the num_samples to calculate the last push location */
- ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true);
- /* Clear msg_port for other SMU operation */
- dev->msg_port = 0;
- if (ret) {
- dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
- return ret;
- }
-
- fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX);
- stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
- if (!stb_data_arr)
- return -ENOMEM;
-
- stb_data_arr->size = fsize;
-
- /*
- * Start capturing data from the last push location.
- * This is for general cases, where the stb limits
- * are meant for standard usage.
- */
- if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
- /* First read oldest data starting 1 behind last write till end of ringbuffer */
- stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX;
- fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset;
-
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize);
- /* Second copy the newer samples from offset 0 - last write */
- memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset);
- } else {
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
- }
-
- filp->private_data = stb_data_arr;
-
- return 0;
-}
-
-static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
- loff_t *pos)
-{
- struct amd_pmc_stb_v2_data *data = filp->private_data;
-
- return simple_read_from_buffer(buf, size, pos, data->data, data->size);
-}
-
-static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
-{
- kfree(filp->private_data);
- return 0;
-}
-
-static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
- .owner = THIS_MODULE,
- .open = amd_pmc_stb_debugfs_open_v2,
- .read = amd_pmc_stb_debugfs_read_v2,
- .release = amd_pmc_stb_debugfs_release_v2,
-};
-
static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -350,18 +190,23 @@ static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
dev->num_ips = 12;
- dev->s2d_msg_id = 0xBE;
+ dev->ips_ptr = soc15_ip_blk;
dev->smu_msg = 0x538;
break;
case AMD_CPU_ID_PS:
dev->num_ips = 21;
- dev->s2d_msg_id = 0x85;
+ dev->ips_ptr = soc15_ip_blk;
dev->smu_msg = 0x538;
break;
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
- dev->num_ips = 22;
- dev->s2d_msg_id = 0xDE;
+ if (boot_cpu_data.x86_model == 0x70) {
+ dev->num_ips = ARRAY_SIZE(soc15_ip_blk_v2);
+ dev->ips_ptr = soc15_ip_blk_v2;
+ } else {
+ dev->num_ips = ARRAY_SIZE(soc15_ip_blk);
+ dev->ips_ptr = soc15_ip_blk;
+ }
dev->smu_msg = 0x938;
break;
}
@@ -529,8 +374,8 @@ static int smu_fw_info_show(struct seq_file *s, void *unused)
seq_puts(s, "\n=== Active time (in us) ===\n");
for (idx = 0 ; idx < dev->num_ips ; idx++) {
- if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
- seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ if (dev->ips_ptr[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", dev->ips_ptr[idx].name,
table.timecondition_notmet_lastcapture[idx]);
}
@@ -625,20 +470,6 @@ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
debugfs_remove_recursive(dev->dbgfs_dir);
}
-static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
-{
- switch (dev->cpu_id) {
- case AMD_CPU_ID_YC:
- case AMD_CPU_ID_CB:
- case AMD_CPU_ID_PS:
- case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
- case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
- return true;
- default:
- return false;
- }
-}
-
static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
@@ -648,14 +479,17 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&s0ix_stats_fops);
debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
&amd_pmc_idlemask_fops);
- /* Enable STB only when the module_param is set */
- if (enable_stb) {
- if (amd_pmc_is_stb_supported(dev))
- debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
- &amd_pmc_stb_debugfs_fops_v2);
- else
- debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
- &amd_pmc_stb_debugfs_fops);
+}
+
+static char *amd_pmc_get_msg_port(struct amd_pmc_dev *dev)
+{
+ switch (dev->msg_port) {
+ case MSG_PORT_PMC:
+ return "PMC";
+ case MSG_PORT_S2D:
+ return "S2D";
+ default:
+ return "Invalid message port";
}
}
@@ -663,10 +497,10 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
u32 value, message, argument, response;
- if (dev->msg_port) {
- message = AMD_S2D_REGISTER_MESSAGE;
- argument = AMD_S2D_REGISTER_ARGUMENT;
- response = AMD_S2D_REGISTER_RESPONSE;
+ if (dev->msg_port == MSG_PORT_S2D) {
+ message = dev->stb_arg.msg;
+ argument = dev->stb_arg.arg;
+ response = dev->stb_arg.resp;
} else {
message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
@@ -674,26 +508,26 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
}
value = amd_pmc_reg_read(dev, response);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", amd_pmc_get_msg_port(dev), value);
value = amd_pmc_reg_read(dev, argument);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", amd_pmc_get_msg_port(dev), value);
value = amd_pmc_reg_read(dev, message);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", amd_pmc_get_msg_port(dev), value);
}
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
+int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
{
int rc;
u32 val, message, argument, response;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (dev->msg_port) {
- message = AMD_S2D_REGISTER_MESSAGE;
- argument = AMD_S2D_REGISTER_ARGUMENT;
- response = AMD_S2D_REGISTER_RESPONSE;
+ if (dev->msg_port == MSG_PORT_S2D) {
+ message = dev->stb_arg.msg;
+ argument = dev->stb_arg.arg;
+ response = dev->stb_arg.resp;
} else {
message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
@@ -706,7 +540,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- goto out_unlock;
+ return rc;
}
/* Write zero to response register */
@@ -724,7 +558,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "SMU response timed out\n");
- goto out_unlock;
+ return rc;
}
switch (val) {
@@ -738,21 +572,19 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
case AMD_PMC_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
rc = -EBUSY;
- goto out_unlock;
+ break;
case AMD_PMC_RESULT_CMD_UNKNOWN:
dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
rc = -EINVAL;
- goto out_unlock;
+ break;
case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
case AMD_PMC_RESULT_FAILED:
default:
dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
rc = -EIO;
- goto out_unlock;
+ break;
}
-out_unlock:
- mutex_unlock(&dev->lock);
amd_pmc_dump_registers(dev);
return rc;
}
@@ -881,7 +713,7 @@ static void amd_pmc_s2idle_prepare(void)
return;
}
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
@@ -900,7 +732,7 @@ static void amd_pmc_s2idle_check(void)
/* Dump the IdleMask before we add to the STB */
amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_CHECK);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
@@ -927,7 +759,7 @@ static void amd_pmc_s2idle_restore(void)
/* Let SMU know that we are looking for stats */
amd_pmc_dump_data(pdev);
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
@@ -981,74 +813,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ }
};
-static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
-{
- u32 phys_addr_low, phys_addr_hi;
- u64 stb_phys_addr;
- u32 size = 0;
- int ret;
-
- /* Spill to DRAM feature uses separate SMU message port */
- dev->msg_port = 1;
-
- amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true);
- if (size != S2D_TELEMETRY_BYTES_MAX)
- return -EIO;
-
- /* Get DRAM size */
- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
- if (ret || !dev->dram_size)
- dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
-
- /* Get STB DRAM address */
- amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true);
- amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true);
-
- if (!phys_addr_hi && !phys_addr_low) {
- dev_err(dev->dev, "STB is not enabled on the system; disable enable_stb or contact system vendor\n");
- return -EINVAL;
- }
-
- stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
-
- /* Clear msg_port for other SMU operation */
- dev->msg_port = 0;
-
- dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
- if (!dev->stb_virt_addr)
- return -ENOMEM;
-
- return 0;
-}
-
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
-{
- int err;
-
- err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
- if (err) {
- dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
- return pcibios_err_to_errno(err);
- }
-
- return 0;
-}
-
-static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
-{
- int i, err;
-
- for (i = 0; i < FIFO_SIZE; i++) {
- err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
- if (err) {
- dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
- return pcibios_err_to_errno(err);
- }
- }
-
- return 0;
-}
-
static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
@@ -1106,12 +870,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
/* Get num of IP blocks within the SoC */
amd_pmc_get_ip_info(dev);
- if (enable_stb && amd_pmc_is_stb_supported(dev)) {
- err = amd_pmc_s2d_init(dev);
- if (err)
- goto err_pci_dev_put;
- }
-
platform_set_drvdata(pdev, dev);
if (IS_ENABLED(CONFIG_SUSPEND)) {
err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
@@ -1122,6 +880,10 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
amd_pmc_dbgfs_register(dev);
+ err = amd_stb_s2d_init(dev);
+ if (err)
+ goto err_pci_dev_put;
+
if (IS_ENABLED(CONFIG_AMD_MP2_STB))
amd_mp2_stb_init(dev);
pm_report_max_hw_sleep(U64_MAX);
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index f1166d15c856..f43f0253b0f5 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -14,6 +14,11 @@
#include <linux/types.h>
#include <linux/mutex.h>
+enum s2d_msg_port {
+ MSG_PORT_PMC,
+ MSG_PORT_S2D,
+};
+
struct amd_mp2_dev {
void __iomem *mmio;
void __iomem *vslbase;
@@ -25,24 +30,31 @@ struct amd_mp2_dev {
bool is_stb_data;
};
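+/* S2D mailbox parameters: message ID plus message/argument/response register offsets */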
+struct stb_arg {
+ u32 s2d_msg_id;
+ u32 msg;
+ u32 arg;
+ u32 resp;
+};
+
struct amd_pmc_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
void __iomem *stb_virt_addr;
void __iomem *fch_virt_addr;
- bool msg_port;
u32 base_addr;
u32 cpu_id;
- u32 active_ips;
u32 dram_size;
+ u32 active_ips;
+ const struct amd_pmc_bit_map *ips_ptr;
u32 num_ips;
- u32 s2d_msg_id;
u32 smu_msg;
/* SMU version information */
u8 smu_program;
u8 major;
u8 minor;
u8 rev;
+ u8 msg_port;
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
@@ -50,6 +62,7 @@ struct amd_pmc_dev {
struct quirk_entry *quirks;
bool disable_8042_wakeup;
struct amd_mp2_dev *mp2;
+ struct stb_arg stb_arg;
};
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
@@ -70,4 +83,9 @@ void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
#define PCI_DEVICE_ID_AMD_MP2_STB 0x172c
+int amd_stb_s2d_init(struct amd_pmc_dev *dev);
+int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf);
+int amd_stb_write(struct amd_pmc_dev *dev, u32 data);
+int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
+
#endif /* PMC_H */
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index 99d67cdbd91e..25b8f7ae3abd 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -7,7 +7,7 @@ config AMD_PMF
tristate "AMD Platform Management Framework"
depends on ACPI && PCI
depends on POWER_SUPPLY
- depends on AMD_NB
+ depends on AMD_NODE
select ACPI_PLATFORM_PROFILE
depends on TEE && AMDTEE
depends on AMD_SFH_HID
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 7d6079b02589..6b26e48ce8ad 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o pmf-quirks.o
+ tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index 1b9c7acf0ddf..dd5780a1d06e 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -321,17 +321,29 @@ int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req
req, sizeof(*req));
}
+static void apmf_event_handler_v2(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ int ret;
+
+ guard(mutex)(&pmf_dev->cb_mutex);
+
+ ret = apmf_get_sbios_requests_v2(pmf_dev, &pmf_dev->req);
+ if (ret)
+ dev_err(pmf_dev->dev, "Failed to get v2 SBIOS requests: %d\n", ret);
+}
+
static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
{
struct amd_pmf_dev *pmf_dev = data;
struct apmf_sbios_req req;
int ret;
- mutex_lock(&pmf_dev->update_mutex);
+ guard(mutex)(&pmf_dev->update_mutex);
ret = apmf_get_sbios_requests(pmf_dev, &req);
if (ret) {
dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
- goto out;
+ return;
}
if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
@@ -353,8 +365,6 @@ static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
if (pmf_dev->amt_enabled)
amd_pmf_update_2_cql(pmf_dev, req.cql_event);
}
-out:
- mutex_unlock(&pmf_dev->update_mutex);
}
static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
@@ -430,6 +440,15 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
apmf_event_handler(ahandle, 0, pmf_dev);
}
+ if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2) {
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handler_v2, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "failed to install notify handler for custom BIOS inputs\n");
+ return -ENODEV;
+ }
+ }
+
return 0;
}
@@ -480,6 +499,9 @@ void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+
+ if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2)
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler_v2);
}
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 06a97c533cb8..764cc1fe90ae 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -8,13 +8,13 @@
* Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
-#include <asm/amd_nb.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <asm/amd_node.h>
#include "pmf.h"
/* PMF-SMU communication registers */
@@ -127,7 +127,8 @@ static void amd_pmf_get_metrics(struct work_struct *work)
ktime_t time_elapsed_ms;
int socket_power;
- mutex_lock(&dev->update_mutex);
+ guard(mutex)(&dev->update_mutex);
+
/* Transfer table contents */
memset(dev->buf, 0, sizeof(dev->m_table));
amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
@@ -149,7 +150,6 @@ static void amd_pmf_get_metrics(struct work_struct *work)
dev->start_time = ktime_to_ms(ktime_get());
schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
- mutex_unlock(&dev->update_mutex);
}
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
@@ -181,7 +181,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
int rc;
u32 val;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
@@ -189,7 +189,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- goto out_unlock;
+ return rc;
}
/* Write zero to response register */
@@ -207,7 +207,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "SMU response timed out\n");
- goto out_unlock;
+ return rc;
}
switch (val) {
@@ -221,21 +221,19 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
case AMD_PMF_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
rc = -EBUSY;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_UNKNOWN:
dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
rc = -EINVAL;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
case AMD_PMF_RESULT_FAILED:
default:
dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
rc = -EIO;
- goto out_unlock;
+ break;
}
-out_unlock:
- mutex_unlock(&dev->lock);
amd_pmf_dump_registers(dev);
return rc;
}
@@ -373,7 +371,6 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
power_supply_unreg_notifier(&dev->pwr_src_notifier);
- amd_pmf_deinit_sps(dev);
}
if (dev->smart_pc_enabled) {
@@ -456,7 +453,6 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
- amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
deleted file mode 100644
index 7cde5733b9ca..000000000000
--- a/drivers/platform/x86/amd/pmf/pmf-quirks.c
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * AMD Platform Management Framework Driver Quirks
- *
- * Copyright (c) 2024, Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Author: Mario Limonciello <mario.limonciello@amd.com>
- */
-
-#include <linux/dmi.h>
-
-#include "pmf.h"
-
-struct quirk_entry {
- u32 supported_func;
-};
-
-static struct quirk_entry quirk_no_sps_bug = {
- .supported_func = 0x4003,
-};
-
-static const struct dmi_system_id fwbug_list[] = {
- {
- .ident = "ROG Zephyrus G14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA403U"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {
- .ident = "ROG Ally X",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "RC72LA"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {
- .ident = "ASUS TUF Gaming A14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "FA401W"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {}
-};
-
-void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
-{
- const struct dmi_system_id *dmi_id;
- struct quirk_entry *quirks;
-
- dmi_id = dmi_first_match(fwbug_list);
- if (!dmi_id)
- return;
-
- quirks = dmi_id->driver_data;
- if (quirks->supported_func) {
- dev->supported_func = quirks->supported_func;
- pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
- dmi_id->ident);
- }
-}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index a79808fda1d8..41b2b91b8fdc 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -338,7 +338,7 @@ struct amd_pmf_dev {
struct mutex lock; /* protects the PMF interface */
u32 supported_func;
enum platform_profile_option current_profile;
- struct platform_profile_handler pprof;
+ struct device *ppdev; /* platform profile class device */
struct dentry *dbgfs_dir;
int hb_interval; /* SBIOS heartbeat interval */
struct delayed_work heart_beat;
@@ -370,6 +370,8 @@ struct amd_pmf_dev {
struct input_dev *pmf_idev;
size_t mtable_size;
struct resource *res;
+ struct apmf_sbios_req_v2 req; /* Pending custom BIOS request */
+ struct mutex cb_mutex;
};
struct apmf_sps_prop_granular_v2 {
@@ -616,6 +618,30 @@ enum ta_slider {
TA_MAX,
};
+enum apmf_smartpc_custom_bios_inputs {
+ APMF_SMARTPC_CUSTOM_BIOS_INPUT1,
+ APMF_SMARTPC_CUSTOM_BIOS_INPUT2,
+};
+
+enum apmf_preq_smartpc {
+ NOTIFY_CUSTOM_BIOS_INPUT1 = 5,
+ NOTIFY_CUSTOM_BIOS_INPUT2,
+};
+
+enum platform_type {
+ PTYPE_UNKNOWN = 0,
+ LID_CLOSE,
+ CLAMSHELL,
+ FLAT,
+ TENT,
+ STAND,
+ TABLET,
+ BOOK,
+ PRESENTATION,
+ PULL_FWD,
+ PTYPE_INVALID = 0xf,
+};
+
/* Command ids for TA communication */
enum ta_pmf_command {
TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE,
@@ -657,7 +683,8 @@ struct ta_pmf_condition_info {
u32 power_slider;
u32 lid_state;
bool user_present;
- u32 rsvd1[2];
+ u32 bios_input1;
+ u32 bios_input2;
u32 monitor_count;
u32 rsvd2[2];
u32 bat_design;
@@ -667,7 +694,9 @@ struct ta_pmf_condition_info {
u32 device_state;
u32 socket_power;
u32 skin_temperature;
- u32 rsvd3[5];
+ u32 rsvd3[2];
+ u32 platform_type;
+ u32 rsvd3_1[2];
u32 ambient_light;
u32 length;
u32 avg_c0residency;
@@ -751,7 +780,6 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
struct amd_pmf_static_slider_granular *table);
int amd_pmf_init_sps(struct amd_pmf_dev *dev);
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
bool is_pprof_balanced(struct amd_pmf_dev *pmf);
@@ -797,7 +825,4 @@ int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
-/* Quirk infrastructure */
-void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
-
#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index 06226eb0eab3..f34f3130c330 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -16,6 +16,46 @@
#include "pmf.h"
#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *platform_type_as_str(u16 platform_type)
+{
+ switch (platform_type) {
+ case CLAMSHELL:
+ return "CLAMSHELL";
+ case FLAT:
+ return "FLAT";
+ case TENT:
+ return "TENT";
+ case STAND:
+ return "STAND";
+ case TABLET:
+ return "TABLET";
+ case BOOK:
+ return "BOOK";
+ case PRESENTATION:
+ return "PRESENTATION";
+ case PULL_FWD:
+ return "PULL_FWD";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static const char *laptop_placement_as_str(u16 device_state)
+{
+ switch (device_state) {
+ case ON_TABLE:
+ return "ON_TABLE";
+ case ON_LAP_MOTION:
+ return "ON_LAP_MOTION";
+ case IN_BAG:
+ return "IN_BAG";
+ case OUT_OF_BAG:
+ return "OUT_OF_BAG";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static const char *ta_slider_as_str(unsigned int state)
{
switch (state) {
@@ -47,12 +87,38 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
+ dev_dbg(dev->dev, "Platform type: %s\n", platform_type_as_str(in->ev_info.platform_type));
+ dev_dbg(dev->dev, "Laptop placement: %s\n",
+ laptop_placement_as_str(in->ev_info.device_state));
+ dev_dbg(dev->dev, "Custom BIOS input1: %u\n", in->ev_info.bios_input1);
+ dev_dbg(dev->dev, "Custom BIOS input2: %u\n", in->ev_info.bios_input2);
dev_dbg(dev->dev, "==== TA inputs END ====\n");
}
#else
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {}
#endif
+static void amd_pmf_get_custom_bios_inputs(struct amd_pmf_dev *pdev,
+ struct ta_pmf_enact_table *in)
+{
+ if (!pdev->req.pending_req)
+ return;
+
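+ /* Only one pending bit is expected at a time; anything else hits the default case below */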
+ switch (pdev->req.pending_req) {
+ case BIT(NOTIFY_CUSTOM_BIOS_INPUT1):
+ in->ev_info.bios_input1 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT1];
+ break;
+ case BIT(NOTIFY_CUSTOM_BIOS_INPUT2):
+ in->ev_info.bios_input2 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT2];
+ break;
+ default:
+ dev_dbg(pdev->dev, "Invalid preq for BIOS input: 0x%x\n", pdev->req.pending_req);
+ }
+
+ /* Clear pending requests after handling */
+ memset(&pdev->req, 0, sizeof(pdev->req));
+}
+
static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in)
{
u16 max, avg = 0;
@@ -190,6 +256,14 @@ static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact
} else {
dev_dbg(dev->dev, "HPD is not enabled/detected\n");
}
+
+ /* Get SRA (Secondary Accelerometer) data */
+ if (!amd_get_sfh_info(&sfh_info, MT_SRA)) {
+ in->ev_info.platform_type = sfh_info.platform_type;
+ in->ev_info.device_state = sfh_info.laptop_placement;
+ } else {
+ dev_dbg(dev->dev, "SRA is not enabled/detected\n");
+ }
}
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
@@ -201,4 +275,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
amd_pmf_get_battery_info(dev, in);
amd_pmf_get_slider_info(dev, in);
amd_pmf_get_sensor_info(dev, in);
+ amd_pmf_get_custom_bios_inputs(dev, in);
}
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index 92f7fb22277d..e6cf0b22dac3 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -282,10 +282,10 @@ bool is_pprof_balanced(struct amd_pmf_dev *pmf)
return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
}
-static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+static int amd_pmf_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
*profile = pmf->current_profile;
return 0;
@@ -363,10 +363,10 @@ int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
return 0;
}
-static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+static int amd_pmf_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
int ret = 0;
pmf->current_profile = profile;
@@ -387,10 +387,23 @@ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
return 0;
}
-int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
{
- int err;
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops amd_pmf_profile_ops = {
+ .probe = amd_pmf_profile_probe,
+ .profile_get = amd_pmf_profile_get,
+ .profile_set = amd_pmf_profile_set,
+};
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
dev->current_profile = PLATFORM_PROFILE_BALANCED;
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
@@ -405,24 +418,12 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
amd_pmf_set_sps_power_limits(dev);
}
- dev->pprof.profile_get = amd_pmf_profile_get;
- dev->pprof.profile_set = amd_pmf_profile_set;
-
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
-
/* Create platform_profile structure and register */
- err = platform_profile_register(&dev->pprof);
- if (err)
- dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
- err);
+ dev->ppdev = devm_platform_profile_register(dev->dev, "amd-pmf", dev,
+ &amd_pmf_profile_ops);
+ if (IS_ERR(dev->ppdev))
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %ld\n",
+ PTR_ERR(dev->ppdev));
- return err;
-}
-
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(dev->ppdev);
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index a5933980ade3..3f8b2a324efd 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -50,7 +50,8 @@ MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-do
static struct quirk_entry *quirks;
static bool atkbd_reports_vol_keys;
-static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port)
+static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended_e0;
static bool extended_e1;
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 8bd187e8b47f..38ef778e8c19 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -313,7 +313,7 @@ struct asus_wmi {
bool mid_fan_curve_available;
struct fan_curve_data custom_fan_curves[3];
- struct platform_profile_handler platform_profile_handler;
+ struct device *ppdev;
bool platform_profile_support;
// The RSOC controls the maximum charging percentage.
@@ -3782,7 +3782,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
* Ensure that platform_profile updates userspace with the change to ensure
* that platform_profile and throttle_thermal_policy_mode are in sync.
*/
- platform_profile_notify();
+ platform_profile_notify(asus->ppdev);
return count;
}
@@ -3793,13 +3793,13 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
static DEVICE_ATTR_RW(throttle_thermal_policy);
/* Platform profile ***********************************************************/
-static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+static int asus_wmi_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
struct asus_wmi *asus;
int tp;
- asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+ asus = dev_get_drvdata(dev);
tp = asus->throttle_thermal_policy_mode;
switch (tp) {
@@ -3819,13 +3819,13 @@ static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+static int asus_wmi_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
struct asus_wmi *asus;
int tp;
- asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+ asus = dev_get_drvdata(dev);
switch (profile) {
case PLATFORM_PROFILE_PERFORMANCE:
@@ -3845,6 +3845,21 @@ static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
return throttle_thermal_policy_write(asus);
}
+static int asus_wmi_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops asus_wmi_platform_profile_ops = {
+ .probe = asus_wmi_platform_profile_probe,
+ .profile_get = asus_wmi_platform_profile_get,
+ .profile_set = asus_wmi_platform_profile_set,
+};
+
static int platform_profile_setup(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
@@ -3869,22 +3884,11 @@ static int platform_profile_setup(struct asus_wmi *asus)
dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
- asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get;
- asus->platform_profile_handler.profile_set = asus_wmi_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_QUIET, asus->platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED,
- asus->platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE,
- asus->platform_profile_handler.choices);
-
- err = platform_profile_register(&asus->platform_profile_handler);
- if (err == -EEXIST) {
- pr_warn("%s, a platform_profile handler is already registered\n", __func__);
- return 0;
- } else if (err) {
- pr_err("%s, failed at platform_profile_register: %d\n", __func__, err);
- return err;
+ asus->ppdev = devm_platform_profile_register(dev, "asus-wmi", asus,
+ &asus_wmi_platform_profile_ops);
+ if (IS_ERR(asus->ppdev)) {
+ dev_err(dev, "Failed to register a platform_profile class device\n");
+ return PTR_ERR(asus->ppdev);
}
asus->platform_profile_support = true;
@@ -4815,7 +4819,7 @@ static int asus_wmi_add(struct platform_device *pdev)
}
if (asus->driver->i8042_filter) {
- err = i8042_install_filter(asus->driver->i8042_filter);
+ err = i8042_install_filter(asus->driver->i8042_filter, NULL);
if (err)
pr_warn("Unable to install key filter - %d\n", err);
}
@@ -4842,8 +4846,6 @@ fail_input:
fail_sysfs:
fail_custom_fan_curve:
fail_platform_profile_setup:
- if (asus->platform_profile_support)
- platform_profile_remove();
fail_fan_boost_mode:
fail_platform:
kfree(asus);
@@ -4869,9 +4871,6 @@ static void asus_wmi_remove(struct platform_device *device)
throttle_thermal_policy_set_default(asus);
asus_wmi_battery_exit(asus);
- if (asus->platform_profile_support)
- platform_profile_remove();
-
kfree(asus);
}
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index d02f15fd3482..018dfde4025e 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -73,8 +73,7 @@ struct asus_wmi_driver {
void (*key_filter) (struct asus_wmi_driver *driver, int *code,
unsigned int *value, bool *autorelease);
/* Optional standard i8042 filter */
- bool (*i8042_filter)(unsigned char data, unsigned char str,
- struct serio *serio);
+ i8042_filter_t i8042_filter;
int (*probe) (struct platform_device *device);
void (*detect_quirks) (struct asus_wmi_driver *driver);
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 2dddafb3f7fa..d09060aedd3f 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -152,6 +152,7 @@ config DELL_SMBIOS_SMM
config DELL_SMO8800
tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
default m
+ depends on I2C
depends on ACPI || COMPILE_TEST
help
Say Y here if you want to support SMO88XX freefall devices
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index 79d60f1bf4c1..bb3cbd470a46 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -15,6 +15,7 @@ dell-smbios-objs := dell-smbios-base.o
dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_SMO8800) += dell-lis3lv02d.o
obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
dell-wmi-objs := dell-wmi-base.o
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 341d01d3e3e4..e252e0cf47ef 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -385,12 +385,6 @@ struct color_platform {
u8 red;
} __packed;
-struct platform_zone {
- u8 location;
- struct device_attribute *attr;
- struct color_platform colors;
-};
-
struct wmax_brightness_args {
u32 led_mask;
u32 percentage;
@@ -420,22 +414,9 @@ struct wmax_u32_args {
};
static struct platform_device *platform_device;
-static struct device_attribute *zone_dev_attrs;
-static struct attribute **zone_attrs;
-static struct platform_zone *zone_data;
-static struct platform_profile_handler pp_handler;
+static struct color_platform colors[4];
static enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
-static struct platform_driver platform_driver = {
- .driver = {
- .name = "alienware-wmi",
- }
-};
-
-static struct attribute_group zone_attribute_group = {
- .name = "rgb_zones",
-};
-
static u8 interface;
static u8 lighting_control_state;
static u8 global_brightness;
@@ -443,7 +424,7 @@ static u8 global_brightness;
/*
* Helpers used for zone control
*/
-static int parse_rgb(const char *buf, struct platform_zone *zone)
+static int parse_rgb(const char *buf, struct color_platform *colors)
{
long unsigned int rgb;
int ret;
@@ -463,28 +444,14 @@ static int parse_rgb(const char *buf, struct platform_zone *zone)
repackager.package = rgb & 0x0f0f0f0f;
pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
repackager.cp.red, repackager.cp.green, repackager.cp.blue);
- zone->colors = repackager.cp;
+ *colors = repackager.cp;
return 0;
}
-static struct platform_zone *match_zone(struct device_attribute *attr)
-{
- u8 zone;
-
- for (zone = 0; zone < quirks->num_zones; zone++) {
- if ((struct device_attribute *)zone_data[zone].attr == attr) {
- pr_debug("alienware-wmi: matched zone location: %d\n",
- zone_data[zone].location);
- return &zone_data[zone];
- }
- }
- return NULL;
-}
-
/*
* Individual RGB zone control
*/
-static int alienware_update_led(struct platform_zone *zone)
+static int alienware_update_led(u8 location)
{
int method_id;
acpi_status status;
@@ -493,8 +460,8 @@ static int alienware_update_led(struct platform_zone *zone)
struct legacy_led_args legacy_args;
struct wmax_led_args wmax_basic_args;
if (interface == WMAX) {
- wmax_basic_args.led_mask = 1 << zone->location;
- wmax_basic_args.colors = zone->colors;
+ wmax_basic_args.led_mask = 1 << location;
+ wmax_basic_args.colors = colors[location];
wmax_basic_args.state = lighting_control_state;
guid = WMAX_CONTROL_GUID;
method_id = WMAX_METHOD_ZONE_CONTROL;
@@ -502,7 +469,7 @@ static int alienware_update_led(struct platform_zone *zone)
input.length = sizeof(wmax_basic_args);
input.pointer = &wmax_basic_args;
} else {
- legacy_args.colors = zone->colors;
+ legacy_args.colors = colors[location];
legacy_args.brightness = global_brightness;
legacy_args.state = 0;
if (lighting_control_state == LEGACY_BOOTING ||
@@ -511,7 +478,7 @@ static int alienware_update_led(struct platform_zone *zone)
legacy_args.state = lighting_control_state;
} else
guid = LEGACY_CONTROL_GUID;
- method_id = zone->location + 1;
+ method_id = location + 1;
input.length = sizeof(legacy_args);
input.pointer = &legacy_args;
@@ -525,35 +492,153 @@ static int alienware_update_led(struct platform_zone *zone)
}
static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf, u8 location)
{
- struct platform_zone *target_zone;
- target_zone = match_zone(attr);
- if (target_zone == NULL)
- return sprintf(buf, "red: -1, green: -1, blue: -1\n");
return sprintf(buf, "red: %d, green: %d, blue: %d\n",
- target_zone->colors.red,
- target_zone->colors.green, target_zone->colors.blue);
+ colors[location].red, colors[location].green,
+ colors[location].blue);
}
-static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t zone_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count, u8 location)
{
- struct platform_zone *target_zone;
int ret;
- target_zone = match_zone(attr);
- if (target_zone == NULL) {
- pr_err("alienware-wmi: invalid target zone\n");
- return 1;
- }
- ret = parse_rgb(buf, target_zone);
+
+ ret = parse_rgb(buf, &colors[location]);
if (ret)
return ret;
- ret = alienware_update_led(target_zone);
+
+ ret = alienware_update_led(location);
+
return ret ? ret : count;
}
+static ssize_t zone00_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 0);
+}
+
+static ssize_t zone00_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 0);
+}
+
+static DEVICE_ATTR_RW(zone00);
+
+static ssize_t zone01_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 1);
+}
+
+static ssize_t zone01_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 1);
+}
+
+static DEVICE_ATTR_RW(zone01);
+
+static ssize_t zone02_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 2);
+}
+
+static ssize_t zone02_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 2);
+}
+
+static DEVICE_ATTR_RW(zone02);
+
+static ssize_t zone03_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 3);
+}
+
+static ssize_t zone03_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 3);
+}
+
+static DEVICE_ATTR_RW(zone03);
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t lighting_control_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (lighting_control_state == LEGACY_BOOTING)
+ return sysfs_emit(buf, "[booting] running suspend\n");
+ else if (lighting_control_state == LEGACY_SUSPEND)
+ return sysfs_emit(buf, "booting running [suspend]\n");
+
+ return sysfs_emit(buf, "booting [running] suspend\n");
+}
+
+static ssize_t lighting_control_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 val;
+
+ if (strcmp(buf, "booting\n") == 0)
+ val = LEGACY_BOOTING;
+ else if (strcmp(buf, "suspend\n") == 0)
+ val = LEGACY_SUSPEND;
+ else if (interface == LEGACY)
+ val = LEGACY_RUNNING;
+ else
+ val = WMAX_RUNNING;
+
+ lighting_control_state = val;
+ pr_debug("alienware-wmi: updated control state to %d\n",
+ lighting_control_state);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(lighting_control_state);
+
+static umode_t zone_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
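+ /* Slot 0 is lighting_control_state; slots 1..num_zones are the zone color attributes */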
+ if (n < quirks->num_zones + 1)
+ return attr->mode;
+
+ return 0;
+}
+
+static bool zone_group_visible(struct kobject *kobj)
+{
+ return quirks->num_zones > 0;
+}
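+/* Combines zone_group_visible() and zone_attr_visible() into the callback used by SYSFS_GROUP_VISIBLE(zone) */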
+DEFINE_SYSFS_GROUP_VISIBLE(zone);
+
+static struct attribute *zone_attrs[] = {
+ &dev_attr_lighting_control_state.attr,
+ &dev_attr_zone00.attr,
+ &dev_attr_zone01.attr,
+ &dev_attr_zone02.attr,
+ &dev_attr_zone03.attr,
+ NULL
+};
+
+static struct attribute_group zone_attribute_group = {
+ .name = "rgb_zones",
+ .is_visible = SYSFS_GROUP_VISIBLE(zone),
+ .attrs = zone_attrs,
+};
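
A minimal sketch of the group-visibility pattern used above, assuming the DEFINE_SYSFS_GROUP_VISIBLE()/SYSFS_GROUP_VISIBLE() helpers from <linux/sysfs.h>; the demo_* names and the demo_feature/demo_count state are illustrative only, not taken from this driver:

static bool demo_group_visible(struct kobject *kobj)
{
        return demo_feature;                    /* hide the whole group when false */
}

static umode_t demo_attr_visible(struct kobject *kobj,
                                 struct attribute *attr, int n)
{
        return n < demo_count ? attr->mode : 0; /* trim trailing attributes */
}
DEFINE_SYSFS_GROUP_VISIBLE(demo);

static const struct attribute_group demo_group = {
        .name           = "demo",
        .is_visible     = SYSFS_GROUP_VISIBLE(demo),
        .attrs          = demo_attrs,
};
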
+
/*
* LED Brightness (Global)
*/
@@ -582,7 +667,7 @@ static void global_led_set(struct led_classdev *led_cdev,
if (interface == WMAX)
ret = wmax_brightness(brightness);
else
- ret = alienware_update_led(&zone_data[0]);
+ ret = alienware_update_led(0);
if (ret)
pr_err("LED brightness update failed\n");
}
@@ -598,46 +683,8 @@ static struct led_classdev global_led = {
.name = "alienware::global_brightness",
};
-/*
- * Lighting control state device attribute (Global)
- */
-static ssize_t show_control_state(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (lighting_control_state == LEGACY_BOOTING)
- return sysfs_emit(buf, "[booting] running suspend\n");
- else if (lighting_control_state == LEGACY_SUSPEND)
- return sysfs_emit(buf, "booting running [suspend]\n");
- return sysfs_emit(buf, "booting [running] suspend\n");
-}
-
-static ssize_t store_control_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- long unsigned int val;
- if (strcmp(buf, "booting\n") == 0)
- val = LEGACY_BOOTING;
- else if (strcmp(buf, "suspend\n") == 0)
- val = LEGACY_SUSPEND;
- else if (interface == LEGACY)
- val = LEGACY_RUNNING;
- else
- val = WMAX_RUNNING;
- lighting_control_state = val;
- pr_debug("alienware-wmi: updated control state to %d\n",
- lighting_control_state);
- return count;
-}
-
-static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
- store_control_state);
-
static int alienware_zone_init(struct platform_device *dev)
{
- u8 zone;
- char *name;
-
if (interface == WMAX) {
lighting_control_state = WMAX_RUNNING;
} else if (interface == LEGACY) {
@@ -646,68 +693,15 @@ static int alienware_zone_init(struct platform_device *dev)
global_led.max_brightness = 0x0F;
global_brightness = global_led.max_brightness;
- /*
- * - zone_dev_attrs num_zones + 1 is for individual zones and then
- * null terminated
- * - zone_attrs num_zones + 2 is for all attrs in zone_dev_attrs +
- * the lighting control + null terminated
- * - zone_data num_zones is for the distinct zones
- */
- zone_dev_attrs =
- kcalloc(quirks->num_zones + 1, sizeof(struct device_attribute),
- GFP_KERNEL);
- if (!zone_dev_attrs)
- return -ENOMEM;
-
- zone_attrs =
- kcalloc(quirks->num_zones + 2, sizeof(struct attribute *),
- GFP_KERNEL);
- if (!zone_attrs)
- return -ENOMEM;
-
- zone_data =
- kcalloc(quirks->num_zones, sizeof(struct platform_zone),
- GFP_KERNEL);
- if (!zone_data)
- return -ENOMEM;
-
- for (zone = 0; zone < quirks->num_zones; zone++) {
- name = kasprintf(GFP_KERNEL, "zone%02hhX", zone);
- if (name == NULL)
- return 1;
- sysfs_attr_init(&zone_dev_attrs[zone].attr);
- zone_dev_attrs[zone].attr.name = name;
- zone_dev_attrs[zone].attr.mode = 0644;
- zone_dev_attrs[zone].show = zone_show;
- zone_dev_attrs[zone].store = zone_set;
- zone_data[zone].location = zone;
- zone_attrs[zone] = &zone_dev_attrs[zone].attr;
- zone_data[zone].attr = &zone_dev_attrs[zone];
- }
- zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
- zone_attribute_group.attrs = zone_attrs;
-
- led_classdev_register(&dev->dev, &global_led);
-
- return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
+ return led_classdev_register(&dev->dev, &global_led);
}
static void alienware_zone_exit(struct platform_device *dev)
{
- u8 zone;
-
if (!quirks->num_zones)
return;
- sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
led_classdev_unregister(&global_led);
- if (zone_dev_attrs) {
- for (zone = 0; zone < quirks->num_zones; zone++)
- kfree(zone_dev_attrs[zone].attr.name);
- }
- kfree(zone_dev_attrs);
- kfree(zone_data);
- kfree(zone_attrs);
}
static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
@@ -742,14 +736,15 @@ static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
* The HDMI mux sysfs node indicates the status of the HDMI input mux.
* It can toggle between standard system GPU output and HDMI input.
*/
-static ssize_t show_hdmi_cable(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_HDMI_CABLE, &out_data);
@@ -763,14 +758,15 @@ static ssize_t show_hdmi_cable(struct device *dev,
return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
-static ssize_t show_hdmi_source(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t source_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_HDMI_STATUS, &out_data);
@@ -785,12 +781,12 @@ static ssize_t show_hdmi_source(struct device *dev,
return sysfs_emit(buf, "input gpu [unknown]\n");
}
-static ssize_t toggle_hdmi_source(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t source_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- acpi_status status;
struct wmax_basic_args args;
+ acpi_status status;
+
if (strcmp(buf, "gpu\n") == 0)
args.arg = 1;
else if (strcmp(buf, "input\n") == 0)
@@ -808,9 +804,14 @@ static ssize_t toggle_hdmi_source(struct device *dev,
return count;
}
-static DEVICE_ATTR(cable, S_IRUGO, show_hdmi_cable, NULL);
-static DEVICE_ATTR(source, S_IRUGO | S_IWUSR, show_hdmi_source,
- toggle_hdmi_source);
+static DEVICE_ATTR_RO(cable);
+static DEVICE_ATTR_RW(source);
+
+static bool hdmi_group_visible(struct kobject *kobj)
+{
+ return quirks->hdmi_mux;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(hdmi);
static struct attribute *hdmi_attrs[] = {
&dev_attr_cable.attr,
@@ -820,38 +821,24 @@ static struct attribute *hdmi_attrs[] = {
static const struct attribute_group hdmi_attribute_group = {
.name = "hdmi",
+ .is_visible = SYSFS_GROUP_VISIBLE(hdmi),
.attrs = hdmi_attrs,
};
-static void remove_hdmi(struct platform_device *dev)
-{
- if (quirks->hdmi_mux > 0)
- sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group);
-}
-
-static int create_hdmi(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
- if (ret)
- remove_hdmi(dev);
- return ret;
-}
-
/*
* Alienware GFX amplifier support
* - Currently supports reading cable status
* - Leaving expansion room to possibly support dock/undock events later
*/
-static ssize_t show_amplifier_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_AMPLIFIER_CABLE, &out_data);
@@ -865,7 +852,13 @@ static ssize_t show_amplifier_status(struct device *dev,
return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
-static DEVICE_ATTR(status, S_IRUGO, show_amplifier_status, NULL);
+static DEVICE_ATTR_RO(status);
+
+static bool amplifier_group_visible(struct kobject *kobj)
+{
+ return quirks->amplifier;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(amplifier);
static struct attribute *amplifier_attrs[] = {
&dev_attr_status.attr,
@@ -874,37 +867,23 @@ static struct attribute *amplifier_attrs[] = {
static const struct attribute_group amplifier_attribute_group = {
.name = "amplifier",
+ .is_visible = SYSFS_GROUP_VISIBLE(amplifier),
.attrs = amplifier_attrs,
};
-static void remove_amplifier(struct platform_device *dev)
-{
- if (quirks->amplifier > 0)
- sysfs_remove_group(&dev->dev.kobj, &amplifier_attribute_group);
-}
-
-static int create_amplifier(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &amplifier_attribute_group);
- if (ret)
- remove_amplifier(dev);
- return ret;
-}
-
/*
* Deep Sleep Control support
* - Modifies BIOS setting for deep sleep control allowing extra wakeup events
*/
-static ssize_t show_deepsleep_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t deepsleep_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_DEEP_SLEEP_STATUS, &out_data);
if (ACPI_SUCCESS(status)) {
@@ -919,12 +898,11 @@ static ssize_t show_deepsleep_status(struct device *dev,
return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
}
-static ssize_t toggle_deepsleep(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t deepsleep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- acpi_status status;
struct wmax_basic_args args;
+ acpi_status status;
if (strcmp(buf, "disabled\n") == 0)
args.arg = 0;
@@ -943,7 +921,13 @@ static ssize_t toggle_deepsleep(struct device *dev,
return count;
}
-static DEVICE_ATTR(deepsleep, S_IRUGO | S_IWUSR, show_deepsleep_status, toggle_deepsleep);
+static DEVICE_ATTR_RW(deepsleep);
+
+static bool deepsleep_group_visible(struct kobject *kobj)
+{
+ return quirks->deepslp;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(deepsleep);
static struct attribute *deepsleep_attrs[] = {
&dev_attr_deepsleep.attr,
@@ -952,25 +936,10 @@ static struct attribute *deepsleep_attrs[] = {
static const struct attribute_group deepsleep_attribute_group = {
.name = "deepsleep",
+ .is_visible = SYSFS_GROUP_VISIBLE(deepsleep),
.attrs = deepsleep_attrs,
};
-static void remove_deepsleep(struct platform_device *dev)
-{
- if (quirks->deepslp > 0)
- sysfs_remove_group(&dev->dev.kobj, &deepsleep_attribute_group);
-}
-
-static int create_deepsleep(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &deepsleep_attribute_group);
- if (ret)
- remove_deepsleep(dev);
- return ret;
-}
-
/*
* Thermal Profile control
* - Provides thermal profile control through the Platform Profile API
@@ -1000,13 +969,13 @@ static bool is_wmax_thermal_code(u32 code)
static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = operation,
.arg1 = arg,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_THERMAL_INFORMATION,
@@ -1023,13 +992,13 @@ static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
static int wmax_thermal_control(u8 profile)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = WMAX_OPERATION_ACTIVATE_PROFILE,
.arg1 = profile,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
u32 out_data;
status = alienware_wmax_command(&in_args, sizeof(in_args),
@@ -1047,13 +1016,13 @@ static int wmax_thermal_control(u8 profile)
static int wmax_game_shift_status(u8 operation, u32 *out_data)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = operation,
.arg1 = 0,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_GAME_SHIFT_STATUS,
@@ -1068,7 +1037,7 @@ static int wmax_game_shift_status(u8 operation, u32 *out_data)
return 0;
}
-static int thermal_profile_get(struct platform_profile_handler *pprof,
+static int thermal_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
u32 out_data;
@@ -1094,7 +1063,7 @@ static int thermal_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int thermal_profile_set(struct platform_profile_handler *pprof,
+static int thermal_profile_set(struct device *dev,
enum platform_profile_option profile)
{
if (quirks->gmode) {
@@ -1120,13 +1089,13 @@ static int thermal_profile_set(struct platform_profile_handler *pprof,
return wmax_thermal_control(supported_thermal_profiles[profile]);
}
-static int create_thermal_profile(void)
+static int thermal_profile_probe(void *drvdata, unsigned long *choices)
{
- u32 out_data;
+ enum platform_profile_option profile;
+ enum wmax_thermal_mode mode;
u8 sys_desc[4];
u32 first_mode;
- enum wmax_thermal_mode mode;
- enum platform_profile_option profile;
+ u32 out_data;
int ret;
ret = wmax_thermal_information(WMAX_OPERATION_SYS_DESCRIPTION,
@@ -1153,31 +1122,56 @@ static int create_thermal_profile(void)
profile = wmax_mode_to_platform_profile[mode];
supported_thermal_profiles[profile] = out_data;
- set_bit(profile, pp_handler.choices);
+ set_bit(profile, choices);
}
- if (bitmap_empty(pp_handler.choices, PLATFORM_PROFILE_LAST))
+ if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
return -ENODEV;
if (quirks->gmode) {
supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
WMAX_THERMAL_MODE_GMODE;
- set_bit(PLATFORM_PROFILE_PERFORMANCE, pp_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
}
- pp_handler.profile_get = thermal_profile_get;
- pp_handler.profile_set = thermal_profile_set;
-
- return platform_profile_register(&pp_handler);
+ return 0;
}
-static void remove_thermal_profile(void)
+static const struct platform_profile_ops awcc_platform_profile_ops = {
+ .probe = thermal_profile_probe,
+ .profile_get = thermal_profile_get,
+ .profile_set = thermal_profile_set,
+};
+
+static int create_thermal_profile(struct platform_device *platform_device)
{
- if (quirks->thermal)
- platform_profile_remove();
+ struct device *ppdev;
+
+ ppdev = devm_platform_profile_register(&platform_device->dev, "alienware-wmi",
+ NULL, &awcc_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(ppdev);
}
+/*
+ * Platform Driver
+ */
+static const struct attribute_group *alienfx_groups[] = {
+ &zone_attribute_group,
+ &hdmi_attribute_group,
+ &amplifier_attribute_group,
+ &deepsleep_attribute_group,
+ NULL
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "alienware-wmi",
+ .dev_groups = alienfx_groups,
+ },
+};
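
Declaring the groups through .dev_groups hands their whole lifecycle to the driver core: they are created when the device binds (with each group's is_visible evaluated at that point) and removed on unbind, so the driver keeps no sysfs_create_group()/sysfs_remove_group() pairs or unwind labels. The shape, with demo_* placeholders:

static const struct attribute_group *demo_groups[] = {
        &demo_group_a,
        &demo_group_b,
        NULL
};

static struct platform_driver demo_driver = {
        .driver = {
                .name           = "demo",
                .dev_groups     = demo_groups,  /* managed by the driver core */
        },
};
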
+
static int __init alienware_wmi_init(void)
{
int ret;
@@ -1217,26 +1211,8 @@ static int __init alienware_wmi_init(void)
if (ret)
goto fail_platform_device2;
- if (quirks->hdmi_mux > 0) {
- ret = create_hdmi(platform_device);
- if (ret)
- goto fail_prep_hdmi;
- }
-
- if (quirks->amplifier > 0) {
- ret = create_amplifier(platform_device);
- if (ret)
- goto fail_prep_amplifier;
- }
-
- if (quirks->deepslp > 0) {
- ret = create_deepsleep(platform_device);
- if (ret)
- goto fail_prep_deepsleep;
- }
-
if (quirks->thermal) {
- ret = create_thermal_profile();
+ ret = create_thermal_profile(platform_device);
if (ret)
goto fail_prep_thermal_profile;
}
@@ -1251,11 +1227,7 @@ static int __init alienware_wmi_init(void)
fail_prep_zones:
alienware_zone_exit(platform_device);
- remove_thermal_profile();
fail_prep_thermal_profile:
-fail_prep_deepsleep:
-fail_prep_amplifier:
-fail_prep_hdmi:
platform_device_del(platform_device);
fail_platform_device2:
platform_device_put(platform_device);
@@ -1269,13 +1241,9 @@ module_init(alienware_wmi_init);
static void __exit alienware_wmi_exit(void)
{
- if (platform_device) {
- alienware_zone_exit(platform_device);
- remove_hdmi(platform_device);
- remove_thermal_profile();
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
- }
+ alienware_zone_exit(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
}
module_exit(alienware_wmi_exit);
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index 0aeb8149c16b..8149be25fa26 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -163,7 +163,7 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
}
static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
ssize_t ret;
@@ -176,7 +176,7 @@ static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
ssize_t ret;
@@ -636,9 +636,9 @@ static struct notifier_block dcdbas_reboot_nb = {
.priority = INT_MIN
};
-static DCDBAS_BIN_ATTR_RW(smi_data);
+static const BIN_ATTR_ADMIN_RW(smi_data, 0);
-static struct bin_attribute *dcdbas_bin_attrs[] = {
+static const struct bin_attribute *const dcdbas_bin_attrs[] = {
&bin_attr_smi_data,
NULL
};
@@ -662,7 +662,7 @@ static struct attribute *dcdbas_dev_attrs[] = {
static const struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
- .bin_attrs = dcdbas_bin_attrs,
+ .bin_attrs_new = dcdbas_bin_attrs,
};
static int dcdbas_probe(struct platform_device *dev)
diff --git a/drivers/platform/x86/dell/dcdbas.h b/drivers/platform/x86/dell/dcdbas.h
index 942a23ddded0..a05d7f667586 100644
--- a/drivers/platform/x86/dell/dcdbas.h
+++ b/drivers/platform/x86/dell/dcdbas.h
@@ -56,14 +56,6 @@
#define DCDBAS_DEV_ATTR_WO(_name) \
DEVICE_ATTR(_name,0200,NULL,_name##_store);
-#define DCDBAS_BIN_ATTR_RW(_name) \
-struct bin_attribute bin_attr_##_name = { \
- .attr = { .name = __stringify(_name), \
- .mode = 0600 }, \
- .read = _name##_read, \
- .write = _name##_write, \
-}
-
struct smi_cmd {
__u32 magic;
__u32 ebx;
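
The dcdbas conversion drops the driver-private macro in favor of the generic, const-friendly sysfs helpers: BIN_ATTR_ADMIN_RW(name, size) declares a root-only (0600) binary attribute wired to name_read()/name_write(), and .bin_attrs_new takes pointers to const. A sketch under those assumptions, with demo_buf/demo_len/demo_store standing in for real state:

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
                         const struct bin_attribute *bin_attr,
                         char *buf, loff_t pos, size_t count)
{
        return memory_read_from_buffer(buf, count, &pos, demo_buf, demo_len);
}

static ssize_t demo_write(struct file *filp, struct kobject *kobj,
                          const struct bin_attribute *bin_attr,
                          char *buf, loff_t pos, size_t count)
{
        return demo_store(buf, pos, count);     /* illustrative helper */
}

static const BIN_ATTR_ADMIN_RW(demo, 0);        /* size 0 = unbounded */

static const struct bin_attribute *const demo_bin_attrs[] = {
        &bin_attr_demo,
        NULL
};

static const struct attribute_group demo_group = {
        .bin_attrs_new = demo_bin_attrs,
};
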
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 5671bd0deee7..58b860b88fff 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -725,8 +725,8 @@ static void dell_update_rfkill(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended;
@@ -884,7 +884,7 @@ static int __init dell_setup_rfkill(void)
pr_warn("Unable to register dell rbtn notifier\n");
goto err_filter;
} else {
- ret = i8042_install_filter(dell_laptop_i8042_filter);
+ ret = i8042_install_filter(dell_laptop_i8042_filter, NULL);
if (ret) {
pr_warn("Unable to install key filter\n");
goto err_filter;
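
i8042_install_filter() now carries a caller-supplied context pointer through to the filter on every event, so keyboard filters can reach their state without globals. Minimal shape, with the demo_* names assumed:

static bool demo_i8042_filter(unsigned char data, unsigned char str,
                              struct serio *port, void *context)
{
        struct demo_priv *priv = context;

        /* Return true to consume the event, false to pass it on */
        return demo_handle_scancode(priv, data);
}

        err = i8042_install_filter(demo_i8042_filter, priv);
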
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
new file mode 100644
index 000000000000..efe26d667973
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lis3lv02d i2c-client instantiation for ACPI SMO88xx devices without I2C resources.
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device/bus.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include "dell-smo8800-ids.h"
+
+#define LIS3_WHO_AM_I 0x0f
+
+#define DELL_LIS3LV02D_DMI_ENTRY(product_name, i2c_addr) \
+ { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), \
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, product_name), \
+ }, \
+ .driver_data = (void *)(uintptr_t)(i2c_addr), \
+ }
+
+/*
+ * The accelerometer's I2C address is not specified in DMI or ACPI,
+ * so a mapping table based on DMI product names is needed.
+ */
+static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
+ /*
+ * Dell platform team told us that these Latitude devices have
+ * ST microelectronics accelerometer at I2C address 0x29.
+ */
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5250", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5450", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5550", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6440", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6440 ATG", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6540", 0x29),
+ /*
+ * Additional individual entries were added after verification.
+ */
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude 5480", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Vostro V131", 0x1d),
+ DELL_LIS3LV02D_DMI_ENTRY("Vostro 5568", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("XPS 15 7590", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("XPS 15 9550", 0x29),
+ { }
+};
+
+static u8 i2c_addr;
+static struct i2c_client *i2c_dev;
+static bool notifier_registered;
+
+static bool probe_i2c_addr;
+module_param(probe_i2c_addr, bool, 0444);
+MODULE_PARM_DESC(probe_i2c_addr, "Probe the i801 I2C bus for the accelerometer on models where the address is unknown; this may be dangerous.");
+
+static int detect_lis3lv02d(struct i2c_adapter *adap, unsigned short addr)
+{
+ union i2c_smbus_data smbus_data;
+ int err;
+
+ dev_info(&adap->dev, "Probing for lis3lv02d on address 0x%02x\n", addr);
+
+ err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, LIS3_WHO_AM_I,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (err < 0)
+ return 0; /* Not found */
+
+ /* valid who-am-i values are from drivers/misc/lis3lv02d/lis3lv02d.c */
+ switch (smbus_data.byte) {
+ case 0x32:
+ case 0x33:
+ case 0x3a:
+ case 0x3b:
+ break;
+ default:
+ dev_warn(&adap->dev, "Unknown who-am-i register value 0x%02x\n",
+ smbus_data.byte);
+ return 0; /* Not found */
+ }
+
+ dev_info(&adap->dev,
+ "Detected lis3lv02d on address 0x%02x, please report this upstream to platform-driver-x86@vger.kernel.org so that a quirk can be added\n",
+ addr);
+
+ return 1; /* Found */
+}
+
+static bool i2c_adapter_is_main_i801(struct i2c_adapter *adap)
+{
+ /*
+ * Only match the main I801 adapter and reject secondary adapters
+ * whose names start with "SMBus I801 IDF adapter".
+ */
+ return strstarts(adap->name, "SMBus I801 adapter");
+}
+
+static int find_i801(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap, **adap_ret = data;
+
+ adap = i2c_verify_adapter(dev);
+ if (!adap)
+ return 0;
+
+ if (!i2c_adapter_is_main_i801(adap))
+ return 0;
+
+ *adap_ret = i2c_get_adapter(adap->nr);
+ return 1;
+}
+
+static void instantiate_i2c_client(struct work_struct *work)
+{
+ struct i2c_board_info info = { };
+ struct i2c_adapter *adap = NULL;
+
+ if (i2c_dev)
+ return;
+
+ /*
+ * bus_for_each_dev() and not i2c_for_each_dev() to avoid
+ * a deadlock when find_i801() calls i2c_get_adapter().
+ */
+ bus_for_each_dev(&i2c_bus_type, NULL, &adap, find_i801);
+ if (!adap)
+ return;
+
+ strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
+
+ if (i2c_addr) {
+ info.addr = i2c_addr;
+ i2c_dev = i2c_new_client_device(adap, &info);
+ } else {
+ /* First try address 0x29 (most used) and then try 0x1d */
+ static const unsigned short addr_list[] = { 0x29, 0x1d, I2C_CLIENT_END };
+
+ i2c_dev = i2c_new_scanned_device(adap, &info, addr_list, detect_lis3lv02d);
+ }
+
+ if (IS_ERR(i2c_dev)) {
+ dev_err(&adap->dev, "error %ld registering i2c_client\n", PTR_ERR(i2c_dev));
+ i2c_dev = NULL;
+ } else {
+ dev_dbg(&adap->dev, "registered lis3lv02d on address 0x%02x\n", info.addr);
+ }
+
+ i2c_put_adapter(adap);
+}
+static DECLARE_WORK(i2c_work, instantiate_i2c_client);
+
+static int i2c_bus_notify(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct i2c_client *client;
+ struct i2c_adapter *adap;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ adap = i2c_verify_adapter(dev);
+ if (!adap)
+ break;
+
+ if (i2c_adapter_is_main_i801(adap))
+ queue_work(system_long_wq, &i2c_work);
+ break;
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ client = i2c_verify_client(dev);
+ if (!client)
+ break;
+
+ if (i2c_dev == client) {
+ dev_dbg(&client->adapter->dev, "lis3lv02d i2c_client removed\n");
+ i2c_dev = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+static struct notifier_block i2c_nb = { .notifier_call = i2c_bus_notify };
+
+static int __init match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
+}
+
+static int __init dell_lis3lv02d_init(void)
+{
+ const struct dmi_system_id *lis3lv02d_dmi_id;
+ struct device *dev;
+ int err;
+
+ /*
+ * First check for a matching platform_device. This protects against
+ * SMO88xx ACPI fwnodes which actually do have an I2C resource, which
+ * will already have an i2c_client instantiated (not a platform_device).
+ */
+ dev = bus_find_device(&platform_bus_type, NULL, smo8800_ids, match_acpi_device_ids);
+ if (!dev) {
+ pr_debug("No SMO88xx platform-device found\n");
+ return 0;
+ }
+ put_device(dev);
+
+ lis3lv02d_dmi_id = dmi_first_match(lis3lv02d_devices);
+ if (!lis3lv02d_dmi_id && !probe_i2c_addr) {
+ pr_warn("accelerometer is present on SMBus but its address is unknown, skipping registration\n");
+ pr_info("Pass dell_lis3lv02d.probe_i2c_addr=1 on the kernel command line to probe, this may be dangerous!\n");
+ return 0;
+ }
+
+ if (lis3lv02d_dmi_id)
+ i2c_addr = (long)lis3lv02d_dmi_id->driver_data;
+
+ /*
+ * Register i2c-bus notifier + queue initial scan for lis3lv02d
+ * i2c_client instantiation.
+ */
+ err = bus_register_notifier(&i2c_bus_type, &i2c_nb);
+ if (err)
+ return err;
+
+ notifier_registered = true;
+
+ queue_work(system_long_wq, &i2c_work);
+ return 0;
+}
+module_init(dell_lis3lv02d_init);
+
+static void __exit dell_lis3lv02d_module_exit(void)
+{
+ if (!notifier_registered)
+ return;
+
+ bus_unregister_notifier(&i2c_bus_type, &i2c_nb);
+ cancel_work_sync(&i2c_work);
+ i2c_unregister_device(i2c_dev);
+}
+module_exit(dell_lis3lv02d_module_exit);
+
+MODULE_DESCRIPTION("lis3lv02d i2c-client instantiation for ACPI SMO88xx devices");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
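
The instantiation path above is the stock i2c-core detection flow: i2c_new_scanned_device() walks an I2C_CLIENT_END-terminated address list and binds the client at the first address the detect callback accepts. A condensed sketch using the LIS3_WHO_AM_I register defined above; the demo_* names are illustrative and 0x33 is just one of the IDs the driver accepts:

static int demo_detect(struct i2c_adapter *adap, unsigned short addr)
{
        union i2c_smbus_data data;

        if (i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, LIS3_WHO_AM_I,
                           I2C_SMBUS_BYTE_DATA, &data) < 0)
                return 0;               /* nothing answered at this address */

        return data.byte == 0x33;       /* non-zero means "found" */
}

static const unsigned short demo_addrs[] = { 0x29, 0x1d, I2C_CLIENT_END };

        struct i2c_board_info info = { };

        strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
        client = i2c_new_scanned_device(adap, &info, demo_addrs, demo_detect);
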
diff --git a/drivers/platform/x86/dell/dell-pc.c b/drivers/platform/x86/dell/dell-pc.c
index 972385ca1990..483240bb36e7 100644
--- a/drivers/platform/x86/dell/dell-pc.c
+++ b/drivers/platform/x86/dell/dell-pc.c
@@ -18,10 +18,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_profile.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dell-smbios.h"
+static struct platform_device *platform_device;
+static int supported_modes;
+
static const struct dmi_system_id dell_device_table[] __initconst = {
{
.ident = "Dell Inc.",
@@ -105,8 +109,6 @@ MODULE_DEVICE_TABLE(dmi, dell_device_table);
#define DELL_ACC_SET_FIELD GENMASK(11, 8)
#define DELL_THERMAL_SUPPORTED GENMASK(3, 0)
-static struct platform_profile_handler *thermal_handler;
-
enum thermal_mode_bits {
DELL_BALANCED = BIT(0),
DELL_COOL_BOTTOM = BIT(1),
@@ -182,7 +184,7 @@ static int thermal_set_mode(enum thermal_mode_bits state)
return dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
}
-static int thermal_platform_profile_set(struct platform_profile_handler *pprof,
+static int thermal_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
switch (profile) {
@@ -199,7 +201,7 @@ static int thermal_platform_profile_set(struct platform_profile_handler *pprof,
}
}
-static int thermal_platform_profile_get(struct platform_profile_handler *pprof,
+static int thermal_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
int ret;
@@ -228,10 +230,30 @@ static int thermal_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ if (supported_modes & DELL_QUIET)
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ if (supported_modes & DELL_COOL_BOTTOM)
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ if (supported_modes & DELL_BALANCED)
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ if (supported_modes & DELL_PERFORMANCE)
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops dell_pc_platform_profile_ops = {
+ .probe = thermal_platform_profile_probe,
+ .profile_get = thermal_platform_profile_get,
+ .profile_set = thermal_platform_profile_set,
+};
+
static int thermal_init(void)
{
+ struct device *ppdev;
int ret;
- int supported_modes;
/* If thermal commands are not supported, exit without error */
if (!dell_smbios_class_is_supported(CLASS_INFO))
@@ -244,37 +266,28 @@ static int thermal_init(void)
if (!supported_modes)
return 0;
- thermal_handler = kzalloc(sizeof(*thermal_handler), GFP_KERNEL);
- if (!thermal_handler)
- return -ENOMEM;
- thermal_handler->profile_get = thermal_platform_profile_get;
- thermal_handler->profile_set = thermal_platform_profile_set;
+ platform_device = platform_device_register_simple("dell-pc", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(platform_device))
+ return PTR_ERR(platform_device);
- if (supported_modes & DELL_QUIET)
- set_bit(PLATFORM_PROFILE_QUIET, thermal_handler->choices);
- if (supported_modes & DELL_COOL_BOTTOM)
- set_bit(PLATFORM_PROFILE_COOL, thermal_handler->choices);
- if (supported_modes & DELL_BALANCED)
- set_bit(PLATFORM_PROFILE_BALANCED, thermal_handler->choices);
- if (supported_modes & DELL_PERFORMANCE)
- set_bit(PLATFORM_PROFILE_PERFORMANCE, thermal_handler->choices);
-
- /* Clean up if failed */
- ret = platform_profile_register(thermal_handler);
- if (ret) {
- kfree(thermal_handler);
- thermal_handler = NULL;
+ ppdev = devm_platform_profile_register(&platform_device->dev, "dell-pc",
+ NULL, &dell_pc_platform_profile_ops);
+ if (IS_ERR(ppdev)) {
+ ret = PTR_ERR(ppdev);
+ goto cleanup_platform_device;
}
+ return 0;
+
+cleanup_platform_device:
+ platform_device_unregister(platform_device);
+
return ret;
}
static void thermal_cleanup(void)
{
- if (thermal_handler) {
- platform_profile_remove();
- kfree(thermal_handler);
- }
+ platform_device_unregister(platform_device);
}
static int __init dell_init(void)
diff --git a/drivers/platform/x86/dell/dell-smo8800-ids.h b/drivers/platform/x86/dell/dell-smo8800-ids.h
new file mode 100644
index 000000000000..ec58e229ba7a
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smo8800-ids.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ACPI SMO88XX lis3lv02d freefall / accelerometer device-ids.
+ *
+ * Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
+ * Copyright (C) 2014 Pali Rohár <pali@kernel.org>
+ */
+#ifndef _DELL_SMO8800_IDS_H_
+#define _DELL_SMO8800_IDS_H_
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+static const struct acpi_device_id smo8800_ids[] = {
+ { "SMO8800" },
+ { "SMO8801" },
+ { "SMO8810" },
+ { "SMO8811" },
+ { "SMO8820" },
+ { "SMO8821" },
+ { "SMO8830" },
+ { "SMO8831" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smo8800_ids);
+
+#endif
diff --git a/drivers/platform/x86/dell/dell-smo8800.c b/drivers/platform/x86/dell/dell-smo8800.c
index 87fe03f23f24..8872f9b57fce 100644
--- a/drivers/platform/x86/dell/dell-smo8800.c
+++ b/drivers/platform/x86/dell/dell-smo8800.c
@@ -14,10 +14,10 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
+#include "dell-smo8800-ids.h"
struct smo8800_device {
u32 irq; /* acpi device irq */
@@ -163,20 +163,6 @@ static void smo8800_remove(struct platform_device *device)
dev_dbg(&device->dev, "device /dev/freefall unregistered\n");
}
-/* NOTE: Keep this list in sync with drivers/i2c/busses/i2c-i801.c */
-static const struct acpi_device_id smo8800_ids[] = {
- { "SMO8800", 0 },
- { "SMO8801", 0 },
- { "SMO8810", 0 },
- { "SMO8811", 0 },
- { "SMO8820", 0 },
- { "SMO8821", 0 },
- { "SMO8830", 0 },
- { "SMO8831", 0 },
- { "", 0 },
-};
-MODULE_DEVICE_TABLE(acpi, smo8800_ids);
-
static struct platform_driver smo8800_driver = {
.probe = smo8800_probe,
.remove = smo8800_remove,
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index 6e5dc7e3674f..50002ef13d5d 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -159,7 +159,7 @@ static int dell_uart_set_bl_power(struct dell_uart_backlight *dell_bl, int power
set_power[0] = DELL_SOF(SET_CMD_LEN);
set_power[1] = CMD_SET_BL_POWER;
- set_power[2] = (power == FB_BLANK_UNBLANK) ? 1 : 0;
+ set_power[2] = (power == BACKLIGHT_POWER_ON) ? 1 : 0;
set_power[3] = dell_uart_checksum(set_power, 3);
ret = dell_uart_bl_command(dell_bl, set_power, SET_CMD_LEN, resp, SET_RESP_LEN);
@@ -283,6 +283,9 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
init_waitqueue_head(&dell_bl->wait_queue);
dell_bl->dev = dev;
+ serdev_device_set_drvdata(serdev, dell_bl);
+ serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
@@ -290,8 +293,6 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
/* 9600 bps, no flow control, these are the default but set them to be sure */
serdev_device_set_baudrate(serdev, 9600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, dell_bl);
- serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
get_version[0] = DELL_SOF(GET_CMD_LEN);
get_version[1] = CMD_GET_VERSION;
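
The two moved lines are an ordering fix: drvdata and the client ops must be in place before the port is opened, because the receive callback may fire as soon as devm_serdev_device_open() returns. The safe probe shape, with demo_* placeholders:

static int demo_serdev_probe(struct serdev_device *serdev)
{
        struct demo_bl *priv;
        int ret;

        priv = devm_kzalloc(&serdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Wire up state and callbacks first ... */
        serdev_device_set_drvdata(serdev, priv);
        serdev_device_set_client_ops(serdev, &demo_serdev_ops);

        /* ... and only then open the port and start I/O */
        ret = devm_serdev_device_open(&serdev->dev, serdev);
        if (ret)
                return ret;

        serdev_device_set_baudrate(serdev, 9600);
        serdev_device_set_flow_control(serdev, false);

        return 0;
}
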
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 40ddc6eb7562..d00389b860e4 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -25,7 +25,6 @@ struct wmi_sysman_priv wmi_priv = {
/* reset bios to defaults */
static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
static int reset_option = -1;
-static const struct class *fw_attr_class;
/**
@@ -541,15 +540,11 @@ static int __init sysman_init(void)
goto err_exit_bios_attr_pass_interface;
}
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- goto err_exit_bios_attr_pass_interface;
-
- wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME);
if (IS_ERR(wmi_priv.class_dev)) {
ret = PTR_ERR(wmi_priv.class_dev);
- goto err_unregister_class;
+ goto err_exit_bios_attr_pass_interface;
}
wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
@@ -602,10 +597,7 @@ err_release_attributes_data:
release_attributes_data();
err_destroy_classdev:
- device_destroy(fw_attr_class, MKDEV(0, 0));
-
-err_unregister_class:
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
err_exit_bios_attr_pass_interface:
exit_bios_attr_pass_interface();
@@ -619,8 +611,7 @@ err_exit_bios_attr_set_interface:
static void __exit sysman_exit(void)
{
release_attributes_data();
- device_destroy(fw_attr_class, MKDEV(0, 0));
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
exit_bios_attr_set_interface();
exit_bios_attr_pass_interface();
}
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index 9f51e0fcab04..e30ca325938c 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -475,7 +475,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
}
static ssize_t data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
ssize_t ret_count = 0;
@@ -492,7 +492,7 @@ static ssize_t data_read(struct file *filp, struct kobject *kobj,
spin_unlock(&rbu_data.lock);
return ret_count;
}
-static BIN_ATTR_RO(data, 0);
+static const BIN_ATTR_RO(data, 0);
static void callbackfn_rbu(const struct firmware *fw, void *context)
{
@@ -530,7 +530,7 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
}
static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int size = 0;
@@ -540,7 +540,7 @@ static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
}
static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int rc = count;
@@ -597,10 +597,10 @@ static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RW(image_type, 0);
+static const BIN_ATTR_RW(image_type, 0);
static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int size = 0;
@@ -613,7 +613,7 @@ static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
}
static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
unsigned long temp;
@@ -626,9 +626,9 @@ static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
spin_unlock(&rbu_data.lock);
return count;
}
-static BIN_ATTR_RW(packet_size, 0);
+static const BIN_ATTR_RW(packet_size, 0);
-static struct bin_attribute *rbu_bin_attrs[] = {
+static const struct bin_attribute *const rbu_bin_attrs[] = {
&bin_attr_data,
&bin_attr_image_type,
&bin_attr_packet_size,
@@ -636,7 +636,7 @@ static struct bin_attribute *rbu_bin_attrs[] = {
};
static const struct attribute_group rbu_group = {
- .bin_attrs = rbu_bin_attrs,
+ .bin_attrs_new = rbu_bin_attrs,
};
static int __init dcdrbu_init(void)
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index 182a07d8ae3d..736e96c186d9 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -2,51 +2,25 @@
/* Firmware attributes class helper module */
-#include <linux/mutex.h>
-#include <linux/device/class.h>
#include <linux/module.h>
#include "firmware_attributes_class.h"
-static DEFINE_MUTEX(fw_attr_lock);
-static int fw_attr_inuse;
-
-static const struct class firmware_attributes_class = {
+const struct class firmware_attributes_class = {
.name = "firmware-attributes",
};
+EXPORT_SYMBOL_GPL(firmware_attributes_class);
-int fw_attributes_class_get(const struct class **fw_attr_class)
+static __init int fw_attributes_class_init(void)
{
- int err;
-
- mutex_lock(&fw_attr_lock);
- if (!fw_attr_inuse) { /*first time class is being used*/
- err = class_register(&firmware_attributes_class);
- if (err) {
- mutex_unlock(&fw_attr_lock);
- return err;
- }
- }
- fw_attr_inuse++;
- *fw_attr_class = &firmware_attributes_class;
- mutex_unlock(&fw_attr_lock);
- return 0;
+ return class_register(&firmware_attributes_class);
}
-EXPORT_SYMBOL_GPL(fw_attributes_class_get);
+module_init(fw_attributes_class_init);
-int fw_attributes_class_put(void)
+static __exit void fw_attributes_class_exit(void)
{
- mutex_lock(&fw_attr_lock);
- if (!fw_attr_inuse) {
- mutex_unlock(&fw_attr_lock);
- return -EINVAL;
- }
- fw_attr_inuse--;
- if (!fw_attr_inuse) /* No more consumers */
- class_unregister(&firmware_attributes_class);
- mutex_unlock(&fw_attr_lock);
- return 0;
+ class_unregister(&firmware_attributes_class);
}
-EXPORT_SYMBOL_GPL(fw_attributes_class_put);
+module_exit(fw_attributes_class_exit);
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("Firmware attributes class helper module");
diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
index 363c75f1ac1b..d27abe54fcf9 100644
--- a/drivers/platform/x86/firmware_attributes_class.h
+++ b/drivers/platform/x86/firmware_attributes_class.h
@@ -5,7 +5,8 @@
#ifndef FW_ATTR_CLASS_H
#define FW_ATTR_CLASS_H
-int fw_attributes_class_get(const struct class **fw_attr_class);
-int fw_attributes_class_put(void);
+#include <linux/device/class.h>
+
+extern const struct class firmware_attributes_class;
#endif /* FW_ATTR_CLASS_H */
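
With the class registered once at module init and exported, each consumer simply creates its device against it and the refcounted get/put dance disappears. Consumer-side sketch (the "demo" name is illustrative):

#include "firmware_attributes_class.h"

        struct device *class_dev;

        class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
                                  NULL, "%s", "demo");
        if (IS_ERR(class_dev))
                return PTR_ERR(class_dev);

        /* ... populate attributes under class_dev ... */

        device_destroy(&firmware_attributes_class, MKDEV(0, 0));
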
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ae992ac1ab4a..a0eae24ca9e6 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -505,8 +505,8 @@ static int acpi_fujitsu_bl_add(struct acpi_device *device)
return -ENOMEM;
fujitsu_bl = priv;
- strcpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ strscpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
pr_info("ACPI: %s [%s]\n",
@@ -891,8 +891,8 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
WARN_ONCE(fext, "More than one FUJ02E3 ACPI device was found. Driver may not work as intended.");
fext = device;
- strcpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ strscpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
/* kfifo */
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 2dc50152158a..0b277b7e37dd 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = {
.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
};
-static const struct class *fw_attr_class;
-
ssize_t display_name_language_code_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -972,11 +970,7 @@ static int __init hp_init(void)
if (ret)
return ret;
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- goto err_unregister_class;
-
- bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ bioscfg_drv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME);
if (IS_ERR(bioscfg_drv.class_dev)) {
ret = PTR_ERR(bioscfg_drv.class_dev);
@@ -1043,10 +1037,9 @@ err_release_attributes_data:
release_attributes_data();
err_destroy_classdev:
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
err_unregister_class:
- fw_attributes_class_put();
hp_exit_attr_set_interface();
return ret;
@@ -1055,9 +1048,8 @@ err_unregister_class:
static void __exit hp_exit(void)
{
release_attributes_data();
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
- fw_attributes_class_put();
hp_exit_attr_set_interface();
}
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 20c55bab3b8c..db5fdee2109c 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -45,6 +45,10 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
#define HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET 0x63
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+#define HP_FAN_SPEED_AUTOMATIC 0x00
+#define HP_POWER_LIMIT_DEFAULT 0x00
+#define HP_POWER_LIMIT_NO_CHANGE 0xFF
+
#define ACPI_AC_CLASS "ac_adapter"
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
@@ -83,11 +87,16 @@ static const char * const omen_timed_thermal_profile_boards[] = {
"8BAD", "8A42", "8A15"
};
-/* DMI Board names of Victus laptops */
+/* DMI Board names of Victus 16-d1xxx laptops */
static const char * const victus_thermal_profile_boards[] = {
"8A25"
};
+/* DMI Board names of Victus 16-s1000 laptops */
+static const char * const victus_s_thermal_profile_boards[] = {
+ "8C9C"
+};
+
enum hp_wmi_radio {
HPWMI_WIFI = 0x0,
HPWMI_BLUETOOTH = 0x1,
@@ -147,12 +156,32 @@ enum hp_wmi_commandtype {
HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
};
+struct victus_power_limits {
+ u8 pl1;
+ u8 pl2;
+ u8 pl4;
+ u8 cpu_gpu_concurrent_limit;
+};
+
+struct victus_gpu_power_modes {
+ u8 ctgp_enable;
+ u8 ppab_enable;
+ u8 dstate;
+ u8 gpu_slowdown_temp;
+};
+
enum hp_wmi_gm_commandtype {
- HPWMI_FAN_SPEED_GET_QUERY = 0x11,
- HPWMI_SET_PERFORMANCE_MODE = 0x1A,
- HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
- HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
- HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+ HPWMI_FAN_SPEED_GET_QUERY = 0x11,
+ HPWMI_SET_PERFORMANCE_MODE = 0x1A,
+ HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
+ HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
+ HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+ HPWMI_FAN_COUNT_GET_QUERY = 0x10,
+ HPWMI_GET_GPU_THERMAL_MODES_QUERY = 0x21,
+ HPWMI_SET_GPU_THERMAL_MODES_QUERY = 0x22,
+ HPWMI_SET_POWER_LIMITS_QUERY = 0x29,
+ HPWMI_VICTUS_S_FAN_SPEED_GET_QUERY = 0x2D,
+ HPWMI_FAN_SPEED_SET_QUERY = 0x2E,
};
enum hp_wmi_command {
@@ -211,6 +240,11 @@ enum hp_thermal_profile_victus {
HP_VICTUS_THERMAL_PROFILE_QUIET = 0x03,
};
+enum hp_thermal_profile_victus_s {
+ HP_VICTUS_S_THERMAL_PROFILE_DEFAULT = 0x00,
+ HP_VICTUS_S_THERMAL_PROFILE_PERFORMANCE = 0x01,
+};
+
enum hp_thermal_profile {
HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
HP_THERMAL_PROFILE_DEFAULT = 0x01,
@@ -273,7 +307,7 @@ static DEFINE_MUTEX(active_platform_profile_lock);
static struct input_dev *hp_wmi_input_dev;
static struct input_dev *camera_shutter_input_dev;
static struct platform_device *hp_wmi_platform_dev;
-static struct platform_profile_handler platform_profile_handler;
+static struct device *platform_profile_device;
static struct notifier_block platform_power_source_nb;
static enum platform_profile_option active_platform_profile;
static bool platform_profile_support;
@@ -411,6 +445,26 @@ out_free:
return ret;
}
+/*
+ * Calling hp_wmi_get_fan_count_userdefine_trigger() also enables and/or
+ * keeps the laptop in user-defined thermal and fan states instead of a
+ * fallback state. After a 120-second timeout, however, the laptop goes
+ * back to its fallback state.
+ */
+static int hp_wmi_get_fan_count_userdefine_trigger(void)
+{
+ u8 fan_data[4] = {};
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_COUNT_GET_QUERY, HPWMI_GM,
+ &fan_data, sizeof(u8),
+ sizeof(fan_data));
+ if (ret != 0)
+ return -EINVAL;
+
+ return fan_data[0]; /* The other bytes don't provide the fan count */
+}
+
static int hp_wmi_get_fan_speed(int fan)
{
u8 fsh, fsl;
@@ -429,6 +483,23 @@ static int hp_wmi_get_fan_speed(int fan)
return (fsh << 8) | fsl;
}
+static int hp_wmi_get_fan_speed_victus_s(int fan)
+{
+ u8 fan_data[128] = {};
+ int ret;
+
+ if (fan < 0 || fan >= sizeof(fan_data))
+ return -EINVAL;
+
+ ret = hp_wmi_perform_query(HPWMI_VICTUS_S_FAN_SPEED_GET_QUERY,
+ HPWMI_GM, &fan_data, sizeof(u8),
+ sizeof(fan_data));
+ if (ret != 0)
+ return -EINVAL;
+
+ return fan_data[fan] * 100;
+}
+
static int hp_wmi_read_int(int query)
{
int val = 0, ret;
@@ -557,6 +628,30 @@ static int hp_wmi_fan_speed_max_set(int enabled)
return enabled;
}
+static int hp_wmi_fan_speed_reset(void)
+{
+ u8 fan_speed[2] = { HP_FAN_SPEED_AUTOMATIC, HP_FAN_SPEED_AUTOMATIC };
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_SET_QUERY, HPWMI_GM,
+ &fan_speed, sizeof(fan_speed), 0);
+
+ return ret;
+}
+
+static int hp_wmi_fan_speed_max_reset(void)
+{
+ int ret;
+
+ ret = hp_wmi_fan_speed_max_set(0);
+ if (ret)
+ return ret;
+
+ /* Disabling max fan speed on Victus s1xxx laptops needs a 2nd step: */
+ ret = hp_wmi_fan_speed_reset();
+ return ret;
+}
+
static int hp_wmi_fan_speed_max_get(void)
{
int val = 0, ret;
@@ -1221,7 +1316,7 @@ static int platform_profile_omen_get_ec(enum platform_profile_option *profile)
return 0;
}
-static int platform_profile_omen_get(struct platform_profile_handler *pprof,
+static int platform_profile_omen_get(struct device *dev,
enum platform_profile_option *profile)
{
/*
@@ -1318,7 +1413,7 @@ static int platform_profile_omen_set_ec(enum platform_profile_option profile)
return 0;
}
-static int platform_profile_omen_set(struct platform_profile_handler *pprof,
+static int platform_profile_omen_set(struct device *dev,
enum platform_profile_option profile)
{
int err;
@@ -1345,7 +1440,7 @@ static int thermal_profile_set(int thermal_profile)
sizeof(thermal_profile), 0);
}
-static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+static int hp_wmi_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
int tp;
@@ -1374,7 +1469,7 @@ static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+static int hp_wmi_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
int err, tp;
@@ -1440,11 +1535,11 @@ static int platform_profile_victus_get_ec(enum platform_profile_option *profile)
return 0;
}
-static int platform_profile_victus_get(struct platform_profile_handler *pprof,
+static int platform_profile_victus_get(struct device *dev,
enum platform_profile_option *profile)
{
/* Same behaviour as platform_profile_omen_get */
- return platform_profile_omen_get(pprof, profile);
+ return platform_profile_omen_get(dev, profile);
}
static int platform_profile_victus_set_ec(enum platform_profile_option profile)
@@ -1472,7 +1567,162 @@ static int platform_profile_victus_set_ec(enum platform_profile_option profile)
return 0;
}
-static int platform_profile_victus_set(struct platform_profile_handler *pprof,
+static bool is_victus_s_thermal_profile(void)
+{
+ const char *board_name;
+
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (!board_name)
+ return false;
+
+ return match_string(victus_s_thermal_profile_boards,
+ ARRAY_SIZE(victus_s_thermal_profile_boards),
+ board_name) >= 0;
+}
+
+static int victus_s_gpu_thermal_profile_get(bool *ctgp_enable,
+ bool *ppab_enable,
+ u8 *dstate,
+ u8 *gpu_slowdown_temp)
+{
+ struct victus_gpu_power_modes gpu_power_modes;
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_GET_GPU_THERMAL_MODES_QUERY, HPWMI_GM,
+ &gpu_power_modes, sizeof(gpu_power_modes),
+ sizeof(gpu_power_modes));
+ if (ret == 0) {
+ *ctgp_enable = gpu_power_modes.ctgp_enable ? true : false;
+ *ppab_enable = gpu_power_modes.ppab_enable ? true : false;
+ *dstate = gpu_power_modes.dstate;
+ *gpu_slowdown_temp = gpu_power_modes.gpu_slowdown_temp;
+ }
+
+ return ret;
+}
+
+static int victus_s_gpu_thermal_profile_set(bool ctgp_enable,
+ bool ppab_enable,
+ u8 dstate)
+{
+ struct victus_gpu_power_modes gpu_power_modes;
+ int ret;
+
+ bool current_ctgp_state, current_ppab_state;
+ u8 current_dstate, current_gpu_slowdown_temp;
+
+ /* Retrieve the GPU slowdown temperature so it can be kept unchanged */
+ ret = victus_s_gpu_thermal_profile_get(&current_ctgp_state,
+ &current_ppab_state,
+ &current_dstate,
+ &current_gpu_slowdown_temp);
+ if (ret < 0) {
+ pr_warn("GPU modes not updated, unable to get slowdown temp\n");
+ return ret;
+ }
+
+ gpu_power_modes.ctgp_enable = ctgp_enable ? 0x01 : 0x00;
+ gpu_power_modes.ppab_enable = ppab_enable ? 0x01 : 0x00;
+ gpu_power_modes.dstate = dstate;
+ gpu_power_modes.gpu_slowdown_temp = current_gpu_slowdown_temp;
+
+ ret = hp_wmi_perform_query(HPWMI_SET_GPU_THERMAL_MODES_QUERY, HPWMI_GM,
+ &gpu_power_modes, sizeof(gpu_power_modes), 0);
+
+ return ret;
+}
+
+/* Note: HP_POWER_LIMIT_DEFAULT can be used to restore default PL1 and PL2 */
+static int victus_s_set_cpu_pl1_pl2(u8 pl1, u8 pl2)
+{
+ struct victus_power_limits power_limits;
+ int ret;
+
+ /* We need to know both PL1 and PL2 values in order to check them */
+ if (pl1 == HP_POWER_LIMIT_NO_CHANGE || pl2 == HP_POWER_LIMIT_NO_CHANGE)
+ return -EINVAL;
+
+ /* PL2 is not supposed to be lower than PL1 */
+ if (pl2 < pl1)
+ return -EINVAL;
+
+ power_limits.pl1 = pl1;
+ power_limits.pl2 = pl2;
+ power_limits.pl4 = HP_POWER_LIMIT_NO_CHANGE;
+ power_limits.cpu_gpu_concurrent_limit = HP_POWER_LIMIT_NO_CHANGE;
+
+ ret = hp_wmi_perform_query(HPWMI_SET_POWER_LIMITS_QUERY, HPWMI_GM,
+ &power_limits, sizeof(power_limits), 0);
+
+ return ret;
+}
+
+static int platform_profile_victus_s_set_ec(enum platform_profile_option profile)
+{
+ bool gpu_ctgp_enable, gpu_ppab_enable;
+ u8 gpu_dstate; /* Tests show 1 = 100%, 2 = 50%, 3 = 25%, 4 = 12.5% */
+ int err, tp;
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_PERFORMANCE;
+ gpu_ctgp_enable = true;
+ gpu_ppab_enable = true;
+ gpu_dstate = 1;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_DEFAULT;
+ gpu_ctgp_enable = false;
+ gpu_ppab_enable = true;
+ gpu_dstate = 1;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_DEFAULT;
+ gpu_ctgp_enable = false;
+ gpu_ppab_enable = false;
+ gpu_dstate = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hp_wmi_get_fan_count_userdefine_trigger();
+
+ err = omen_thermal_profile_set(tp);
+ if (err < 0) {
+ pr_err("Failed to set platform profile %d: %d\n", profile, err);
+ return err;
+ }
+
+ err = victus_s_gpu_thermal_profile_set(gpu_ctgp_enable,
+ gpu_ppab_enable,
+ gpu_dstate);
+ if (err < 0) {
+ pr_err("Failed to set GPU profile %d: %d\n", profile, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int platform_profile_victus_s_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ int err;
+
+ guard(mutex)(&active_platform_profile_lock);
+
+ err = platform_profile_victus_s_set_ec(profile);
+ if (err < 0)
+ return err;
+
+ active_platform_profile = profile;
+
+ return 0;
+}
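
guard(mutex)(...) from <linux/cleanup.h> takes the lock at the declaration and releases it automatically on every exit from the scope, which is why the early error return above needs no unlock. Equivalent standalone shape, with demo_lock/demo_state assumed:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_update(int value)
{
        guard(mutex)(&demo_lock);       /* released automatically at scope exit */

        if (value < 0)
                return -EINVAL;         /* no explicit mutex_unlock() needed */

        demo_state = value;
        return 0;
}
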
+
+static int platform_profile_victus_set(struct device *dev,
enum platform_profile_option profile)
{
int err;
@@ -1488,6 +1738,26 @@ static int platform_profile_victus_set(struct platform_profile_handler *pprof,
return 0;
}
+static int hp_wmi_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ if (is_omen_thermal_profile()) {
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ } else if (is_victus_thermal_profile()) {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ } else if (is_victus_s_thermal_profile()) {
+ /* Adding an equivalent to HP Omen software ECO mode: */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ } else {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ }
+
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
static int omen_powersource_event(struct notifier_block *nb,
unsigned long value,
void *data)
@@ -1545,6 +1815,39 @@ static int omen_powersource_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int victus_s_powersource_event(struct notifier_block *nb,
+ unsigned long value,
+ void *data)
+{
+ struct acpi_bus_event *event_entry = data;
+ int err;
+
+ if (strcmp(event_entry->device_class, ACPI_AC_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ pr_debug("Received power source device event\n");
+
+ /*
+ * Switching to battery power source while Performance mode is active
+ * needs manual triggering of CPU power limits. Same goes when switching
+ * to AC power source while Performance mode is active. The other
+ * modes, however, behave correctly without any manual action.
+ * Seen on HP 16-s1034nf (board 8C9C) with F.11 and F.13 BIOS versions.
+ */
+
+ if (active_platform_profile == PLATFORM_PROFILE_PERFORMANCE) {
+ pr_debug("Triggering CPU PL1/PL2 actualization\n");
+ err = victus_s_set_cpu_pl1_pl2(HP_POWER_LIMIT_DEFAULT,
+ HP_POWER_LIMIT_DEFAULT);
+ if (err)
+ pr_warn("Failed to actualize power limits: %d\n", err);
+
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
static int omen_register_powersource_event_handler(void)
{
int err;
@@ -1560,13 +1863,57 @@ static int omen_register_powersource_event_handler(void)
return 0;
}
+static int victus_s_register_powersource_event_handler(void)
+{
+ int err;
+
+ platform_power_source_nb.notifier_call = victus_s_powersource_event;
+ err = register_acpi_notifier(&platform_power_source_nb);
+ if (err < 0) {
+ pr_warn("Failed to install ACPI power source notify handler\n");
+ return err;
+ }
+
+ return 0;
+}
+
static inline void omen_unregister_powersource_event_handler(void)
{
unregister_acpi_notifier(&platform_power_source_nb);
}
-static int thermal_profile_setup(void)
+static inline void victus_s_unregister_powersource_event_handler(void)
{
+ unregister_acpi_notifier(&platform_power_source_nb);
+}
+
+static const struct platform_profile_ops platform_profile_omen_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_omen_get,
+ .profile_set = platform_profile_omen_set,
+};
+
+static const struct platform_profile_ops platform_profile_victus_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_victus_get,
+ .profile_set = platform_profile_victus_set,
+};
+
+static const struct platform_profile_ops platform_profile_victus_s_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_omen_get,
+ .profile_set = platform_profile_victus_s_set,
+};
+
+static const struct platform_profile_ops hp_wmi_platform_profile_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = hp_wmi_platform_profile_get,
+ .profile_set = hp_wmi_platform_profile_set,
+};
+
+static int thermal_profile_setup(struct platform_device *device)
+{
+ const struct platform_profile_ops *ops;
int err, tp;
if (is_omen_thermal_profile()) {
@@ -1582,10 +1929,7 @@ static int thermal_profile_setup(void)
if (err < 0)
return err;
- platform_profile_handler.profile_get = platform_profile_omen_get;
- platform_profile_handler.profile_set = platform_profile_omen_set;
-
- set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ ops = &platform_profile_omen_ops;
} else if (is_victus_thermal_profile()) {
err = platform_profile_victus_get_ec(&active_platform_profile);
if (err < 0)
@@ -1599,10 +1943,19 @@ static int thermal_profile_setup(void)
if (err < 0)
return err;
- platform_profile_handler.profile_get = platform_profile_victus_get;
- platform_profile_handler.profile_set = platform_profile_victus_set;
+ ops = &platform_profile_victus_ops;
+ } else if (is_victus_s_thermal_profile()) {
+ /*
+ * The laptop's current thermal profile cannot be retrieved during
+ * this setup, so default it to Balanced.
+ */
+ active_platform_profile = PLATFORM_PROFILE_BALANCED;
+
+ err = platform_profile_victus_s_set_ec(active_platform_profile);
+ if (err < 0)
+ return err;
- set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
+ ops = &platform_profile_victus_s_ops;
} else {
tp = thermal_profile_get();
@@ -1617,20 +1970,15 @@ static int thermal_profile_setup(void)
if (err)
return err;
- platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
- platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ ops = &hp_wmi_platform_profile_ops;
}
- set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
-
- err = platform_profile_register(&platform_profile_handler);
- if (err)
- return err;
+ platform_profile_device = devm_platform_profile_register(&device->dev, "hp-wmi",
+ NULL, ops);
+ if (IS_ERR(platform_profile_device))
+ return PTR_ERR(platform_profile_device);
+ pr_info("Registered as platform profile handler\n");
platform_profile_support = true;
return 0;
@@ -1663,7 +2011,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (err < 0)
return err;
- thermal_profile_setup();
+ thermal_profile_setup(device);
return 0;
}
@@ -1689,9 +2037,6 @@ static void __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_unregister(wwan_rfkill);
rfkill_destroy(wwan_rfkill);
}
-
- if (platform_profile_support)
- platform_profile_remove();
}
static int hp_wmi_resume_handler(struct device *device)
@@ -1759,8 +2104,13 @@ static umode_t hp_wmi_hwmon_is_visible(const void *data,
case hwmon_pwm:
return 0644;
case hwmon_fan:
- if (hp_wmi_get_fan_speed(channel) >= 0)
- return 0444;
+ if (is_victus_s_thermal_profile()) {
+ if (hp_wmi_get_fan_speed_victus_s(channel) >= 0)
+ return 0444;
+ } else {
+ if (hp_wmi_get_fan_speed(channel) >= 0)
+ return 0444;
+ }
break;
default:
return 0;
@@ -1776,8 +2126,10 @@ static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
switch (type) {
case hwmon_fan:
- ret = hp_wmi_get_fan_speed(channel);
-
+ if (is_victus_s_thermal_profile())
+ ret = hp_wmi_get_fan_speed_victus_s(channel);
+ else
+ ret = hp_wmi_get_fan_speed(channel);
if (ret < 0)
return ret;
*val = ret;
@@ -1810,11 +2162,17 @@ static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_pwm:
switch (val) {
case 0:
+ if (is_victus_s_thermal_profile())
+ hp_wmi_get_fan_count_userdefine_trigger();
/* 0 is no fan speed control (max), which is 1 for us */
return hp_wmi_fan_speed_max_set(1);
case 2:
/* 2 is automatic speed control, which is 0 for us */
- return hp_wmi_fan_speed_max_set(0);
+ if (is_victus_s_thermal_profile()) {
+ hp_wmi_get_fan_count_userdefine_trigger();
+ return hp_wmi_fan_speed_max_reset();
+ } else
+ return hp_wmi_fan_speed_max_set(0);
default:
/* we don't support manual fan speed control */
return -EINVAL;
@@ -1893,6 +2251,10 @@ static int __init hp_wmi_init(void)
err = omen_register_powersource_event_handler();
if (err)
goto err_unregister_device;
+ } else if (is_victus_s_thermal_profile()) {
+ err = victus_s_register_powersource_event_handler();
+ if (err)
+ goto err_unregister_device;
}
return 0;
@@ -1912,6 +2274,9 @@ static void __exit hp_wmi_exit(void)
if (is_omen_thermal_profile() || is_victus_thermal_profile())
omen_unregister_powersource_event_handler();
+ if (is_victus_s_thermal_profile())
+ victus_s_unregister_powersource_event_handler();
+
if (wmi_has_guid(HPWMI_EVENT_GUID))
hp_wmi_input_destroy();
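All of the hp-wmi conversions above follow the same shape: the global
struct platform_profile_handler is replaced by a const struct
platform_profile_ops, the supported choices move out of the setup code into
the .probe() callback, and registration becomes device-managed so the
remove path disappears. A minimal sketch of the new API as visible in this
diff (the driver name "foo" and the trivial state handling are
hypothetical):

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_profile.h>

static enum platform_profile_option foo_profile = PLATFORM_PROFILE_BALANCED;

static int foo_platform_profile_probe(void *drvdata, unsigned long *choices)
{
	/* Advertise supported profiles; replaces writing handler.choices */
	set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);

	return 0;
}

static int foo_platform_profile_get(struct device *dev,
				    enum platform_profile_option *profile)
{
	*profile = foo_profile;
	return 0;
}

static int foo_platform_profile_set(struct device *dev,
				    enum platform_profile_option profile)
{
	/* A real driver would program the EC/firmware here */
	foo_profile = profile;
	return 0;
}

static const struct platform_profile_ops foo_platform_profile_ops = {
	.probe = foo_platform_profile_probe,
	.profile_get = foo_platform_profile_get,
	.profile_set = foo_platform_profile_set,
};

static int foo_thermal_profile_setup(struct platform_device *pdev)
{
	struct device *ppdev;

	/* Device-managed: no platform_profile_remove() on any exit path */
	ppdev = devm_platform_profile_register(&pdev->dev, "foo", NULL,
					       &foo_platform_profile_ops);
	if (IS_ERR(ppdev))
		return PTR_ERR(ppdev);

	return 0;
}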
diff --git a/drivers/platform/x86/hp/hp_accel.c b/drivers/platform/x86/hp/hp_accel.c
index 39a6530f5072..10d5af18d639 100644
--- a/drivers/platform/x86/hp/hp_accel.c
+++ b/drivers/platform/x86/hp/hp_accel.c
@@ -267,7 +267,7 @@ static struct delayed_led_classdev hpled_led = {
};
static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended;
@@ -326,7 +326,7 @@ static int lis3lv02d_probe(struct platform_device *device)
/* filter to remove HPQ6000 accelerometer data
* from keyboard bus stream */
if (strstr(dev_name(&device->dev), "HPQ6000"))
- i8042_install_filter(hp_accel_i8042_filter);
+ i8042_install_filter(hp_accel_i8042_filter, NULL);
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
ret = led_classdev_register(NULL, &hpled_led.led_classdev);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index e980dd18e5f6..dfb5d4b8c046 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -142,7 +142,7 @@ enum {
struct ideapad_dytc_priv {
enum platform_profile_option current_profile;
- struct platform_profile_handler pprof;
+ struct device *ppdev; /* platform profile device */
struct mutex mutex; /* protects the DYTC interface */
struct ideapad_private *priv;
};
@@ -933,10 +933,10 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
* dytc_profile_get: Function to register with platform_profile
* handler. Returns current platform profile.
*/
-static int dytc_profile_get(struct platform_profile_handler *pprof,
+static int dytc_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);
*profile = dytc->current_profile;
return 0;
@@ -986,10 +986,10 @@ static int dytc_cql_command(struct ideapad_private *priv, unsigned long cmd,
* dytc_profile_set: Function to register with platform_profile
* handler. Sets current platform profile.
*/
-static int dytc_profile_set(struct platform_profile_handler *pprof,
+static int dytc_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);
struct ideapad_private *priv = dytc->priv;
unsigned long output;
int err;
@@ -1023,6 +1023,15 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
return -EINTR;
}
+static int dytc_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
static void dytc_profile_refresh(struct ideapad_private *priv)
{
enum platform_profile_option profile;
@@ -1041,7 +1050,7 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
if (profile != priv->dytc->current_profile) {
priv->dytc->current_profile = profile;
- platform_profile_notify();
+ platform_profile_notify(priv->dytc->ppdev);
}
}
@@ -1063,6 +1072,12 @@ static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
{}
};
+static const struct platform_profile_ops dytc_profile_ops = {
+ .probe = dytc_profile_probe,
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
static int ideapad_dytc_profile_init(struct ideapad_private *priv)
{
int err, dytc_version;
@@ -1103,18 +1118,15 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
mutex_init(&priv->dytc->mutex);
priv->dytc->priv = priv;
- priv->dytc->pprof.profile_get = dytc_profile_get;
- priv->dytc->pprof.profile_set = dytc_profile_set;
-
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, priv->dytc->pprof.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, priv->dytc->pprof.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->dytc->pprof.choices);
/* Create platform_profile structure and register */
- err = platform_profile_register(&priv->dytc->pprof);
- if (err)
+ priv->dytc->ppdev = devm_platform_profile_register(&priv->platform_device->dev,
+ "ideapad-laptop", &priv->dytc,
+ &dytc_profile_ops);
+ if (IS_ERR(priv->dytc->ppdev)) {
+ err = PTR_ERR(priv->dytc->ppdev);
goto pp_reg_failed;
+ }
/* Ensure initial values are correct */
dytc_profile_refresh(priv);
@@ -1134,7 +1146,6 @@ static void ideapad_dytc_profile_exit(struct ideapad_private *priv)
if (!priv->dytc)
return;
- platform_profile_remove();
mutex_destroy(&priv->dytc->mutex);
kfree(priv->dytc);
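Note how the drvdata cookie works in the new API: whatever pointer is
passed as the third argument to devm_platform_profile_register() is exactly
what the callbacks later get back from dev_get_drvdata(), replacing the old
container_of() on an embedded handler (which is why the registration above
must pass priv->dytc rather than &priv->dytc). A minimal sketch with a
hypothetical foo_priv:

struct foo_priv {
	enum platform_profile_option current_profile;
};

static int foo_profile_get(struct device *dev,
			   enum platform_profile_option *profile)
{
	/* Same pointer that was passed at registration time below */
	struct foo_priv *priv = dev_get_drvdata(dev);

	*profile = priv->current_profile;
	return 0;
}

	/* in init: pass the private-data pointer itself, not its address */
	ppdev = devm_platform_profile_register(dev, "foo", priv,
					       &foo_profile_ops);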
diff --git a/drivers/platform/x86/inspur_platform_profile.c b/drivers/platform/x86/inspur_platform_profile.c
index 8440defa6788..e02f5a55a6c5 100644
--- a/drivers/platform/x86/inspur_platform_profile.c
+++ b/drivers/platform/x86/inspur_platform_profile.c
@@ -32,7 +32,7 @@ enum inspur_tmp_profile {
struct inspur_wmi_priv {
struct wmi_device *wdev;
- struct platform_profile_handler handler;
+ struct device *ppdev;
};
static int inspur_wmi_perform_query(struct wmi_device *wdev,
@@ -84,11 +84,10 @@ out_free:
* 0x0: No Error
* 0x1: Error
*/
-static int inspur_platform_profile_set(struct platform_profile_handler *pprof,
+static int inspur_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct inspur_wmi_priv *priv = container_of(pprof, struct inspur_wmi_priv,
- handler);
+ struct inspur_wmi_priv *priv = dev_get_drvdata(dev);
u8 ret_code[4] = {0, 0, 0, 0};
int ret;
@@ -132,11 +131,10 @@ static int inspur_platform_profile_set(struct platform_profile_handler *pprof,
* 0x1: Performance Mode
* 0x2: Power Saver Mode
*/
-static int inspur_platform_profile_get(struct platform_profile_handler *pprof,
+static int inspur_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct inspur_wmi_priv *priv = container_of(pprof, struct inspur_wmi_priv,
- handler);
+ struct inspur_wmi_priv *priv = dev_get_drvdata(dev);
u8 ret_code[4] = {0, 0, 0, 0};
int ret;
@@ -166,6 +164,21 @@ static int inspur_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int inspur_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops inspur_platform_profile_ops = {
+ .probe = inspur_platform_profile_probe,
+ .profile_get = inspur_platform_profile_get,
+ .profile_set = inspur_platform_profile_set,
+};
+
static int inspur_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct inspur_wmi_priv *priv;
@@ -177,19 +190,10 @@ static int inspur_wmi_probe(struct wmi_device *wdev, const void *context)
priv->wdev = wdev;
dev_set_drvdata(&wdev->dev, priv);
- priv->handler.profile_get = inspur_platform_profile_get;
- priv->handler.profile_set = inspur_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_LOW_POWER, priv->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, priv->handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->handler.choices);
+ priv->ppdev = devm_platform_profile_register(&wdev->dev, "inspur-wmi", priv,
+ &inspur_platform_profile_ops);
- return platform_profile_register(&priv->handler);
-}
-
-static void inspur_wmi_remove(struct wmi_device *wdev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(priv->ppdev);
}
static const struct wmi_device_id inspur_wmi_id_table[] = {
@@ -206,7 +210,6 @@ static struct wmi_driver inspur_wmi_driver = {
},
.id_table = inspur_wmi_id_table,
.probe = inspur_wmi_probe,
- .remove = inspur_wmi_remove,
.no_singleton = true,
};
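With registration device-managed, the whole .remove() callback can be
dropped and probe can end with the PTR_ERR_OR_ZERO() idiom used above, as a
sketch:

	priv->ppdev = devm_platform_profile_register(&wdev->dev, "foo-wmi",
						     priv, &foo_profile_ops);

	/* ERR_PTR -> negative errno, valid pointer -> 0; nothing to undo */
	return PTR_ERR_OR_ZERO(priv->ppdev);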
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index eb698dcb9af9..19a2246f2770 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -83,6 +83,7 @@ config INTEL_BXTWC_PMIC_TMU
config INTEL_BYTCRC_PWRSRC
tristate "Intel Bay Trail Crystal Cove power source driver"
depends on INTEL_SOC_PMIC
+ depends on POWER_SUPPLY
help
This option adds a power source driver for Crystal Cove PMICs
on Intel Bay Trail devices.
diff --git a/drivers/platform/x86/intel/bytcrc_pwrsrc.c b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
index 3edc2a9dab38..68ac040082df 100644
--- a/drivers/platform/x86/intel/bytcrc_pwrsrc.c
+++ b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
@@ -8,13 +8,22 @@
* Copyright (C) 2013 Intel Corporation
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/debugfs.h>
+#include <linux/interrupt.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/regmap.h>
+#define CRYSTALCOVE_PWRSRC_IRQ 0x03
#define CRYSTALCOVE_SPWRSRC_REG 0x1E
+#define CRYSTALCOVE_SPWRSRC_USB BIT(0)
+#define CRYSTALCOVE_SPWRSRC_DC BIT(1)
+#define CRYSTALCOVE_SPWRSRC_BATTERY BIT(2)
#define CRYSTALCOVE_RESETSRC0_REG 0x20
#define CRYSTALCOVE_RESETSRC1_REG 0x21
#define CRYSTALCOVE_WAKESRC_REG 0x22
@@ -22,6 +31,7 @@
struct crc_pwrsrc_data {
struct regmap *regmap;
struct dentry *debug_dentry;
+ struct power_supply *psy;
unsigned int resetsrc0;
unsigned int resetsrc1;
unsigned int wakesrc;
@@ -118,13 +128,60 @@ static int crc_pwrsrc_read_and_clear(struct crc_pwrsrc_data *data,
return regmap_write(data->regmap, reg, *val);
}
+static irqreturn_t crc_pwrsrc_irq_handler(int irq, void *_data)
+{
+ struct crc_pwrsrc_data *data = _data;
+ unsigned int irq_mask;
+
+ if (regmap_read(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, &irq_mask))
+ return IRQ_NONE;
+
+ regmap_write(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, irq_mask);
+
+ power_supply_changed(data->psy);
+ return IRQ_HANDLED;
+}
+
+static int crc_pwrsrc_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct crc_pwrsrc_data *data = power_supply_get_drvdata(psy);
+ unsigned int pwrsrc;
+ int ret;
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, &pwrsrc);
+ if (ret)
+ return ret;
+
+ val->intval = !!(pwrsrc & (CRYSTALCOVE_SPWRSRC_USB |
+ CRYSTALCOVE_SPWRSRC_DC));
+ return 0;
+}
+
+static const enum power_supply_property crc_pwrsrc_psy_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc crc_pwrsrc_psy_desc = {
+ .name = "crystal_cove_pwrsrc",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = crc_pwrsrc_psy_props,
+ .num_properties = ARRAY_SIZE(crc_pwrsrc_psy_props),
+ .get_property = crc_pwrsrc_psy_get_property,
+};
+
static int crc_pwrsrc_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
struct crc_pwrsrc_data *data;
- int ret;
+ int irq, ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -149,6 +206,24 @@ static int crc_pwrsrc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (device_property_read_bool(dev->parent, "linux,register-pwrsrc-power_supply")) {
+ struct power_supply_config psy_cfg = { .drv_data = data };
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ data->psy = devm_power_supply_register(dev, &crc_pwrsrc_psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy))
+ return dev_err_probe(dev, PTR_ERR(data->psy), "registering power-supply\n");
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ crc_pwrsrc_irq_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ\n");
+ }
+
data->debug_dentry = debugfs_create_dir(KBUILD_MODNAME, NULL);
debugfs_create_file("pwrsrc", 0444, data->debug_dentry, data, &pwrsrc_fops);
debugfs_create_file("resetsrc", 0444, data->debug_dentry, data, &resetsrc_fops);
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 0cc80603a8a9..3b48cd7a4075 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -83,8 +83,12 @@ static void int0002_irq_ack(struct irq_data *data)
static void int0002_irq_unmask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
+ gpiochip_enable_irq(gc, hwirq);
+
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg |= GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
@@ -92,11 +96,15 @@ static void int0002_irq_unmask(struct irq_data *data)
static void int0002_irq_mask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
@@ -140,12 +148,14 @@ static bool int0002_check_wake(void *data)
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
-static struct irq_chip int0002_irqchip = {
+static const struct irq_chip int0002_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
.irq_unmask = int0002_irq_unmask,
.irq_set_wake = int0002_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
@@ -203,7 +213,7 @@ static int int0002_probe(struct platform_device *pdev)
}
girq = &chip->irq;
- girq->chip = &int0002_irqchip;
+ gpio_irq_chip_set_chip(girq, &int0002_irqchip);
/* This let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
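This is the standard conversion to an immutable irq_chip: because the chip
is const and flagged IRQCHIP_IMMUTABLE, gpiolib no longer copies or patches
it, so the driver must account for the GPIO-to-IRQ usage itself via
gpiochip_enable_irq()/gpiochip_disable_irq(), and the chip is attached with
gpio_irq_chip_set_chip(). The generic shape of the pattern, as a sketch
with the hardware mask/unmask details elided:

#include <linux/gpio/driver.h>

static void foo_irq_mask(struct irq_data *data)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);

	/* ... mask the interrupt in hardware first ... */
	gpiochip_disable_irq(gc, irqd_to_hwirq(data));
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);

	gpiochip_enable_irq(gc, irqd_to_hwirq(data));
	/* ... then unmask the interrupt in hardware ... */
}

static const struct irq_chip foo_irqchip = {
	.name = "foo",
	.irq_mask = foo_irq_mask,
	.irq_unmask = foo_irq_unmask,
	.flags = IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

	/* in probe, instead of assigning girq->chip directly: */
	gpio_irq_chip_set_chip(&chip->irq, &foo_irqchip);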
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index b3a2578e06c1..1638be8fa71e 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -70,6 +70,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
return -ENODEV;
}
+ dev_dbg(dev, "Sensor name %s\n", acpi_dev_name(sensor));
+
*name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
acpi_dev_name(sensor));
if (!*name_ret)
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index d881b2cfcdfc..31015ebe20d8 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -178,11 +178,11 @@ static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polar
* to create clocks and regulators via the usual frameworks.
*
* Return:
- * * 1 - To continue the loop
- * * 0 - When all resources found are handled properly.
- * * -EINVAL - If the resource is not a GPIO IO resource
- * * -ENODEV - If the resource has no corresponding _DSM entry
- * * -Other - Errors propagated from one of the sub-functions.
+ * * 1 - Continue the loop without adding a copy of the resource to
+ * * the list passed to acpi_dev_get_resources()
+ * * 0 - Continue the loop after adding a copy of the resource to
+ * * the list passed to acpi_dev_get_resources()
+ * * -errno - Error, break loop
*/
static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
void *data)
@@ -220,10 +220,10 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
int3472_get_func_and_polarity(type, &func, &polarity);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
- if (pin != agpio->pin_table[0])
- dev_warn(int3472->dev, "%s %s pin number mismatch _DSM %d resource %d\n",
- func, agpio->resource_source.string_ptr, pin,
- agpio->pin_table[0]);
+ /* Pin field is not really used under Windows and wraps around at 8 bits */
+ if (pin != (agpio->pin_table[0] & 0xff))
+ dev_dbg(int3472->dev, FW_BUG "%s %s pin number mismatch _DSM %d resource %d\n",
+ func, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
@@ -289,7 +289,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
if (ret < 0)
return dev_err_probe(int3472->dev, ret, err_msg);
- return ret;
+ /* Tell acpi_dev_get_resources() to not make a copy of the resource */
+ return 1;
}
static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
@@ -336,6 +337,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
struct int3472_cldb cldb;
int ret;
+ if (!adev)
+ return -ENODEV;
+
ret = skl_int3472_fill_cldb(adev, &cldb);
if (ret) {
dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index 1e107fd49f82..81ac4c691963 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
int ret;
int i;
+ if (!adev)
+ return -ENODEV;
+
n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
if (n_consumers < 0)
return n_consumers;
diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c
index 691d43c3592c..2b55347a5a93 100644
--- a/drivers/platform/x86/intel/plr_tpmi.c
+++ b/drivers/platform/x86/intel/plr_tpmi.c
@@ -262,7 +262,7 @@ static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxilia
struct resource *res;
struct tpmi_plr *plr;
void __iomem *base;
- char name[16];
+ char name[17];
int err;
plat_info = tpmi_get_platform_data(auxdev);
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 3e7f99ac8c94..10f04b944117 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -22,6 +22,7 @@
#include <linux/suspend.h>
#include <linux/units.h>
+#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
@@ -935,13 +936,13 @@ static unsigned int pmc_core_get_crystal_freq(void)
{
unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
- if (boot_cpu_data.cpuid_level < 0x15)
+ if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return 0;
eax_denominator = ebx_numerator = ecx_hz = edx = 0;
- /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
- cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+ /* TSC/Crystal ratio, plus optionally Crystal Hz */
+ cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
if (ebx_numerator == 0 || eax_denominator == 0)
return 0;
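For reference, CPUID leaf 0x15 (now spelled CPUID_LEAF_TSC) returns the
TSC/crystal clock ratio as EBX/EAX and, optionally, the crystal frequency
in Hz in ECX, so when ECX is populated the TSC frequency follows as a
sketch:

	/* TSC Hz = crystal Hz * (numerator / denominator), per SDM leaf 15H */
	u64 tsc_hz = (u64)ecx_hz * ebx_numerator / eax_denominator;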
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 8ed54b7a3333..7233b654bbad 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, "INTEL_PMT");
*/
static ssize_t
intel_pmt_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct intel_pmt_entry *entry = container_of(attr,
@@ -308,7 +308,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
entry->pmt_bin_attr.attr.name = ns->name;
entry->pmt_bin_attr.attr.mode = 0440;
entry->pmt_bin_attr.mmap = intel_pmt_mmap;
- entry->pmt_bin_attr.read = intel_pmt_read;
+ entry->pmt_bin_attr.read_new = intel_pmt_read;
entry->pmt_bin_attr.size = entry->size;
ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index cd0ba84cc8e4..bafac8aa2baf 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -131,39 +131,6 @@ static int intel_punit_ipc_check_status(IPC_DEV *ipcdev, IPC_TYPE type)
}
/**
- * intel_punit_ipc_simple_command() - Simple IPC command
- * @cmd: IPC command code.
- * @para1: First 8bit parameter, set 0 if not used.
- * @para2: Second 8bit parameter, set 0 if not used.
- *
- * Send a IPC command to P-Unit when there is no data transaction
- *
- * Return: IPC error code or 0 on success.
- */
-int intel_punit_ipc_simple_command(int cmd, int para1, int para2)
-{
- IPC_DEV *ipcdev = punit_ipcdev;
- IPC_TYPE type;
- u32 val;
- int ret;
-
- mutex_lock(&ipcdev->lock);
-
- reinit_completion(&ipcdev->cmd_complete);
- type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
-
- val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
- val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
- ipc_write_cmd(ipcdev, type, val);
- ret = intel_punit_ipc_check_status(ipcdev, type);
-
- mutex_unlock(&ipcdev->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(intel_punit_ipc_simple_command);
-
-/**
* intel_punit_ipc_command() - IPC command with data and pointers
* @cmd: IPC command code.
* @para1: First 8bit parameter, set 0 if not used.
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 33f33b1070fd..30d1c2caf984 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -398,8 +398,8 @@ free_payload:
}
static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -409,11 +409,11 @@ static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
}
-static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -423,7 +423,7 @@ static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
}
-static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
static ssize_t
certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv,
@@ -469,7 +469,7 @@ free_buffer:
static ssize_t
state_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -477,11 +477,11 @@ state_certificate_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
static ssize_t
meter_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -489,11 +489,11 @@ meter_certificate_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
static ssize_t
meter_current_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -502,11 +502,11 @@ meter_current_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM,
priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
static ssize_t registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -528,9 +528,9 @@ static ssize_t registers_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
+static const BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
-static struct bin_attribute *sdsi_bin_attrs[] = {
+static const struct bin_attribute *const sdsi_bin_attrs[] = {
&bin_attr_registers,
&bin_attr_state_certificate,
&bin_attr_meter_certificate,
@@ -576,7 +576,7 @@ static struct attribute *sdsi_attrs[] = {
static const struct attribute_group sdsi_group = {
.attrs = sdsi_attrs,
- .bin_attrs = sdsi_bin_attrs,
+ .bin_attrs_new = sdsi_bin_attrs,
.is_bin_visible = sdsi_battr_is_visible,
};
__ATTRIBUTE_GROUPS(sdsi);
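The sdsi changes are part of the tree-wide constification of struct
bin_attribute; during the transition, attribute groups expose const arrays
through the .bin_attrs_new member while read callbacks take a const
attribute pointer. A condensed sketch of the converted shape (the blob
contents and foo_* names are hypothetical):

static ssize_t blob_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr, char *buf,
			 loff_t off, size_t count)
{
	/* attr is const now; read-side helpers are unaffected */
	return memory_read_from_buffer(buf, count, &off, blob, blob_len);
}
static const BIN_ATTR_ADMIN_RO(blob, 0);

static const struct bin_attribute *const foo_bin_attrs[] = {
	&bin_attr_blob,
	NULL
};

static const struct attribute_group foo_group = {
	.bin_attrs_new = foo_bin_attrs,	/* transition member for const arrays */
};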
diff --git a/drivers/platform/x86/lenovo-wmi-camera.c b/drivers/platform/x86/lenovo-wmi-camera.c
index 0c0bedaf7407..eb60fb9a5b3f 100644
--- a/drivers/platform/x86/lenovo-wmi-camera.c
+++ b/drivers/platform/x86/lenovo-wmi-camera.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/wmi.h>
+#include <linux/cleanup.h>
#define WMI_LENOVO_CAMERABUTTON_EVENT_GUID "50C76F1F-D8E4-D895-0A3D-62F4EA400013"
@@ -26,10 +27,38 @@ enum {
SW_CAMERA_ON = 1,
};
+static int camera_shutter_input_setup(struct wmi_device *wdev, u8 camera_mode)
+{
+ struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ int err;
+
+ priv->idev = input_allocate_device();
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "Lenovo WMI Camera Button";
+ priv->idev->phys = "wmi/input0";
+ priv->idev->id.bustype = BUS_HOST;
+ priv->idev->dev.parent = &wdev->dev;
+
+ input_set_capability(priv->idev, EV_SW, SW_CAMERA_LENS_COVER);
+
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER,
+ camera_mode == SW_CAMERA_ON ? 0 : 1);
+ input_sync(priv->idev);
+
+ err = input_register_device(priv->idev);
+ if (err) {
+ input_free_device(priv->idev);
+ priv->idev = NULL;
+ }
+
+ return err;
+}
+
static void lenovo_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
{
struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
- unsigned int keycode;
u8 camera_mode;
if (obj->type != ACPI_TYPE_BUFFER) {
@@ -53,22 +82,24 @@ static void lenovo_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
return;
}
- mutex_lock(&priv->notify_lock);
+ guard(mutex)(&priv->notify_lock);
- keycode = camera_mode == SW_CAMERA_ON ?
- KEY_CAMERA_ACCESS_ENABLE : KEY_CAMERA_ACCESS_DISABLE;
- input_report_key(priv->idev, keycode, 1);
- input_sync(priv->idev);
- input_report_key(priv->idev, keycode, 0);
- input_sync(priv->idev);
+ if (!priv->idev) {
+ if (camera_shutter_input_setup(wdev, camera_mode))
+ dev_warn(&wdev->dev, "Failed to register input device\n");
+ return;
+ }
- mutex_unlock(&priv->notify_lock);
+ if (camera_mode == SW_CAMERA_ON)
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER, 0);
+ else
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER, 1);
+ input_sync(priv->idev);
}
static int lenovo_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct lenovo_wmi_priv *priv;
- int ret;
priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -76,21 +107,6 @@ static int lenovo_wmi_probe(struct wmi_device *wdev, const void *context)
dev_set_drvdata(&wdev->dev, priv);
- priv->idev = devm_input_allocate_device(&wdev->dev);
- if (!priv->idev)
- return -ENOMEM;
-
- priv->idev->name = "Lenovo WMI Camera Button";
- priv->idev->phys = "wmi/input0";
- priv->idev->id.bustype = BUS_HOST;
- priv->idev->dev.parent = &wdev->dev;
- input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_ENABLE);
- input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_DISABLE);
-
- ret = input_register_device(priv->idev);
- if (ret)
- return ret;
-
mutex_init(&priv->notify_lock);
return 0;
@@ -100,6 +116,9 @@ static void lenovo_wmi_remove(struct wmi_device *wdev)
{
struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ if (priv->idev)
+ input_unregister_device(priv->idev);
+
mutex_destroy(&priv->notify_lock);
}
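The notify path above also switches from explicit mutex_lock()/
mutex_unlock() to the scope-based guard from <linux/cleanup.h>, which is
what makes the early returns safe. The idiom in isolation, as a sketch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);

static int foo_update(int value)
{
	guard(mutex)(&foo_lock);	/* mutex_unlock() runs at every scope exit */

	if (value < 0)
		return -EINVAL;		/* early return: lock still released */

	/* ... mutate state under the lock ... */
	return 0;
}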
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
index d2699ca24f34..a96b215cd2c5 100644
--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -199,14 +199,15 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
if (ret)
return ret;
+ serdev_device_set_drvdata(serdev, fc);
+ serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
serdev_device_set_baudrate(serdev, 600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, fc);
- serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
ret = devm_extcon_register_notifier_all(dev, fc->extcon, &fc->nb);
if (ret)
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index e5391a37014d..c4b150fa093f 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -806,8 +806,8 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
-static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended;
@@ -996,7 +996,7 @@ static int __init load_scm_model_init(struct platform_device *sdev)
if (result)
goto fail_input;
- result = i8042_install_filter(msi_laptop_i8042_filter);
+ result = i8042_install_filter(msi_laptop_i8042_filter, NULL);
if (result) {
pr_err("Unable to install key filter\n");
goto fail_filter;
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 22ca70eb8227..2987b4db6009 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -260,7 +260,7 @@ struct pcc_acpi {
* keypress events over the PS/2 kbd interface, filter these out.
*/
static bool panasonic_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended;
@@ -1100,7 +1100,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pcc->platform = NULL;
}
- i8042_install_filter(panasonic_i8042_filter);
+ i8042_install_filter(panasonic_i8042_filter, NULL);
return 0;
out_platform:
diff --git a/drivers/platform/x86/quickstart.c b/drivers/platform/x86/quickstart.c
index 8d540a1c8602..c332c7cdaff5 100644
--- a/drivers/platform/x86/quickstart.c
+++ b/drivers/platform/x86/quickstart.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeup.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
index bcf3a0c356ea..57eac75805e2 100644
--- a/drivers/platform/x86/serdev_helpers.h
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -22,32 +22,14 @@
#include <linux/string.h>
static inline struct device *
-get_serdev_controller(const char *serial_ctrl_hid,
- const char *serial_ctrl_uid,
- int serial_ctrl_port,
- const char *serdev_ctrl_name)
+get_serdev_controller_from_parent(struct device *ctrl_dev,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
{
- struct device *ctrl_dev, *child;
- struct acpi_device *ctrl_adev;
+ struct device *child;
char name[32];
int i;
- ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s serial-ctrl adev\n",
- serial_ctrl_hid, serial_ctrl_uid);
- return ERR_PTR(-ENODEV);
- }
-
- /* get_first_physical_node() returns a weak ref */
- ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s serial-ctrl physical node\n",
- serial_ctrl_hid, serial_ctrl_uid);
- ctrl_dev = ERR_PTR(-ENODEV);
- goto put_ctrl_adev;
- }
-
/* Walk host -> uart-ctrl -> port -> serdev-ctrl */
for (i = 0; i < 3; i++) {
switch (i) {
@@ -67,14 +49,40 @@ get_serdev_controller(const char *serial_ctrl_hid,
put_device(ctrl_dev);
if (!child) {
pr_err("error could not find '%s' device\n", name);
- ctrl_dev = ERR_PTR(-ENODEV);
- goto put_ctrl_adev;
+ return ERR_PTR(-ENODEV);
}
ctrl_dev = child;
}
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
return ctrl_dev;
}
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct acpi_device *adev;
+ struct device *parent;
+
+ adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ parent = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+ if (!parent) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* This puts our reference on parent and returns a ref on the ctrl */
+ return get_serdev_controller_from_parent(parent, serial_ctrl_port, serdev_ctrl_name);
+}
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index ed6b28505cd6..db030b0f176a 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -384,6 +384,17 @@ static const struct smi_node cs35l57_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node tas2781_hda = {
+ .instances = {
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
/*
* Note new device-ids must also be added to ignore_serial_bus_ids in
* drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
@@ -396,6 +407,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "CSC3556", (unsigned long)&cs35l56_hda },
{ "CSC3557", (unsigned long)&cs35l57_hda },
{ "INT3515", (unsigned long)&int3515_data },
+ { "TXNW2781", (unsigned long)&tas2781_hda },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
{ "CLSA0101", (unsigned long)&cs35l41_hda },
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 38de0cb20d77..323316ac6783 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -194,7 +194,6 @@ static const char * const level_options[] = {
[TLMI_LEVEL_MASTER] = "master",
};
static struct think_lmi tlmi_priv;
-static const struct class *fw_attr_class;
static DEFINE_MUTEX(tlmi_mutex);
static inline struct tlmi_pwd_setting *to_tlmi_pwd_setting(struct kobject *kobj)
@@ -1446,11 +1445,7 @@ static int tlmi_sysfs_init(void)
{
int i, ret;
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- return ret;
-
- tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ tlmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", "thinklmi");
if (IS_ERR(tlmi_priv.class_dev)) {
ret = PTR_ERR(tlmi_priv.class_dev);
@@ -1563,9 +1558,8 @@ static int tlmi_sysfs_init(void)
fail_create_attr:
tlmi_release_attr();
fail_device_created:
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
fail_class_created:
- fw_attributes_class_put();
return ret;
}
@@ -1788,8 +1782,7 @@ fail_clear_attr:
static void tlmi_remove(struct wmi_device *wdev)
{
tlmi_release_attr();
- device_destroy(fw_attr_class, MKDEV(0, 0));
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
}
static int tlmi_probe(struct wmi_device *wdev, const void *context)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2cfb2ac3f465..1fcb0f99695a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -963,6 +963,7 @@ static const struct proc_ops dispatch_proc_ops = {
static struct platform_device *tpacpi_pdev;
static struct platform_device *tpacpi_sensors_pdev;
static struct device *tpacpi_hwmon;
+static struct device *tpacpi_pprof;
static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
static LIST_HEAD(tpacpi_all_drivers);
@@ -3275,6 +3276,7 @@ static const struct key_entry keymap_lenovo[] __initconst = {
* scancodes to preserve uAPI compatibility, see tpacpi_input_send_key().
*/
{ KE_KEY, 0x131d, { KEY_VENDOR } }, /* System debug info, similar to old ThinkPad key */
+ { KE_KEY, 0x1320, { KEY_LINK_PHONE } },
{ KE_KEY, TP_HKEY_EV_TRACK_DOUBLETAP /* 0x8036 */, { KEY_PROG4 } },
{ KE_END }
};
@@ -10415,7 +10417,7 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
* dytc_profile_get: Function to register with platform_profile
* handler. Returns current platform profile.
*/
-static int dytc_profile_get(struct platform_profile_handler *pprof,
+static int dytc_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
*profile = dytc_current_profile;
@@ -10490,7 +10492,7 @@ static int dytc_cql_command(int command, int *output)
* dytc_profile_set: Function to register with platform_profile
* handler. Sets current platform profile.
*/
-static int dytc_profile_set(struct platform_profile_handler *pprof,
+static int dytc_profile_set(struct device *dev,
enum platform_profile_option profile)
{
int perfmode;
@@ -10539,6 +10541,21 @@ unlock:
return err;
}
+static int dytc_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops dytc_profile_ops = {
+ .probe = dytc_profile_probe,
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
static void dytc_profile_refresh(void)
{
enum platform_profile_option profile;
@@ -10567,24 +10584,14 @@ static void dytc_profile_refresh(void)
err = convert_dytc_to_profile(funcmode, perfmode, &profile);
if (!err && profile != dytc_current_profile) {
dytc_current_profile = profile;
- platform_profile_notify();
+ platform_profile_notify(tpacpi_pprof);
}
}
-static struct platform_profile_handler dytc_profile = {
- .profile_get = dytc_profile_get,
- .profile_set = dytc_profile_set,
-};
-
static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
{
int err, output;
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, dytc_profile.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
-
err = dytc_command(DYTC_CMD_QUERY, &output);
if (err)
return err;
@@ -10639,12 +10646,13 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
"DYTC version %d: thermal mode available\n", dytc_version);
/* Create platform_profile structure and register */
- err = platform_profile_register(&dytc_profile);
+ tpacpi_pprof = devm_platform_profile_register(&tpacpi_pdev->dev, "thinkpad-acpi",
+ NULL, &dytc_profile_ops);
/*
* If for some reason platform_profiles aren't enabled
* don't quit terminally.
*/
- if (err)
+ if (IS_ERR(tpacpi_pprof))
return -ENODEV;
/* Ensure initial values are correct */
@@ -10657,14 +10665,8 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
return 0;
}
-static void dytc_profile_exit(void)
-{
- platform_profile_remove();
-}
-
static struct ibm_struct dytc_profile_driver_data = {
.name = "dytc-profile",
- .exit = dytc_profile_exit,
};
/*************************************************************************
@@ -11681,7 +11683,7 @@ static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
if (strcmp(ibm->name, kp->name) == 0 && ibm->write) {
if (strlen(val) > sizeof(ibms_init[i].param) - 1)
return -ENOSPC;
- strcpy(ibms_init[i].param, val);
+ strscpy(ibms_init[i].param, val);
return 0;
}
}
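The strcpy() replacement uses the two-argument form of strscpy(), which
derives the bound from the destination array type at compile time; the
explicit strlen() check above still guards against silent truncation.
Equivalent forms, as a sketch:

	char param[32];

	strscpy(param, val);			/* bound inferred: sizeof(param) */
	strscpy(param, val, sizeof(param));	/* spelled-out equivalent */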
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 78a5aac2dcfd..5ad3a7183d33 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2755,7 +2755,7 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
}
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
if (str & I8042_STR_AUXDATA)
return false;
@@ -2915,7 +2915,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
- error = i8042_install_filter(toshiba_acpi_i8042_filter);
+ error = i8042_install_filter(toshiba_acpi_i8042_filter, NULL);
if (error) {
pr_err("Error installing key filter\n");
goto err_free_dev;
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index df6f0ae6e6c7..3e33da36da8a 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -20,66 +20,66 @@
#define WMI_BMOF_GUID "05901221-D566-11D1-B2F0-00A0C9062910"
-struct bmof_priv {
- union acpi_object *bmofdata;
- struct bin_attribute bmof_bin_attr;
-};
-
-static ssize_t read_bmof(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t bmof_read(struct file *filp, struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- struct bmof_priv *priv = container_of(attr, struct bmof_priv, bmof_bin_attr);
+ struct device *dev = kobj_to_dev(kobj);
+ union acpi_object *obj = dev_get_drvdata(dev);
- return memory_read_from_buffer(buf, count, &off, priv->bmofdata->buffer.pointer,
- priv->bmofdata->buffer.length);
+ return memory_read_from_buffer(buf, count, &off, obj->buffer.pointer, obj->buffer.length);
}
-static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
+static const BIN_ATTR_ADMIN_RO(bmof, 0);
+
+static const struct bin_attribute * const bmof_attrs[] = {
+ &bin_attr_bmof,
+ NULL
+};
+
+static size_t bmof_bin_size(struct kobject *kobj, const struct bin_attribute *attr, int n)
{
- struct bmof_priv *priv;
- int ret;
+ struct device *dev = kobj_to_dev(kobj);
+ union acpi_object *obj = dev_get_drvdata(dev);
+
+ return obj->buffer.length;
+}
- priv = devm_kzalloc(&wdev->dev, sizeof(struct bmof_priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+static const struct attribute_group bmof_group = {
+ .bin_size = bmof_bin_size,
+ .bin_attrs_new = bmof_attrs,
+};
+
+static const struct attribute_group *bmof_groups[] = {
+ &bmof_group,
+ NULL
+};
- dev_set_drvdata(&wdev->dev, priv);
+static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
+{
+ union acpi_object *obj;
- priv->bmofdata = wmidev_block_query(wdev, 0);
- if (!priv->bmofdata) {
+ obj = wmidev_block_query(wdev, 0);
+ if (!obj) {
dev_err(&wdev->dev, "failed to read Binary MOF\n");
return -EIO;
}
- if (priv->bmofdata->type != ACPI_TYPE_BUFFER) {
+ if (obj->type != ACPI_TYPE_BUFFER) {
dev_err(&wdev->dev, "Binary MOF is not a buffer\n");
- ret = -EIO;
- goto err_free;
+ kfree(obj);
+ return -EIO;
}
- sysfs_bin_attr_init(&priv->bmof_bin_attr);
- priv->bmof_bin_attr.attr.name = "bmof";
- priv->bmof_bin_attr.attr.mode = 0400;
- priv->bmof_bin_attr.read = read_bmof;
- priv->bmof_bin_attr.size = priv->bmofdata->buffer.length;
-
- ret = device_create_bin_file(&wdev->dev, &priv->bmof_bin_attr);
- if (ret)
- goto err_free;
+ dev_set_drvdata(&wdev->dev, obj);
return 0;
-
- err_free:
- kfree(priv->bmofdata);
- return ret;
}
static void wmi_bmof_remove(struct wmi_device *wdev)
{
- struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
+ union acpi_object *obj = dev_get_drvdata(&wdev->dev);
- device_remove_bin_file(&wdev->dev, &priv->bmof_bin_attr);
- kfree(priv->bmofdata);
+ kfree(obj);
}
static const struct wmi_device_id wmi_bmof_id_table[] = {
@@ -90,6 +90,7 @@ static const struct wmi_device_id wmi_bmof_id_table[] = {
static struct wmi_driver wmi_bmof_driver = {
.driver = {
.name = "wmi-bmof",
+ .dev_groups = bmof_groups,
},
.probe = wmi_bmof_probe,
.remove = wmi_bmof_remove,
diff --git a/drivers/platform/x86/x86-android-tablets/Makefile b/drivers/platform/x86/x86-android-tablets/Makefile
index 41ece5a37137..313be30548bc 100644
--- a/drivers/platform/x86/x86-android-tablets/Makefile
+++ b/drivers/platform/x86/x86-android-tablets/Makefile
@@ -3,7 +3,7 @@
# X86 Android tablet support Makefile
#
+obj-$(CONFIG_X86_ANDROID_TABLETS) += vexia_atla10_ec.o
obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
-
x86-android-tablets-y := core.o dmi.o shared-psy-info.o \
asus.o lenovo.o other.o
diff --git a/drivers/platform/x86/x86-android-tablets/asus.c b/drivers/platform/x86/x86-android-tablets/asus.c
index 07fbeab2319a..7dde63b9943f 100644
--- a/drivers/platform/x86/x86-android-tablets/asus.c
+++ b/drivers/platform/x86/x86-android-tablets/asus.c
@@ -145,8 +145,8 @@ static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst =
static const struct x86_serdev_info asus_me176c_serdevs[] __initconst = {
{
- .ctrl_hid = "80860F0A",
- .ctrl_uid = "2",
+ .ctrl.acpi.hid = "80860F0A",
+ .ctrl.acpi.uid = "2",
.ctrl_devname = "serial0",
.serdev_hid = "BCM2E3A",
},
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index 4218afcec0e9..2a9c47178505 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -157,7 +157,7 @@ static struct gpiod_lookup_table * const *gpiod_lookup_tables;
static const struct software_node *bat_swnode;
static void (*exit_handler)(void);
-static struct i2c_adapter *
+static __init struct i2c_adapter *
get_i2c_adap_by_handle(const struct x86_i2c_client_info *client_info)
{
acpi_handle handle;
@@ -177,7 +177,7 @@ static __init int match_parent(struct device *dev, const void *data)
return dev->parent == data;
}
-static struct i2c_adapter *
+static __init struct i2c_adapter *
get_i2c_adap_by_pci_parent(const struct x86_i2c_client_info *client_info)
{
struct i2c_adapter *adap = NULL;
@@ -212,7 +212,7 @@ static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info
if (board_info.irq < 0)
return board_info.irq;
- if (dev_info->use_pci_devname)
+ if (dev_info->use_pci)
adap = get_i2c_adap_by_pci_parent(client_info);
else
adap = get_i2c_adap_by_handle(client_info);
@@ -271,15 +271,32 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
return 0;
}
-static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
+static __init struct device *
+get_serdev_controller_by_pci_parent(const struct x86_serdev_info *info)
{
+ struct pci_dev *pdev;
+
+ pdev = pci_get_domain_bus_and_slot(0, 0, info->ctrl.pci.devfn);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ /* This puts our reference on pdev and returns a ref on the ctrl */
+ return get_serdev_controller_from_parent(&pdev->dev, 0, info->ctrl_devname);
+}
+
+static __init int x86_instantiate_serdev(const struct x86_dev_info *dev_info, int idx)
+{
+ const struct x86_serdev_info *info = &dev_info->serdev_info[idx];
struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
- info->ctrl_devname);
+ if (dev_info->use_pci)
+ ctrl_dev = get_serdev_controller_by_pci_parent(info);
+ else
+ ctrl_dev = get_serdev_controller(info->ctrl.acpi.hid, info->ctrl.acpi.uid,
+ 0, info->ctrl_devname);
if (IS_ERR(ctrl_dev))
return PTR_ERR(ctrl_dev);
@@ -446,7 +463,7 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
serdev_count = dev_info->serdev_count;
for (i = 0; i < serdev_count; i++) {
- ret = x86_instantiate_serdev(&dev_info->serdev_info[i], i);
+ ret = x86_instantiate_serdev(dev_info, i);
if (ret < 0) {
x86_android_tablet_remove(pdev);
return ret;
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index ae087f1471c1..1241a97cda39 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -178,8 +178,8 @@ static const struct platform_device_info lenovo_yb1_x90_pdevs[] __initconst = {
*/
static const struct x86_serdev_info lenovo_yb1_x90_serdevs[] __initconst = {
{
- .ctrl_hid = "8086228A",
- .ctrl_uid = "1",
+ .ctrl.acpi.hid = "8086228A",
+ .ctrl.acpi.uid = "1",
.ctrl_devname = "serial0",
.serdev_hid = "BCM2E1A",
},
@@ -601,7 +601,7 @@ static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_
.num_consumer_supplies = 1,
};
-struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
+static struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
.regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data,
};
@@ -726,7 +726,7 @@ static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initcon
},
};
-const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
+static const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
"bq24190_charger", /* For the Vbus regulator for lc824206xa */
NULL
};
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index 735df818f76b..1d93d9edb23f 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -602,14 +602,14 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
* Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet
* distributed to schools in the Spanish Andalucía region.
*/
-const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
+static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
static const struct property_entry vexia_edu_atla10_ulpmc_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy),
{ }
};
-const struct software_node vexia_edu_atla10_ulpmc_node = {
+static const struct software_node vexia_edu_atla10_ulpmc_node = {
.properties = vexia_edu_atla10_ulpmc_props,
};
@@ -715,6 +715,14 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
}
};
+static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
+ {
+ .ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),
+ .ctrl_devname = "serial0",
+ .serdev_hid = "OBDA8723",
+ },
+};
+
static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
.dev_id = "i2c-FTSC1000",
.table = {
@@ -755,9 +763,11 @@ static int __init vexia_edu_atla10_init(struct device *dev)
const struct x86_dev_info vexia_edu_atla10_info __initconst = {
.i2c_client_info = vexia_edu_atla10_i2c_clients,
.i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_i2c_clients),
+ .serdev_info = vexia_edu_atla10_serdevs,
+ .serdev_count = ARRAY_SIZE(vexia_edu_atla10_serdevs),
.gpiod_lookup_tables = vexia_edu_atla10_gpios,
.init = vexia_edu_atla10_init,
- .use_pci_devname = true,
+ .use_pci = true,
};
/*
diff --git a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
new file mode 100644
index 000000000000..5d02af1c5aaa
--- /dev/null
+++ b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * power_supply class (battery) driver for the I2C attached embedded controller
+ * found on Vexia EDU ATLA 10 (9V version) tablets.
+ *
+ * This is based on the ACPI Battery device in the DSDT, which should work
+ * except that it expects the I2C controller to be enumerated as an ACPI
+ * device, while the tablet's BIOS enumerates all LPSS devices as PCI devices
+ * (and changing the LPSS BIOS settings from PCI -> ACPI does not work).
+ *
+ * Copyright (c) 2024 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/devm-helpers.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <asm/byteorder.h>
+
+/* State field uses ACPI Battery spec status bits */
+#define ACPI_BATTERY_STATE_DISCHARGING BIT(0)
+#define ACPI_BATTERY_STATE_CHARGING BIT(1)
+
+#define ATLA10_EC_BATTERY_STATE_COMMAND 0x87
+#define ATLA10_EC_BATTERY_INFO_COMMAND 0x88
+
+/* From broken ACPI battery device in DSDT */
+#define ATLA10_EC_VOLTAGE_MIN_DESIGN_uV 3750000
+
+/* Update data every 5 seconds */
+#define UPDATE_INTERVAL_JIFFIES (5 * HZ)
+
+struct atla10_ec_battery_state {
+ u8 status; /* Using ACPI Battery spec status bits */
+ u8 capacity; /* Percent */
+ __le16 charge_now_mAh;
+ __le16 voltage_now_mV;
+ __le16 current_now_mA;
+ __le16 charge_full_mAh;
+ __le16 temp; /* centi degrees Celsius */
+} __packed;
+
+struct atla10_ec_battery_info {
+ __le16 charge_full_design_mAh;
+ __le16 voltage_now_mV; /* Should be design voltage, but is not? */
+ __le16 charge_full_design2_mAh;
+} __packed;
+
+struct atla10_ec_data {
+ struct i2c_client *client;
+ struct power_supply *psy;
+ struct delayed_work work;
+ struct mutex update_lock;
+ struct atla10_ec_battery_info info;
+ struct atla10_ec_battery_state state;
+ bool valid; /* true if state is valid */
+ unsigned long last_update; /* In jiffies */
+};
+
+static int atla10_ec_cmd(struct atla10_ec_data *data, u8 cmd, u8 len, u8 *values)
+{
+ struct device *dev = &data->client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ ret = i2c_smbus_read_block_data(data->client, cmd, buf);
+ if (ret != len) {
+ dev_err(dev, "I2C command 0x%02x error: %d\n", cmd, ret);
+ return -EIO;
+ }
+
+ memcpy(values, buf, len);
+ return 0;
+}
+
+static int atla10_ec_update(struct atla10_ec_data *data)
+{
+ int ret;
+
+ if (data->valid && time_before(jiffies, data->last_update + UPDATE_INTERVAL_JIFFIES))
+ return 0;
+
+ ret = atla10_ec_cmd(data, ATLA10_EC_BATTERY_STATE_COMMAND,
+ sizeof(data->state), (u8 *)&data->state);
+ if (ret)
+ return ret;
+
+ data->last_update = jiffies;
+ data->valid = true;
+ return 0;
+}
+
+static int atla10_ec_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct atla10_ec_data *data = power_supply_get_drvdata(psy);
+ int charge_now_mAh, charge_full_mAh, ret;
+
+ guard(mutex)(&data->update_lock);
+
+ ret = atla10_ec_update(data);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (data->state.status & ACPI_BATTERY_STATE_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (data->state.status & ACPI_BATTERY_STATE_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (data->state.capacity == 100)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = data->state.capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ /*
+ * The EC has a bug where it reports charge-full-design as
+ * charge-now when the battery is full. Clamp charge-now to
+ * charge-full to work around this.
+ */
+ charge_now_mAh = le16_to_cpu(data->state.charge_now_mAh);
+ charge_full_mAh = le16_to_cpu(data->state.charge_full_mAh);
+ val->intval = min(charge_now_mAh, charge_full_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = le16_to_cpu(data->state.voltage_now_mV) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = le16_to_cpu(data->state.current_now_mA) * 1000;
+ /*
+ * Documentation/ABI/testing/sysfs-class-power specifies
+ * negative current for discharging.
+ */
+ if (data->state.status & ACPI_BATTERY_STATE_DISCHARGING)
+ val->intval = -val->intval;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = le16_to_cpu(data->state.charge_full_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = le16_to_cpu(data->state.temp) / 10;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = le16_to_cpu(data->info.charge_full_design_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = ATLA10_EC_VOLTAGE_MIN_DESIGN_uV;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void atla10_ec_external_power_changed_work(struct work_struct *work)
+{
+ struct atla10_ec_data *data = container_of(work, struct atla10_ec_data, work.work);
+
+ dev_dbg(&data->client->dev, "External power changed\n");
+ data->valid = false;
+ power_supply_changed(data->psy);
+}
+
+static void atla10_ec_external_power_changed(struct power_supply *psy)
+{
+ struct atla10_ec_data *data = power_supply_get_drvdata(psy);
+
+ /* After charger plug in/out wait 0.5s for things to stabilize */
+ mod_delayed_work(system_wq, &data->work, HZ / 2);
+}
+
+static const enum power_supply_property atla10_ec_psy_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
+static const struct power_supply_desc atla10_ec_psy_desc = {
+ .name = "atla10_ec_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = atla10_ec_psy_props,
+ .num_properties = ARRAY_SIZE(atla10_ec_psy_props),
+ .get_property = atla10_ec_psy_get_property,
+ .external_power_changed = atla10_ec_external_power_changed,
+};
+
+static int atla10_ec_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = { };
+ struct device *dev = &client->dev;
+ struct atla10_ec_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ psy_cfg.drv_data = data;
+ data->client = client;
+
+ ret = devm_mutex_init(dev, &data->update_lock);
+ if (ret)
+ return ret;
+
+ ret = devm_delayed_work_autocancel(dev, &data->work,
+ atla10_ec_external_power_changed_work);
+ if (ret)
+ return ret;
+
+ ret = atla10_ec_cmd(data, ATLA10_EC_BATTERY_INFO_COMMAND,
+ sizeof(data->info), (u8 *)&data->info);
+ if (ret)
+ return ret;
+
+ data->psy = devm_power_supply_register(dev, &atla10_ec_psy_desc, &psy_cfg);
+ return PTR_ERR_OR_ZERO(data->psy);
+}
+
+static const struct i2c_device_id atla10_ec_id_table[] = {
+ { "vexia_atla10_ec" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atla10_ec_id_table);
+
+static struct i2c_driver atla10_ec_driver = {
+ .driver = {
+ .name = "vexia_atla10_ec",
+ },
+ .probe = atla10_ec_probe,
+ .id_table = atla10_ec_id_table,
+};
+module_i2c_driver(atla10_ec_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Battery driver for Vexia EDU ATLA 10 tablet EC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 0fc7e8cff672..63a38a0069ba 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -57,8 +57,15 @@ struct x86_spi_dev_info {
};
struct x86_serdev_info {
- const char *ctrl_hid;
- const char *ctrl_uid;
+ union {
+ struct {
+ const char *hid;
+ const char *uid;
+ } acpi;
+ struct {
+ unsigned int devfn;
+ } pci;
+ } ctrl;
const char *ctrl_devname;
/*
* ATM the serdev core only supports of or ACPI matching; and so far all
@@ -91,7 +98,7 @@ struct x86_dev_info {
int gpio_button_count;
int (*init)(struct device *dev);
void (*exit)(void);
- bool use_pci_devname;
+ bool use_pci;
};
int x86_android_tablet_get_gpiod(const char *chip, int pin, const char *con_id,
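
Taken together with the lenovo.c and other.c hunks above, the new ctrl union lets the same table type describe either enumeration scheme; a combined sketch (hypothetical board data; both entries are copied from those hunks; note that x86_dev_info.use_pci selects one interpretation for a whole board, so real tables use only one form):

static const struct x86_serdev_info example_serdevs[] __initconst = {
	{
		/* ACPI-enumerated UART controller (use_pci == false) */
		.ctrl.acpi.hid = "8086228A",
		.ctrl.acpi.uid = "1",
		.ctrl_devname = "serial0",
		.serdev_hid = "BCM2E1A",
	},
	{
		/* PCI-enumerated UART controller (use_pci == true) */
		.ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),
		.ctrl_devname = "serial0",
		.serdev_hid = "OBDA8723",
	},
};
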
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index a7784a8bb5db..86b531e15b85 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -96,6 +96,14 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
continue;
}
+ /*
+ * Register the explicit power on request to the firmware so
+ * that it is tracked as used by OSPM agent and not
+ * accidentally turned off with OSPM's knowledge
+ */
+ if (state == SCMI_POWER_STATE_GENERIC_ON)
+ power_ops->state_set(ph, i, state);
+
scmi_pd->domain = i;
scmi_pd->ph = ph;
scmi_pd->name = power_ops->name_get(ph, i);
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 20a9efebbcb7..6c94137865c9 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -3180,6 +3180,8 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
if (!err)
genpd_state->residency_ns = 1000LL * residency;
+ of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
+
genpd_state->power_on_latency_ns = 1000LL * exit_latency;
genpd_state->power_off_latency_ns = 1000LL * entry_latency;
genpd_state->fwnode = &state_node->fwnode;
@@ -3458,7 +3460,10 @@ static int idle_states_show(struct seq_file *s, void *data)
seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
for (i = 0; i < genpd->state_count; i++) {
- idle_time += genpd->states[i].idle_time;
+ struct genpd_power_state *state = &genpd->states[i];
+ char state_name[15];
+
+ idle_time += state->idle_time;
if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
now = ktime_get_mono_fast_ns();
@@ -3468,9 +3473,13 @@ static int idle_states_show(struct seq_file *s, void *data)
}
}
+ if (!state->name)
+ snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
+
do_div(idle_time, NSEC_PER_MSEC);
- seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
- genpd->states[i].usage, genpd->states[i].rejected);
+ seq_printf(s, "%-14s %-14llu %-14llu %llu\n",
+ state->name ?: state_name, idle_time,
+ state->usage, state->rejected);
}
genpd_unlock(genpd);
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 9bdb80fd7210..958d34d4821b 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1437,6 +1437,7 @@ static struct platform_driver imx_pgc_domain_driver = {
.driver = {
.name = "imx-pgc",
.pm = &imx_pgc_domain_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = imx_pgc_domain_probe,
.remove = imx_pgc_domain_remove,
@@ -1549,6 +1550,7 @@ static struct platform_driver imx_gpc_driver = {
.driver = {
.name = "imx-gpcv2",
.of_match_table = imx_gpcv2_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = imx_gpcv2_probe,
};
diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
index 23db85b7aa9e..912802b5215b 100644
--- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
@@ -894,6 +894,7 @@ static struct platform_driver imx8m_blk_ctrl_driver = {
.name = "imx8m-blk-ctrl",
.pm = &imx8m_blk_ctrl_pm_ops,
.of_match_table = imx8m_blk_ctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(imx8m_blk_ctrl_driver);
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index e3a0f64c144c..34576be606e3 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -770,7 +770,7 @@ static void imx8mp_blk_ctrl_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
- for (i = 0; bc->onecell_data.num_domains; i++) {
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
@@ -862,6 +862,7 @@ static struct platform_driver imx8mp_blk_ctrl_driver = {
.name = "imx8mp-blk-ctrl",
.pm = &imx8mp_blk_ctrl_pm_ops,
.of_match_table = imx8mp_blk_ctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(imx8mp_blk_ctrl_driver);
diff --git a/drivers/pmdomain/mediatek/Kconfig b/drivers/pmdomain/mediatek/Kconfig
index 21305c4f17fe..0e34a517ab7d 100644
--- a/drivers/pmdomain/mediatek/Kconfig
+++ b/drivers/pmdomain/mediatek/Kconfig
@@ -26,4 +26,16 @@ config MTK_SCPSYS_PM_DOMAINS
Control Processor System (SCPSYS) has several power management related
tasks in the system.
+config AIROHA_CPU_PM_DOMAIN
+ tristate "Airoha CPU power domain"
+ default ARCH_AIROHA
+ depends on HAVE_ARM_SMCCC
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable CPU power domain support for Airoha SoC.
+
+ CPU frequency and power are controlled by ATF via SMC commands
+ that set performance states.
+
endmenu
diff --git a/drivers/pmdomain/mediatek/Makefile b/drivers/pmdomain/mediatek/Makefile
index 8cde09e654b3..18ba92e3c418 100644
--- a/drivers/pmdomain/mediatek/Makefile
+++ b/drivers/pmdomain/mediatek/Makefile
@@ -1,3 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
+obj-$(CONFIG_AIROHA_CPU_PM_DOMAIN) += airoha-cpu-pmdomain.o
+
+ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
+# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
+# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
+# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_airoha-cpu-pmdomain.o += $(CC_FLAGS_FTRACE)
+endif
diff --git a/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
new file mode 100644
index 000000000000..0fd88d2f9ac2
--- /dev/null
+++ b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#define AIROHA_SIP_AVS_HANDLE 0x82000301
+#define AIROHA_AVS_OP_BASE 0xddddddd0
+#define AIROHA_AVS_OP_MASK GENMASK(1, 0)
+#define AIROHA_AVS_OP_FREQ_DYN_ADJ (AIROHA_AVS_OP_BASE | \
+ FIELD_PREP(AIROHA_AVS_OP_MASK, 0x1))
+#define AIROHA_AVS_OP_GET_FREQ (AIROHA_AVS_OP_BASE | \
+ FIELD_PREP(AIROHA_AVS_OP_MASK, 0x2))
+
+struct airoha_cpu_pmdomain_priv {
+ struct clk_hw hw;
+ struct generic_pm_domain pd;
+};
+
+static long airoha_cpu_pmdomain_clk_round(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long airoha_cpu_pmdomain_clk_get(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_GET_FREQ,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ /* SMCCC returns freq in MHz */
+ return (int)(res.a0 * 1000 * 1000);
+}
+
+/* The SMCCC-backed Airoha CPU clk is always enabled */
+static int airoha_cpu_pmdomain_clk_is_enabled(struct clk_hw *hw)
+{
+ return true;
+}
+
+static const struct clk_ops airoha_cpu_pmdomain_clk_ops = {
+ .recalc_rate = airoha_cpu_pmdomain_clk_get,
+ .is_enabled = airoha_cpu_pmdomain_clk_is_enabled,
+ .round_rate = airoha_cpu_pmdomain_clk_round,
+};
+
+static int airoha_cpu_pmdomain_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_FREQ_DYN_ADJ,
+ 0, state, 0, 0, 0, 0, &res);
+
+ /* The SMC call signals success by clearing BIT(0) in res.a0 */
+ return res.a0 & BIT(0) ? -EINVAL : 0;
+}
+
+static int airoha_cpu_pmdomain_probe(struct platform_device *pdev)
+{
+ struct airoha_cpu_pmdomain_priv *priv;
+ struct device *dev = &pdev->dev;
+ const struct clk_init_data init = {
+ .name = "cpu",
+ .ops = &airoha_cpu_pmdomain_clk_ops,
+ /* Clock has no set_rate op, so the rate can't be cached */
+ .flags = CLK_GET_RATE_NOCACHE,
+ };
+ struct generic_pm_domain *pd;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Init and register a get-only clk for cpufreq */
+ priv->hw.init = &init;
+ ret = devm_clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ret;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &priv->hw);
+ if (ret)
+ return ret;
+
+ /* Init and register a PD for CPU */
+ pd = &priv->pd;
+ pd->name = "cpu_pd";
+ pd->flags = GENPD_FLAG_ALWAYS_ON;
+ pd->set_performance_state = airoha_cpu_pmdomain_set_performance_state;
+
+ ret = pm_genpd_init(pd, NULL, false);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, pd);
+ if (ret)
+ goto err_add_provider;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err_add_provider:
+ pm_genpd_remove(pd);
+
+ return ret;
+}
+
+static void airoha_cpu_pmdomain_remove(struct platform_device *pdev)
+{
+ struct airoha_cpu_pmdomain_priv *priv = platform_get_drvdata(pdev);
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&priv->pd);
+}
+
+static const struct of_device_id airoha_cpu_pmdomain_of_match[] = {
+ { .compatible = "airoha,en7581-cpufreq" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, airoha_cpu_pmdomain_of_match);
+
+static struct platform_driver airoha_cpu_pmdomain_driver = {
+ .probe = airoha_cpu_pmdomain_probe,
+ .remove = airoha_cpu_pmdomain_remove,
+ .driver = {
+ .name = "airoha-cpu-pmdomain",
+ .of_match_table = airoha_cpu_pmdomain_of_match,
+ },
+};
+module_platform_driver(airoha_cpu_pmdomain_driver);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPU PM domain driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index 0e4bd749d067..82df7e44250b 100644
--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
@@ -51,6 +53,56 @@ struct ti_sci_pm_domain {
#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
+static inline bool ti_sci_pd_is_valid_constraint(s32 val)
+{
+ return val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void ti_sci_pd_set_lat_constraint(struct device *dev, s32 val)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(genpd);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+ u16 val_ms;
+ int ret;
+
+ /* PM QoS latency unit is usecs, TI SCI uses msecs */
+ val_ms = val / USEC_PER_MSEC;
+ ret = ti_sci->ops.pm_ops.set_latency_constraint(ti_sci, val_ms, TISCI_MSG_CONSTRAINT_SET);
+ if (ret)
+ dev_err(dev, "ti_sci_pd: set latency constraint failed: ret=%d\n",
+ ret);
+ else
+ dev_dbg(dev, "ti_sci_pd: ID:%d set latency constraint %d\n",
+ pd->idx, val);
+}
+#endif
+
+static inline void ti_sci_pd_set_wkup_constraint(struct device *dev)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(genpd);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+ int ret;
+
+ if (device_may_wakeup(dev)) {
+ /*
+ * If the device can wake up using IO daisy-chain wakeups,
+ * we do not want to set a constraint.
+ */
+ if (dev->power.wakeirq) {
+ dev_dbg(dev, "%s: has wake IRQ, not setting constraints\n", __func__);
+ return;
+ }
+
+ ret = ti_sci->ops.pm_ops.set_device_constraint(ti_sci, pd->idx,
+ TISCI_MSG_CONSTRAINT_SET);
+ if (!ret)
+ dev_dbg(dev, "ti_sci_pd: ID:%d set device constraint.\n", pd->idx);
+ }
+}
+
/*
* ti_sci_pd_power_off(): genpd power down hook
* @domain: pointer to the powerdomain to power off
@@ -79,6 +131,28 @@ static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
}
+#ifdef CONFIG_PM_SLEEP
+static int ti_sci_pd_suspend(struct device *dev)
+{
+ int ret;
+ s32 val;
+
+ ret = pm_generic_suspend(dev);
+ if (ret)
+ return ret;
+
+ val = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
+ if (ti_sci_pd_is_valid_constraint(val))
+ ti_sci_pd_set_lat_constraint(dev, val);
+
+ ti_sci_pd_set_wkup_constraint(dev);
+
+ return 0;
+}
+#else
+#define ti_sci_pd_suspend NULL
+#endif
+
/*
* ti_sci_pd_xlate(): translation service for TI SCI genpds
* @genpdspec: DT identification data for the genpd
@@ -182,6 +256,13 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
pd->pd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pd->idx = args.args[0];
pd->parent = pd_provider;
+ /*
+ * If SCI constraint functions are present, then firmware
+ * supports the constraints API.
+ */
+ if (pd_provider->ti_sci->ops.pm_ops.set_device_constraint &&
+ pd_provider->ti_sci->ops.pm_ops.set_latency_constraint)
+ pd->pd.domain.ops.suspend = ti_sci_pd_suspend;
pm_genpd_init(&pd->pd, NULL, true);
diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c
index 682a9beac69e..e8f5030f2639 100644
--- a/drivers/power/sequencing/pwrseq-qcom-wcn.c
+++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c
@@ -272,6 +272,24 @@ static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = {
.targets = pwrseq_qcom_wcn_targets,
};
+static const char *const pwrseq_wcn6750_vregs[] = {
+ "vddaon",
+ "vddasd",
+ "vddpmu",
+ "vddrfa0p8",
+ "vddrfa1p2",
+ "vddrfa1p7",
+ "vddrfa2p2",
+};
+
+static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn6750_of_data = {
+ .vregs = pwrseq_wcn6750_vregs,
+ .num_vregs = ARRAY_SIZE(pwrseq_wcn6750_vregs),
+ .pwup_delay_ms = 50,
+ .gpio_enable_delay_ms = 5,
+ .targets = pwrseq_qcom_wcn_targets,
+};
+
static const char *const pwrseq_wcn6855_vregs[] = {
"vddio",
"vddaon",
@@ -378,6 +396,13 @@ static int pwrseq_qcom_wcn_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ctx->bt_gpio),
"Failed to get the Bluetooth enable GPIO\n");
+ /*
+ * FIXME: This should actually be GPIOD_OUT_LOW, but doing so would
+ * cause the WLAN power to be toggled, resulting in PCIe link down.
+ * Since the PCIe controller driver is not handling link down currently,
+ * the device becomes unusable. So we need to keep this workaround until
+ * the link down handling is implemented in the controller driver.
+ */
ctx->wlan_gpio = devm_gpiod_get_optional(dev, "wlan-enable",
GPIOD_ASIS);
if (IS_ERR(ctx->wlan_gpio))
@@ -431,6 +456,10 @@ static const struct of_device_id pwrseq_qcom_wcn_of_match[] = {
.compatible = "qcom,wcn7850-pmu",
.data = &pwrseq_wcn7850_of_data,
},
+ {
+ .compatible = "qcom,wcn6750-pmu",
+ .data = &pwrseq_wcn6750_of_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, pwrseq_qcom_wcn_of_match);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 5e793b80fd6b..77d75e1f14a9 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1265,6 +1265,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &rapl_defaults_core),
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 77a36e7bddd5..b932425ddc6a 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -296,7 +296,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (ptp->info->do_aux_work) {
kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
- ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+ ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
if (IS_ERR(ptp->kworker)) {
err = PTR_ERR(ptp->kworker);
pr_err("failed to create ptp aux_worker %d\n", err);
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 5feecaadde8e..7f08c70d8123 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -3692,7 +3692,7 @@ DEVICE_FREQ_GROUP(freq4, 3);
static ssize_t
disciplining_config_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3727,7 +3727,7 @@ out:
static ssize_t
disciplining_config_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3750,11 +3750,11 @@ disciplining_config_write(struct file *filp, struct kobject *kobj,
return err;
}
-static BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
+static const BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
static ssize_t
temperature_table_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3789,7 +3789,7 @@ out:
static ssize_t
temperature_table_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3812,7 +3812,7 @@ temperature_table_write(struct file *filp, struct kobject *kobj,
return err;
}
-static BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
+static const BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
static struct attribute *fb_timecard_attrs[] = {
&dev_attr_serialnum.attr,
@@ -3867,7 +3867,7 @@ static struct attribute *art_timecard_attrs[] = {
NULL,
};
-static struct bin_attribute *bin_art_timecard_attrs[] = {
+static const struct bin_attribute *const bin_art_timecard_attrs[] = {
&bin_attr_disciplining_config,
&bin_attr_temperature_table,
NULL,
@@ -3875,7 +3875,7 @@ static struct bin_attribute *bin_art_timecard_attrs[] = {
static const struct attribute_group art_timecard_group = {
.attrs = art_timecard_attrs,
- .bin_attrs = bin_art_timecard_attrs,
+ .bin_attrs_new = bin_art_timecard_attrs,
};
static const struct ocp_attr_group art_timecard_groups[] = {
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index fb3eadf6fbc4..b6c16139ce4a 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -66,20 +66,16 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
pci_set_master(pci);
- ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
- if (ret)
- return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
-
info = (const struct dwc_pwm_info *)id->driver_data;
ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
- /*
- * No need to check for pcim_iomap_table() failure,
- * pcim_iomap_regions() already does it for us.
- */
- ddata->io_base = pcim_iomap_table(pci)[0];
+ ddata->io_base = pcim_iomap_region(pci, 0, "pwm-dwc");
+ if (IS_ERR(ddata->io_base))
+ return dev_err_probe(dev, PTR_ERR(ddata->io_base),
+ "Failed to request / iomap PCI BAR\n");
+
ddata->info = info;
for (idx = 0; idx < ddata->info->nr; idx++) {
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index ddc2a4ca90fd..ae25d9321d75 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -18,6 +18,7 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct pwm_lpss_boardinfo *info;
+ void __iomem *io_base;
struct pwm_chip *chip;
int err;
@@ -25,12 +26,12 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
if (err < 0)
return err;
- err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
- if (err)
- return err;
+ io_base = pcim_iomap_region(pdev, 0, "pwm-lpss");
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
info = (struct pwm_lpss_boardinfo *)id->driver_data;
- chip = devm_pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
+ chip = devm_pwm_lpss_probe(&pdev->dev, io_base, info);
if (IS_ERR(chip))
return PTR_ERR(chip);
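
Both PWM conversions above follow the same pattern: pcim_iomap_region() requests and maps a single BAR in one call and returns the mapping (or an ERR_PTR), so the separate pcim_iomap_table() lookup disappears. In generic form (sketch; "my-driver" is a placeholder name):

void __iomem *base;

base = pcim_iomap_region(pdev, 0, "my-driver");
if (IS_ERR(base))
	return PTR_ERR(base);
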
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 989731256f50..5832dce8ed9d 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -167,8 +167,12 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
/* Keep PWM counter clock refcount in sync with PWM initial state */
- if (state->enabled)
- clk_enable(priv->clk);
+ if (state->enabled) {
+ int ret = clk_enable(priv->clk);
+
+ if (ret)
+ return ret;
+ }
regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val);
presc = FIELD_GET(STM32_LPTIM_PRESC, val);
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 17e591f61efb..a59de4de18b6 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -858,8 +858,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
chip->ops = &stm32pwm_ops;
/* Initialize clock refcount to number of enabled PWM channels. */
- for (i = 0; i < num_enabled; i++)
- clk_enable(priv->clk);
+ for (i = 0; i < num_enabled; i++) {
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
+ }
ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
diff --git a/drivers/ras/amd/atl/Kconfig b/drivers/ras/amd/atl/Kconfig
index 551680073e43..6e03942cd7da 100644
--- a/drivers/ras/amd/atl/Kconfig
+++ b/drivers/ras/amd/atl/Kconfig
@@ -10,6 +10,7 @@
config AMD_ATL
tristate "AMD Address Translation Library"
depends on AMD_NB && X86_64 && RAS
+ depends on AMD_NODE
depends on MEMORY_FAILURE
default N
help
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 143d04c779a8..f9be26d25348 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -18,6 +18,7 @@
#include <linux/ras.h>
#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
#include "reg_fields.h"
diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c
index 9876cc05867e..3a9d772491a8 100644
--- a/drivers/regulator/bd96801-regulator.c
+++ b/drivers/regulator/bd96801-regulator.c
@@ -5,12 +5,7 @@
/*
* This version of the "BD86801 scalable PMIC"'s driver supports only very
* basic set of the PMIC features. Most notably, there is no support for
- * the ERRB interrupt and the configurations which should be done when the
- * PMIC is in STBY mode.
- *
- * Supporting the ERRB interrupt would require dropping the regmap-IRQ
- * usage or working around (or accepting a presense of) a naming conflict
- * in debugFS IRQs.
+ * the configurations which should be done when the PMIC is in STBY mode.
*
* Being able to reliably do the configurations like changing the
* regulator safety limits (like limits for the over/under -voltages, over
@@ -22,16 +17,14 @@
* be the need to configure these safety limits. Hence it's not simple to
* come up with a generic solution.
*
- * Users who require the ERRB handling and STBY state configurations can
- * have a look at the original RFC:
+ * Users who require the STBY state configurations can have a look at the
+ * original RFC:
* https://lore.kernel.org/all/cover.1712920132.git.mazziesaccount@gmail.com/
- * which implements a workaround to debugFS naming conflict and some of
- * the safety limit configurations - but leaves the state change handling
- * and synchronization to be implemented.
+ * which implements some of the safety limit configurations - but leaves the
+ * state change handling and synchronization to be implemented.
*
* It would be great to hear (and receive a patch!) if you implement the
- * STBY configuration support or a proper fix to the debugFS naming
- * conflict in your downstream driver ;)
+ * STBY configuration support in your downstream driver ;)
*/
#include <linux/cleanup.h>
@@ -728,6 +721,95 @@ static int initialize_pmic_data(struct device *dev,
return 0;
}
+static int bd96801_map_event_all(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int i;
+
+ for (i = 0; i < rid->num_states; i++) {
+ rid->states[i].notifs = REGULATOR_EVENT_FAIL;
+ rid->states[i].errors = REGULATOR_ERROR_FAIL;
+ *dev_mask |= BIT(i);
+ }
+
+ return 0;
+}
+
+static int bd96801_rdev_errb_irqs(struct platform_device *pdev,
+ struct regulator_dev *rdev)
+{
+ int i;
+ void *retp;
+ static const char * const single_out_errb_irqs[] = {
+ "bd96801-%s-pvin-err", "bd96801-%s-ovp-err",
+ "bd96801-%s-uvp-err", "bd96801-%s-shdn-err",
+ };
+
+ for (i = 0; i < ARRAY_SIZE(single_out_errb_irqs); i++) {
+ struct regulator_irq_desc id = {
+ .map_event = bd96801_map_event_all,
+ .irq_off_ms = 1000,
+ };
+ struct regulator_dev *rdev_arr[1];
+ char tmp[255];
+ int irq;
+
+ snprintf(tmp, 255, single_out_errb_irqs[i], rdev->desc->name);
+ tmp[254] = 0;
+ id.name = tmp;
+
+ irq = platform_get_irq_byname(pdev, tmp);
+ if (irq < 0)
+ continue;
+
+ rdev_arr[0] = rdev;
+ retp = devm_regulator_irq_helper(&pdev->dev, &id, irq, 0,
+ REGULATOR_ERROR_FAIL, NULL,
+ rdev_arr, 1);
+ if (IS_ERR(retp))
+ return PTR_ERR(retp);
+
+ }
+ return 0;
+}
+
+static int bd96801_global_errb_irqs(struct platform_device *pdev,
+ struct regulator_dev **rdev, int num_rdev)
+{
+ int i, num_irqs;
+ void *retp;
+ static const char * const global_errb_irqs[] = {
+ "bd96801-otp-err", "bd96801-dbist-err", "bd96801-eep-err",
+ "bd96801-abist-err", "bd96801-prstb-err", "bd96801-drmoserr1",
+ "bd96801-drmoserr2", "bd96801-slave-err", "bd96801-vref-err",
+ "bd96801-tsd", "bd96801-uvlo-err", "bd96801-ovlo-err",
+ "bd96801-osc-err", "bd96801-pon-err", "bd96801-poff-err",
+ "bd96801-cmd-shdn-err", "bd96801-int-shdn-err"
+ };
+
+ num_irqs = ARRAY_SIZE(global_errb_irqs);
+ for (i = 0; i < num_irqs; i++) {
+ int irq;
+ struct regulator_irq_desc id = {
+ .name = global_errb_irqs[i],
+ .map_event = bd96801_map_event_all,
+ .irq_off_ms = 1000,
+ };
+
+ irq = platform_get_irq_byname(pdev, global_errb_irqs[i]);
+ if (irq < 0)
+ continue;
+
+ retp = devm_regulator_irq_helper(&pdev->dev, &id, irq, 0,
+ REGULATOR_ERROR_FAIL, NULL,
+ rdev, num_rdev);
+ if (IS_ERR(retp))
+ return PTR_ERR(retp);
+ }
+
+ return 0;
+}
+
static int bd96801_rdev_intb_irqs(struct platform_device *pdev,
struct bd96801_pmic_data *pdata,
struct bd96801_irqinfo *iinfo,
@@ -783,11 +865,10 @@ static int bd96801_rdev_intb_irqs(struct platform_device *pdev,
return 0;
}
-
-
static int bd96801_probe(struct platform_device *pdev)
{
struct regulator_dev *ldo_errs_rdev_arr[BD96801_NUM_LDOS];
+ struct regulator_dev *all_rdevs[BD96801_NUM_REGULATORS];
struct bd96801_regulator_data *rdesc;
struct regulator_config config = {};
int ldo_errs_arr[BD96801_NUM_LDOS];
@@ -795,6 +876,7 @@ static int bd96801_probe(struct platform_device *pdev)
int temp_notif_ldos = 0;
struct device *parent;
int i, ret;
+ bool use_errb;
void *retp;
parent = pdev->dev.parent;
@@ -819,6 +901,13 @@ static int bd96801_probe(struct platform_device *pdev)
config.regmap = pdata->regmap;
config.dev = parent;
+ ret = of_property_match_string(pdev->dev.parent->of_node,
+ "interrupt-names", "errb");
+ if (ret < 0)
+ use_errb = false;
+ else
+ use_errb = true;
+
ret = bd96801_walk_regulator_dt(&pdev->dev, pdata->regmap, rdesc,
BD96801_NUM_REGULATORS);
if (ret)
@@ -837,6 +926,7 @@ static int bd96801_probe(struct platform_device *pdev)
rdesc[i].desc.name);
return PTR_ERR(rdev);
}
+ all_rdevs[i] = rdev;
/*
* LDOs don't have own temperature monitoring. If temperature
* notification was requested for this LDO from DT then we will
@@ -856,6 +946,12 @@ static int bd96801_probe(struct platform_device *pdev)
if (ret)
return ret;
}
+ /* Register per regulator ERRB notifiers */
+ if (use_errb) {
+ ret = bd96801_rdev_errb_irqs(pdev, rdev);
+ if (ret)
+ return ret;
+ }
}
if (temp_notif_ldos) {
int irq;
@@ -877,6 +973,10 @@ static int bd96801_probe(struct platform_device *pdev)
return PTR_ERR(retp);
}
+ if (use_errb)
+ return bd96801_global_errb_irqs(pdev, all_rdevs,
+ ARRAY_SIZE(all_rdevs));
+
return 0;
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 8cb948a91e60..6c0ef1182248 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -917,6 +917,26 @@ static ssize_t bypass_show(struct device *dev,
}
static DEVICE_ATTR_RO(bypass);
+static ssize_t power_budget_milliwatt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", rdev->constraints->pw_budget_mW);
+}
+static DEVICE_ATTR_RO(power_budget_milliwatt);
+
+static ssize_t power_requested_milliwatt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", rdev->pw_requested_mW);
+}
+static DEVICE_ATTR_RO(power_requested_milliwatt);
+
#define REGULATOR_ERROR_ATTR(name, bit) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
@@ -1149,6 +1169,10 @@ static void print_constraints_debug(struct regulator_dev *rdev)
if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
count += scnprintf(buf + count, len - count, "standby ");
+ if (constraints->pw_budget_mW)
+ count += scnprintf(buf + count, len - count, "%d mW budget",
+ constraints->pw_budget_mW);
+
if (!count)
count = scnprintf(buf, len, "no parameters");
else
@@ -1627,6 +1651,9 @@ static int set_machine_constraints(struct regulator_dev *rdev)
rdev->last_off = ktime_get();
}
+ if (!rdev->constraints->pw_budget_mW)
+ rdev->constraints->pw_budget_mW = INT_MAX;
+
print_constraints(rdev);
return 0;
}
@@ -1936,6 +1963,20 @@ static struct regulator_dev *regulator_lookup_by_name(const char *name)
return dev ? dev_to_rdev(dev) : NULL;
}
+static struct regulator_dev *regulator_dt_lookup(struct device *dev,
+ const char *supply)
+{
+ struct regulator_dev *r = NULL;
+
+ if (dev_of_node(dev)) {
+ r = of_regulator_dev_lookup(dev, dev_of_node(dev), supply);
+ if (PTR_ERR(r) == -ENODEV)
+ r = NULL;
+ }
+
+ return r;
+}
+
/**
* regulator_dev_lookup - lookup a regulator device.
* @dev: device for regulator "consumer".
@@ -1960,16 +2001,9 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
regulator_supply_alias(&dev, &supply);
/* first do a dt based lookup */
- if (dev_of_node(dev)) {
- r = of_regulator_dev_lookup(dev, dev_of_node(dev), supply);
- if (!IS_ERR(r))
- return r;
- if (PTR_ERR(r) == -EPROBE_DEFER)
- return r;
-
- if (PTR_ERR(r) == -ENODEV)
- r = NULL;
- }
+ r = regulator_dt_lookup(dev, supply);
+ if (r)
+ return r;
/* if not found, try doing it non-dt way */
if (dev)
@@ -2015,7 +2049,17 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (rdev->supply)
return 0;
- r = regulator_dev_lookup(dev, rdev->supply_name);
+ /*
+ * First do a DT based lookup on the node described in the virtual
+ * device.
+ */
+ r = regulator_dt_lookup(&rdev->dev, rdev->supply_name);
+
+ /*
+ * If the regulator is not found, fall back to the usual search path
+ * in the parent device.
+ */
+ if (!r)
+ r = regulator_dev_lookup(dev, rdev->supply_name);
+
if (IS_ERR(r)) {
ret = PTR_ERR(r);
@@ -4585,6 +4629,87 @@ int regulator_get_current_limit(struct regulator *regulator)
EXPORT_SYMBOL_GPL(regulator_get_current_limit);
/**
+ * regulator_get_unclaimed_power_budget - get regulator unclaimed power budget
+ * @regulator: regulator source
+ *
+ * Return: Unclaimed power budget of the regulator in mW.
+ */
+int regulator_get_unclaimed_power_budget(struct regulator *regulator)
+{
+ return regulator->rdev->constraints->pw_budget_mW -
+ regulator->rdev->pw_requested_mW;
+}
+EXPORT_SYMBOL_GPL(regulator_get_unclaimed_power_budget);
+
+/**
+ * regulator_request_power_budget - request power budget on a regulator
+ * @regulator: regulator source
+ * @pw_req: Power requested
+ *
+ * Return: 0 on success or a negative error number on failure.
+ */
+int regulator_request_power_budget(struct regulator *regulator,
+ unsigned int pw_req)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int ret = 0, pw_tot_req;
+
+ regulator_lock(rdev);
+ if (rdev->supply) {
+ ret = regulator_request_power_budget(rdev->supply, pw_req);
+ if (ret < 0)
+ goto out;
+ }
+
+ pw_tot_req = rdev->pw_requested_mW + pw_req;
+ if (pw_tot_req > rdev->constraints->pw_budget_mW) {
+ rdev_warn(rdev, "power requested %d mW out of budget %d mW",
+ pw_req,
+ rdev->constraints->pw_budget_mW - rdev->pw_requested_mW);
+ regulator_notifier_call_chain(rdev,
+ REGULATOR_EVENT_OVER_CURRENT_WARN,
+ NULL);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ rdev->pw_requested_mW = pw_tot_req;
+out:
+ regulator_unlock(rdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_request_power_budget);
+
+/**
+ * regulator_free_power_budget - free power budget on a regulator
+ * @regulator: regulator source
+ * @pw: Power to be released.
+ */
+void regulator_free_power_budget(struct regulator *regulator,
+ unsigned int pw)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int pw_tot_req;
+
+ regulator_lock(rdev);
+ if (rdev->supply)
+ regulator_free_power_budget(rdev->supply, pw);
+
+ pw_tot_req = rdev->pw_requested_mW - pw;
+ if (pw_tot_req >= 0)
+ rdev->pw_requested_mW = pw_tot_req;
+ else
+ rdev_warn(rdev,
+ "too much power freed %d mW (already requested %d mW)",
+ pw, rdev->pw_requested_mW);
+
+ regulator_unlock(rdev);
+}
+EXPORT_SYMBOL_GPL(regulator_free_power_budget);
+
+/**
* regulator_set_mode - set regulator operating mode
* @regulator: regulator source
* @mode: operating mode - one of the REGULATOR_MODE constants
@@ -5222,6 +5347,8 @@ static struct attribute *regulator_dev_attrs[] = {
&dev_attr_suspend_standby_mode.attr,
&dev_attr_suspend_mem_mode.attr,
&dev_attr_suspend_disk_mode.attr,
+ &dev_attr_power_budget_milliwatt.attr,
+ &dev_attr_power_requested_milliwatt.attr,
NULL
};
@@ -5303,6 +5430,10 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj,
attr == &dev_attr_suspend_disk_mode.attr)
return ops->set_suspend_mode ? mode : 0;
+ if (attr == &dev_attr_power_budget_milliwatt.attr ||
+ attr == &dev_attr_power_requested_milliwatt.attr)
+ return rdev->constraints->pw_budget_mW != INT_MAX ? mode : 0;
+
return mode;
}
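
From a consumer's point of view, the new API brackets a power-hungry operation with a claim on the supply's budget; a sketch (hypothetical consumer; the 1500 mW figure is arbitrary):

static int example_power_up(struct regulator *reg)
{
	int ret;

	/* Fails with -ERANGE if the remaining budget is too small */
	ret = regulator_request_power_budget(reg, 1500);
	if (ret)
		return ret;

	ret = regulator_enable(reg);
	if (ret)
		regulator_free_power_budget(reg, 1500);

	return ret;
}
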
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index e5b4b93c07e3..011088c57891 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -125,6 +125,9 @@ static int of_get_regulation_constraints(struct device *dev,
if (constraints->min_uA != constraints->max_uA)
constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;
+ if (!of_property_read_u32(np, "regulator-power-budget-milliwatt", &pval))
+ constraints->pw_budget_mW = pval;
+
constraints->boot_on = of_property_read_bool(np, "regulator-boot-on");
constraints->always_on = of_property_read_bool(np, "regulator-always-on");
if (!constraints->always_on) /* status change should be possible. */
@@ -446,7 +449,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
"failed to parse DT for regulator %pOFn\n",
child);
of_node_put(child);
- return -EINVAL;
+ goto err_put;
}
match->of_node = of_node_get(child);
count++;
@@ -455,6 +458,18 @@ int of_regulator_match(struct device *dev, struct device_node *node,
}
return count;
+
+err_put:
+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+
+ match->init_data = NULL;
+ if (match->of_node) {
+ of_node_put(match->of_node);
+ match->of_node = NULL;
+ }
+ }
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_regulator_match);
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 9714afe347dc..faa6b79c27d7 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -247,6 +247,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.ramp_mask = BUCK1_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
.n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -272,6 +273,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK2_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ_STBYREQ,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -301,6 +303,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK3OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK3CTRL,
.enable_mask = BUCK3_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.ramp_reg = PCA9450_REG_BUCK3CTRL,
.ramp_mask = BUCK3_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -330,6 +333,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK4OUT_MASK,
.enable_reg = PCA9450_REG_BUCK4CTRL,
.enable_mask = BUCK4_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -348,6 +352,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK5OUT_MASK,
.enable_reg = PCA9450_REG_BUCK5CTRL,
.enable_mask = BUCK5_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -366,6 +371,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK6OUT_MASK,
.enable_reg = PCA9450_REG_BUCK6CTRL,
.enable_mask = BUCK6_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -481,6 +487,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.ramp_reg = PCA9450_REG_BUCK1CTRL,
.ramp_mask = BUCK1_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -510,6 +517,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK2_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ_STBYREQ,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -539,6 +547,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK4OUT_MASK,
.enable_reg = PCA9450_REG_BUCK4CTRL,
.enable_mask = BUCK4_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -557,6 +566,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK5OUT_MASK,
.enable_reg = PCA9450_REG_BUCK5CTRL,
.enable_mask = BUCK5_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -575,6 +585,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK6OUT_MASK,
.enable_reg = PCA9450_REG_BUCK6CTRL,
.enable_mask = BUCK6_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -806,6 +817,24 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = {
},
{
.desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO3,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO3_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo34_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
+ .vsel_reg = PCA9450_REG_LDO3CTRL,
+ .vsel_mask = LDO3OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO3CTRL,
+ .enable_mask = LDO3_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
.name = "ldo4",
.of_match = of_match_ptr("LDO4"),
.regulators_node = of_match_ptr("regulators"),
@@ -905,6 +934,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators);
break;
case PCA9450_TYPE_PCA9451A:
+ case PCA9450_TYPE_PCA9452:
regulator_desc = pca9451a_regulators;
pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators);
break;
@@ -921,25 +951,21 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450->regmap = devm_regmap_init_i2c(i2c,
&pca9450_regmap_config);
- if (IS_ERR(pca9450->regmap)) {
- dev_err(&i2c->dev, "regmap initialization failed\n");
- return PTR_ERR(pca9450->regmap);
- }
+ if (IS_ERR(pca9450->regmap))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pca9450->regmap),
+ "regmap initialization failed\n");
ret = regmap_read(pca9450->regmap, PCA9450_REG_DEV_ID, &device_id);
- if (ret) {
- dev_err(&i2c->dev, "Read device id error\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Read device id error\n");
/* Check your board and dts for match the right pmic */
if (((device_id >> 4) != 0x1 && type == PCA9450_TYPE_PCA9450A) ||
((device_id >> 4) != 0x3 && type == PCA9450_TYPE_PCA9450BC) ||
- ((device_id >> 4) != 0x9 && type == PCA9450_TYPE_PCA9451A)) {
- dev_err(&i2c->dev, "Device id(%x) mismatched\n",
- device_id >> 4);
- return -EINVAL;
- }
+ ((device_id >> 4) != 0x9 && type == PCA9450_TYPE_PCA9451A) ||
+ ((device_id >> 4) != 0x9 && type == PCA9450_TYPE_PCA9452))
+ return dev_err_probe(&i2c->dev, -EINVAL,
+ "Device id(%x) mismatched\n", device_id >> 4);
for (i = 0; i < pca9450->rcnt; i++) {
const struct regulator_desc *desc;
@@ -949,17 +975,16 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
r = &regulator_desc[i];
desc = &r->desc;
+ if (type == PCA9450_TYPE_PCA9451A && !strcmp(desc->name, "ldo3"))
+ continue;
+
config.regmap = pca9450->regmap;
config.dev = pca9450->dev;
rdev = devm_regulator_register(pca9450->dev, desc, &config);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
- dev_err(pca9450->dev,
- "Failed to register regulator(%s): %d\n",
- desc->name, ret);
- return ret;
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(pca9450->dev, PTR_ERR(rdev),
+ "Failed to register regulator(%s)\n", desc->name);
}
if (pca9450->irq) {
@@ -967,29 +992,24 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450_irq_handler,
(IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
"pca9450-irq", pca9450);
- if (ret != 0) {
- dev_err(pca9450->dev, "Failed to request IRQ: %d\n",
- pca9450->irq);
- return ret;
- }
+ if (ret != 0)
+ return dev_err_probe(pca9450->dev, ret, "Failed to request IRQ: %d\n",
+ pca9450->irq);
+
/* Unmask all interrupt except PWRON/WDOG/RSVD */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK,
IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS |
IRQ_THERM_105 | IRQ_THERM_125,
IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD);
- if (ret) {
- dev_err(&i2c->dev, "Unmask irq error\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
}
/* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */
ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS,
BUCK123_PRESET_EN);
- if (ret) {
- dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to clear PRESET_EN bit\n");
if (of_property_read_bool(i2c->dev.of_node, "nxp,wdog_b-warm-reset"))
reset_ctrl = WDOG_B_CFG_WARM;
@@ -999,20 +1019,16 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
/* Set reset behavior on assertion of WDOG_B signal */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
WDOG_B_CFG_MASK, reset_ctrl);
- if (ret) {
- dev_err(&i2c->dev, "Failed to set WDOG_B reset behavior\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to set WDOG_B reset behavior\n");
if (of_property_read_bool(i2c->dev.of_node, "nxp,i2c-lt-enable")) {
/* Enable I2C Level Translator */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_CONFIG2,
I2C_LT_MASK, I2C_LT_ON_STANDBY_RUN);
- if (ret) {
- dev_err(&i2c->dev,
- "Failed to enable I2C level translator\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret,
+ "Failed to enable I2C level translator\n");
}
/*
@@ -1022,10 +1038,9 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
*/
pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
- if (IS_ERR(pca9450->sd_vsel_gpio)) {
- dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
- return PTR_ERR(pca9450->sd_vsel_gpio);
- }
+ if (IS_ERR(pca9450->sd_vsel_gpio))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pca9450->sd_vsel_gpio),
+ "Failed to get SD_VSEL GPIO\n");
dev_info(&i2c->dev, "%s probed.\n",
type == PCA9450_TYPE_PCA9450A ? "pca9450a" :
@@ -1051,6 +1066,10 @@ static const struct of_device_id pca9450_of_match[] = {
.compatible = "nxp,pca9451a",
.data = (void *)PCA9450_TYPE_PCA9451A,
},
+ {
+ .compatible = "nxp,pca9452",
+ .data = (void *)PCA9450_TYPE_PCA9452,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, pca9450_of_match);
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
index b4065356392f..aa65077f9d41 100644
--- a/drivers/regulator/tps65219-regulator.c
+++ b/drivers/regulator/tps65219-regulator.c
@@ -287,21 +287,6 @@ static irqreturn_t tps65219_regulator_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int tps65219_get_rdev_by_name(const char *regulator_name,
- struct regulator_dev *rdevtbl[7],
- struct regulator_dev **dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(regulators); i++) {
- if (strcmp(regulator_name, regulators[i].name) == 0) {
- *dev = rdevtbl[i];
- return 0;
- }
- }
- return -EINVAL;
-}
-
static int tps65219_regulator_probe(struct platform_device *pdev)
{
struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
@@ -312,23 +297,18 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
int irq;
struct tps65219_regulator_irq_data *irq_data;
struct tps65219_regulator_irq_type *irq_type;
- struct regulator_dev *rdevtbl[7];
config.dev = tps->dev;
config.driver_data = tps;
config.regmap = tps->regmap;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
- dev_dbg(tps->dev, "%s regul i= %d START", __func__, i);
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
&config);
- if (IS_ERR(rdev)) {
- dev_err(tps->dev, "failed to register %s regulator\n",
- regulators[i].name);
- return PTR_ERR(rdev);
- }
- rdevtbl[i] = rdev;
- dev_dbg(tps->dev, "%s regul i= %d COMPLETED", __func__, i);
+ if (IS_ERR(rdev))
+ return dev_err_probe(tps->dev, PTR_ERR(rdev),
+ "Failed to register %s regulator\n",
+ regulators[i].name);
}
irq_data = devm_kmalloc(tps->dev,
@@ -348,14 +328,6 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
irq_data[i].dev = tps->dev;
irq_data[i].type = irq_type;
- tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, &rdev);
- if (IS_ERR(rdev)) {
- dev_err(tps->dev, "Failed to get rdev for %s\n",
- irq_type->regulator_name);
- return -EINVAL;
- }
- irq_data[i].rdev = rdev;
-
error = devm_request_threaded_irq(tps->dev, irq, NULL,
tps65219_regulator_irq_handler,
IRQF_ONESHOT,
@@ -379,7 +351,7 @@ MODULE_DEVICE_TABLE(platform, tps65219_regulator_id_table);
static struct platform_driver tps65219_regulator_driver = {
.driver = {
- .name = "tps65219-pmic",
+ .name = "tps65219-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = tps65219_regulator_probe,
@@ -390,5 +362,4 @@ module_platform_driver(tps65219_regulator_driver);
MODULE_AUTHOR("Jerome Neanne <j-neanne@baylibre.com>");
MODULE_DESCRIPTION("TPS65219 voltage regulator driver");
-MODULE_ALIAS("platform:tps65219-pmic");
MODULE_LICENSE("GPL");
diff --git a/drivers/reset/amlogic/reset-meson-aux.c b/drivers/reset/amlogic/reset-meson-aux.c
index 61ce515d92a2..33c06013439e 100644
--- a/drivers/reset/amlogic/reset-meson-aux.c
+++ b/drivers/reset/amlogic/reset-meson-aux.c
@@ -11,20 +11,20 @@
#include <linux/auxiliary_bus.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
-#include <linux/slab.h>
#include "reset-meson.h"
-#include <soc/amlogic/reset-meson-aux.h>
-static DEFINE_IDA(meson_rst_aux_ida);
-
-struct meson_reset_adev {
- struct auxiliary_device adev;
- struct regmap *map;
+static const struct meson_reset_param meson_a1_audio_param = {
+ .reset_ops = &meson_reset_toggle_ops,
+ .reset_num = 32,
+ .level_offset = 0x28,
};
-#define to_meson_reset_adev(_adev) \
- container_of((_adev), struct meson_reset_adev, adev)
+static const struct meson_reset_param meson_a1_audio_vad_param = {
+ .reset_ops = &meson_reset_toggle_ops,
+ .reset_num = 6,
+ .level_offset = 0x8,
+};
static const struct meson_reset_param meson_g12a_audio_param = {
.reset_ops = &meson_reset_toggle_ops,
@@ -40,6 +40,12 @@ static const struct meson_reset_param meson_sm1_audio_param = {
static const struct auxiliary_device_id meson_reset_aux_ids[] = {
{
+ .name = "a1-audio-clkc.rst-a1",
+ .driver_data = (kernel_ulong_t)&meson_a1_audio_param,
+ }, {
+ .name = "a1-audio-clkc.rst-a1-vad",
+ .driver_data = (kernel_ulong_t)&meson_a1_audio_vad_param,
+ }, {
.name = "axg-audio-clkc.rst-g12a",
.driver_data = (kernel_ulong_t)&meson_g12a_audio_param,
}, {
@@ -54,10 +60,13 @@ static int meson_reset_aux_probe(struct auxiliary_device *adev,
{
const struct meson_reset_param *param =
(const struct meson_reset_param *)(id->driver_data);
- struct meson_reset_adev *raux =
- to_meson_reset_adev(adev);
+ struct regmap *map;
+
+ map = dev_get_regmap(adev->dev.parent, NULL);
+ if (!map)
+ return -EINVAL;
- return meson_reset_controller_register(&adev->dev, raux->map, param);
+ return meson_reset_controller_register(&adev->dev, map, param);
}
static struct auxiliary_driver meson_reset_aux_driver = {
@@ -66,70 +75,6 @@ static struct auxiliary_driver meson_reset_aux_driver = {
};
module_auxiliary_driver(meson_reset_aux_driver);
-static void meson_rst_aux_release(struct device *dev)
-{
- struct auxiliary_device *adev = to_auxiliary_dev(dev);
- struct meson_reset_adev *raux =
- to_meson_reset_adev(adev);
-
- ida_free(&meson_rst_aux_ida, adev->id);
- kfree(raux);
-}
-
-static void meson_rst_aux_unregister_adev(void *_adev)
-{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
-}
-
-int devm_meson_rst_aux_register(struct device *dev,
- struct regmap *map,
- const char *adev_name)
-{
- struct meson_reset_adev *raux;
- struct auxiliary_device *adev;
- int ret;
-
- raux = kzalloc(sizeof(*raux), GFP_KERNEL);
- if (!raux)
- return -ENOMEM;
-
- ret = ida_alloc(&meson_rst_aux_ida, GFP_KERNEL);
- if (ret < 0)
- goto raux_free;
-
- raux->map = map;
-
- adev = &raux->adev;
- adev->id = ret;
- adev->name = adev_name;
- adev->dev.parent = dev;
- adev->dev.release = meson_rst_aux_release;
- device_set_of_node_from_dev(&adev->dev, dev);
-
- ret = auxiliary_device_init(adev);
- if (ret)
- goto ida_free;
-
- ret = __auxiliary_device_add(adev, dev->driver->name);
- if (ret) {
- auxiliary_device_uninit(adev);
- return ret;
- }
-
- return devm_add_action_or_reset(dev, meson_rst_aux_unregister_adev,
- adev);
-
-ida_free:
- ida_free(&meson_rst_aux_ida, adev->id);
-raux_free:
- kfree(raux);
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_meson_rst_aux_register);
-
MODULE_DESCRIPTION("Amlogic Meson Reset Auxiliary driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 12d0535a874b..8a7f167e405e 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -176,6 +176,7 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
vdev->dev.parent = dev;
priv->vdev = vdev;
+ device_set_of_node_from_dev(&vdev->dev, dev);
error = platform_device_add(vdev);
if (error)
goto err_device_put;
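device_set_of_node_from_dev() points the child virtual device at the parent's of_node without stealing it, so DT lookups (properties, supplies) made from the child resolve against the controller's node. Shape of the sequence, with the child device name illustrative:

vdev = platform_device_alloc("rzg2l-usb-vbus-regulator", pdev->id);
if (!vdev)
	return -ENOMEM;
vdev->dev.parent = dev;
/* Share the parent's DT node with the child before adding it. */
device_set_of_node_from_dev(&vdev->dev, dev);
error = platform_device_add(vdev);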
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 6da47a65af61..28e92fad0ca1 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -56,7 +56,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
block->tag_set.nr_hw_queues = nr_hw_queues;
block->tag_set.queue_depth = queue_depth;
- block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 3fcfe029db1b..91bbe9d2e5ac 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -461,7 +461,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->tag_set.cmd_size = sizeof(blk_status_t);
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
- bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
bdev->tag_set.numa_node = NUMA_NO_NODE;
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
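These one-line deletions track the block-layer removal of BLK_MQ_F_SHOULD_MERGE: request merging is now the default for every tag set, so drivers simply stop setting the flag. A tag-set initialization after the change reduces to, roughly:

struct blk_mq_tag_set set = { };

set.ops          = &example_mq_ops;   /* driver's blk_mq_ops */
set.nr_hw_queues = 1;
set.queue_depth  = 64;
set.numa_node    = NUMA_NO_NODE;
/* No merge flag needed; only opt-ins such as BLK_MQ_F_BLOCKING
 * (when ->queue_rq may sleep) still go in .flags. */
ret = blk_mq_alloc_tag_set(&set);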
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6c91e422927f..07a6e8a7f05a 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -85,13 +85,6 @@ typedef unsigned int sclp_cmdw_t;
typedef u64 sccb_mask_t;
-struct sccb_header {
- u16 length;
- u8 function_code;
- u8 control_mask[3];
- u16 response_code;
-} __attribute__((packed));
-
struct init_sccb {
struct sccb_header header;
u16 _reserved;
@@ -196,7 +189,9 @@ struct read_info_sccb {
u8 byte_134; /* 134 */
u8 cpudirq; /* 135 */
u16 cbl; /* 136-137 */
- u8 _pad_138[EXT_SCCB_READ_SCP - 138];
+ u8 byte_138; /* 138 */
+ u8 byte_139; /* 139 */
+ u8 _pad_140[EXT_SCCB_READ_SCP - 140];
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
@@ -238,13 +233,6 @@ struct gds_vector {
u16 gds_id;
} __attribute__((packed));
-struct evbuf_header {
- u16 length;
- u8 type;
- u8 flags;
- u16 _reserved;
-} __attribute__((packed));
-
struct sclp_req {
struct list_head list; /* list_head for request queueing. */
sclp_cmdw_t command; /* sclp command to execute */
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index f56ea9b60e08..ae5d28987177 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -128,7 +128,7 @@ out:
}
static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
int rc;
@@ -142,7 +142,7 @@ static const struct bin_attribute ofb_bin_attr = {
.name = "event_data",
.mode = S_IWUSR,
},
- .write = sysfs_ofb_data_write,
+ .write_new = sysfs_ofb_data_write,
};
#endif
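This is part of the sysfs transition to const struct bin_attribute: the callback gains a const attribute pointer and is wired up through the transitional .write_new (or .read_new) member until all users are converted. Minimal sketch of the converted shape:

static ssize_t example_write(struct file *filp, struct kobject *kobj,
			     const struct bin_attribute *attr,
			     char *buf, loff_t off, size_t count)
{
	/* A real handler would consume buf; this sketch just accepts it. */
	return count;
}

static const struct bin_attribute example_attr = {
	.attr      = { .name = "event_data", .mode = S_IWUSR },
	.write_new = example_write,   /* const-correct callback slot */
};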
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 29156455970e..d9d6edaf8de8 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -55,6 +55,7 @@ static void __init sclp_early_facilities_detect(void)
if (sccb->fac91 & 0x40)
get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_GUEST;
sclp.has_diag204_bif = !!(sccb->fac98 & 0x80);
+ sclp.has_diag310 = !!(sccb->fac91 & 0x80);
if (sccb->cpuoff > 134) {
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
sclp.has_diag320 = !!(sccb->byte_134 & 0x04);
@@ -64,6 +65,8 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.has_sipl_eckd = !!(sccb->cbl & 0x2000);
}
+ if (sccb->cpuoff > 139)
+ sclp.has_diag324 = !!(sccb->byte_139 & 0x80);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index c3466a8c56bb..56400886f7fc 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -24,30 +24,11 @@
#define SCLP_ATYPE_PCI 2
-#define SCLP_ERRNOTIFY_AQ_RESET 0
-#define SCLP_ERRNOTIFY_AQ_REPAIR 1
-#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
-#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3
-
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
.send_mask = EVTYP_ERRNOTIFY_MASK,
};
-struct err_notify_evbuf {
- struct evbuf_header header;
- u8 action;
- u8 atype;
- u32 fh;
- u32 fid;
- u8 data[];
-} __packed;
-
-struct err_notify_sccb {
- struct sccb_header header;
- struct err_notify_evbuf evbuf;
-} __packed;
-
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
index c2dc9aadb7d2..8524c14affed 100644
--- a/drivers/s390/char/sclp_sd.c
+++ b/drivers/s390/char/sclp_sd.c
@@ -476,7 +476,7 @@ static struct kobj_type sclp_sd_file_ktype = {
* on EOF.
*/
static ssize_t data_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buffer,
+ const struct bin_attribute *attr, char *buffer,
loff_t off, size_t size)
{
struct sclp_sd_file *sd_file = to_sd_file(kobj);
@@ -539,7 +539,7 @@ static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di)
sysfs_bin_attr_init(&sd_file->data_attr);
sd_file->data_attr.attr.name = "data";
sd_file->data_attr.attr.mode = 0444;
- sd_file->data_attr.read = data_read;
+ sd_file->data_attr.read_new = data_read;
rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr);
if (rc) {
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index acd6790dba4d..61c07b4a0fe8 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -445,7 +445,7 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
- return cdev->private->dma_area->senseid.ciw + ciw_cnt;
+ return &cdev->private->dma_area->senseid.ciw[ciw_cnt];
return NULL;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 641f0dbb65a9..4bd4c00c9c0c 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -210,11 +210,10 @@ struct qdio_q {
qdio_handler_t (*handler);
struct qdio_irq *irq_ptr;
+
+	/* memory page (PAGE_SIZE) on which the slib and sl are placed */
+ void *sl_page;
struct sl *sl;
- /*
- * A page is allocated under this pointer and used for slib and sl.
- * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
- */
struct slib *slib;
} __attribute__ ((aligned(256)));
@@ -266,7 +265,7 @@ struct qdio_irq {
#define is_thinint_irq(irq) \
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
- css_general_characteristics.aif_osa)
+ css_general_characteristics.aif_qdio)
#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr))
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 99c0fd23022d..ea09aadaae4e 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -83,7 +83,7 @@ static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
for (i = 0; i < count; i++) {
q = queues[i];
- free_page((unsigned long) q->slib);
+ free_page((unsigned long)q->sl_page);
kmem_cache_free(qdio_q_cache, q);
}
}
@@ -109,12 +109,16 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
- q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
- if (!q->slib) {
+ q->sl_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!q->sl_page) {
kmem_cache_free(qdio_q_cache, q);
__qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
}
+ q->slib = q->sl_page;
+	/* As per the architecture, the SLIB is 2 KB long and the SL 1 KB. */
+ q->sl = (struct sl *)(q->slib + 1);
+
irq_ptr_qs[i] = q;
}
return 0;
@@ -142,11 +146,15 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
- struct slib *slib = q->slib;
+ struct slib *const slib = q->slib;
+ void *const sl_page = q->sl_page;
+ struct sl *const sl = q->sl;
/* queue must be cleared for qdio_establish */
memset(q, 0, sizeof(*q));
- memset(slib, 0, PAGE_SIZE);
+ memset(sl_page, 0, PAGE_SIZE);
+ q->sl_page = sl_page;
+ q->sl = sl;
q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
@@ -161,7 +169,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
int j;
DBF_HEX(&q, sizeof(void *));
- q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
@@ -423,7 +430,7 @@ int __init qdio_setup_init(void)
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
- (css_general_characteristics.aif_osa) ? 1 : 0);
+ (css_general_characteristics.aif_qdio) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
index a4eb45803f5e..57edc97bafd2 100644
--- a/drivers/s390/crypto/pkey_sysfs.c
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -184,7 +184,7 @@ static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
static ssize_t protkey_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -194,7 +194,7 @@ static ssize_t protkey_aes_128_read(struct file *filp,
static ssize_t protkey_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -204,7 +204,7 @@ static ssize_t protkey_aes_192_read(struct file *filp,
static ssize_t protkey_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -214,7 +214,7 @@ static ssize_t protkey_aes_256_read(struct file *filp,
static ssize_t protkey_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -224,7 +224,7 @@ static ssize_t protkey_aes_128_xts_read(struct file *filp,
static ssize_t protkey_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -234,7 +234,7 @@ static ssize_t protkey_aes_256_xts_read(struct file *filp,
static ssize_t protkey_aes_xts_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -244,7 +244,7 @@ static ssize_t protkey_aes_xts_128_read(struct file *filp,
static ssize_t protkey_aes_xts_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -254,7 +254,7 @@ static ssize_t protkey_aes_xts_256_read(struct file *filp,
static ssize_t protkey_hmac_512_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -264,7 +264,7 @@ static ssize_t protkey_hmac_512_read(struct file *filp,
static ssize_t protkey_hmac_1024_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -272,17 +272,17 @@ static ssize_t protkey_hmac_1024_read(struct file *filp,
buf, off, count);
}
-static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
-static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
-static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
-static BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
-
-static struct bin_attribute *protkey_attrs[] = {
+static const BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
+static const BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
+
+static const struct bin_attribute *const protkey_attrs[] = {
&bin_attr_protkey_aes_128,
&bin_attr_protkey_aes_192,
&bin_attr_protkey_aes_256,
@@ -295,9 +295,9 @@ static struct bin_attribute *protkey_attrs[] = {
NULL
};
-static struct attribute_group protkey_attr_group = {
- .name = "protkey",
- .bin_attrs = protkey_attrs,
+static const struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs_new = protkey_attrs,
};
/*
@@ -341,7 +341,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
static ssize_t ccadata_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -351,7 +351,7 @@ static ssize_t ccadata_aes_128_read(struct file *filp,
static ssize_t ccadata_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -361,7 +361,7 @@ static ssize_t ccadata_aes_192_read(struct file *filp,
static ssize_t ccadata_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -371,7 +371,7 @@ static ssize_t ccadata_aes_256_read(struct file *filp,
static ssize_t ccadata_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -381,7 +381,7 @@ static ssize_t ccadata_aes_128_xts_read(struct file *filp,
static ssize_t ccadata_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -389,13 +389,13 @@ static ssize_t ccadata_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
-static struct bin_attribute *ccadata_attrs[] = {
+static const struct bin_attribute *const ccadata_attrs[] = {
&bin_attr_ccadata_aes_128,
&bin_attr_ccadata_aes_192,
&bin_attr_ccadata_aes_256,
@@ -404,9 +404,9 @@ static struct bin_attribute *ccadata_attrs[] = {
NULL
};
-static struct attribute_group ccadata_attr_group = {
- .name = "ccadata",
- .bin_attrs = ccadata_attrs,
+static const struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs_new = ccadata_attrs,
};
#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
@@ -455,7 +455,7 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
static ssize_t ccacipher_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -465,7 +465,7 @@ static ssize_t ccacipher_aes_128_read(struct file *filp,
static ssize_t ccacipher_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -475,7 +475,7 @@ static ssize_t ccacipher_aes_192_read(struct file *filp,
static ssize_t ccacipher_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -485,7 +485,7 @@ static ssize_t ccacipher_aes_256_read(struct file *filp,
static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -495,7 +495,7 @@ static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -503,13 +503,13 @@ static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
-static struct bin_attribute *ccacipher_attrs[] = {
+static const struct bin_attribute *const ccacipher_attrs[] = {
&bin_attr_ccacipher_aes_128,
&bin_attr_ccacipher_aes_192,
&bin_attr_ccacipher_aes_256,
@@ -518,9 +518,9 @@ static struct bin_attribute *ccacipher_attrs[] = {
NULL
};
-static struct attribute_group ccacipher_attr_group = {
- .name = "ccacipher",
- .bin_attrs = ccacipher_attrs,
+static const struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs_new = ccacipher_attrs,
};
/*
@@ -570,7 +570,7 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
static ssize_t ep11_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -580,7 +580,7 @@ static ssize_t ep11_aes_128_read(struct file *filp,
static ssize_t ep11_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -590,7 +590,7 @@ static ssize_t ep11_aes_192_read(struct file *filp,
static ssize_t ep11_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -600,7 +600,7 @@ static ssize_t ep11_aes_256_read(struct file *filp,
static ssize_t ep11_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -610,7 +610,7 @@ static ssize_t ep11_aes_128_xts_read(struct file *filp,
static ssize_t ep11_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -618,13 +618,13 @@ static ssize_t ep11_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static struct bin_attribute *ep11_attrs[] = {
+static const struct bin_attribute *const ep11_attrs[] = {
&bin_attr_ep11_aes_128,
&bin_attr_ep11_aes_192,
&bin_attr_ep11_aes_256,
@@ -633,9 +633,9 @@ static struct bin_attribute *ep11_attrs[] = {
NULL
};
-static struct attribute_group ep11_attr_group = {
+static const struct attribute_group ep11_attr_group = {
.name = "ep11",
- .bin_attrs = ep11_attrs,
+ .bin_attrs_new = ep11_attrs,
};
const struct attribute_group *pkey_attr_groups[] = {
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index 5533bdcb0458..c424d36e89a6 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -4,10 +4,12 @@
#
config CXLFLASH
- tristate "Support for IBM CAPI Flash"
+ tristate "Support for IBM CAPI Flash (DEPRECATED)"
depends on PCI && SCSI && (CXL || OCXL) && EEH
select IRQ_POLL
- default m
help
+ The cxlflash driver is deprecated and will be removed in a future
+ kernel release.
+
Allows CAPI Accelerated IO to Flash
If unsure, say N.
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 60d62b93d624..62806f5e32e6 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3651,6 +3651,8 @@ static int cxlflash_probe(struct pci_dev *pdev,
int rc = 0;
int k;
+ dev_err_once(&pdev->dev, "DEPRECATION: cxlflash is deprecated and will be removed in a future kernel release\n");
+
dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
__func__, pdev->irq);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index adec0df24bc4..1cb517f731f4 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -16,7 +16,6 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -601,7 +600,7 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
return;
}
- blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
+ blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
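Every SCSI conversion in this series has the same shape: the bus-specific helpers blk_mq_pci_map_queues() and blk_mq_virtio_map_queues() give way to the generic blk_mq_map_hw_queues(), which takes a struct device and derives the queue-to-CPU mapping from the bus's IRQ affinity. Sketch of a converted handler (example_shost_to_pdev() is a hypothetical accessor):

static void example_map_queues(struct Scsi_Host *shost)
{
	struct blk_mq_queue_map *qmap =
		&shost->tag_set.map[HCTX_TYPE_DEFAULT];
	struct pci_dev *pdev = example_shost_to_pdev(shost);

	/* Generic: the same call works for PCI and virtio parents. */
	blk_mq_map_hw_queues(qmap, &pdev->dev, 0 /* vector offset */);
}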
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index a44768bceb9a..4101447bb8eb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -9,7 +9,6 @@
#include <linux/acpi.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dmapool.h>
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 5db931663ae4..35501d0aa655 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3328,8 +3328,8 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_map_hw_queues(qmap, hisi_hba->dev,
+ BASE_VECTORS_V3_HW);
qoff += qmap->nr_queues;
}
}
@@ -3345,7 +3345,7 @@ static const struct scsi_host_template sht_v3_hw = {
.slave_alloc = hisi_sas_slave_alloc,
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
.mq_poll = queue_complete_v3_hw,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 50f1dcb6d584..49abd7dd75a7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,7 +37,6 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -3193,7 +3192,7 @@ static void megasas_map_queues(struct Scsi_Host *shost)
map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
map->nr_queues = instance->msix_vectors - offset;
map->queue_offset = 0;
- blk_mq_pci_map_queues(map, instance->pdev, offset);
+ blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
qoff += map->nr_queues;
offset += map->nr_queues;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0c3e1ac076b5..0d72b5f1b69d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -12,7 +12,6 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 1bef88130d0c..1e8735538b23 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -4042,7 +4042,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f2a55aa5fe65..9599d7a50028 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -53,7 +53,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
-#include <linux/blk-mq-pci.h>
#include <linux/unaligned.h>
#include "mpt3sas_base.h"
@@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c81e53e93f..2a7822fd613e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -105,7 +105,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (pm8001_ha->number_of_intr > 1) {
- blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+ blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
return;
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 42c7b3f7afbf..d3bd8683f344 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -56,7 +56,6 @@
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8f4cc136a9c9..8ee2e337c9e1 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,7 +8,6 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
{
struct scsi_qla_host *vha = lport->private;
- blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7ab717ed7232..31535beaaa16 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,7 +13,6 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
@@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
+ vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index adee6f60c966..4411426a7894 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -210,6 +210,9 @@ static int scsi_check_passthrough(struct scsi_cmnd *scmd,
struct scsi_sense_hdr sshdr;
enum sam_status status;
+ if (!scmd->result)
+ return 0;
+
if (!failures)
return 0;
@@ -2065,9 +2068,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
- tag_set->flags |=
- BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+ if (shost->hostt->tag_alloc_policy_rr)
+ tag_set->flags |= BLK_MQ_F_TAG_RR;
if (shost->queuecommand_may_block)
tag_set->flags |= BLK_MQ_F_BLOCKING;
tag_set->driver_data = shost;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fde7de3b1e55..9b47f91c5b97 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -4104,7 +4104,7 @@ iscsi_if_rx(struct sk_buff *skb)
}
do {
/*
- * special case for GET_STATS:
+ * special case for GET_STATS, GET_CHAP and GET_HOST_STATS:
* on success - sending reply and stats from
* inside of if_recv_msg(),
* on error - fall through.
@@ -4113,6 +4113,8 @@ iscsi_if_rx(struct sk_buff *skb)
break;
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
break;
+ if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err)
+ break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
if (err == -EAGAIN && --retries < 0) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8947dab132d7..950d8c9fb884 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -177,9 +177,8 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_set_flush_flag(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
- ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ ret = queue_limits_commit_update_frozen(sdkp->disk->queue,
+ &lim);
if (ret)
return ret;
return count;
@@ -483,9 +482,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_discard(sdkp, &lim, mode);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
return count;
@@ -594,9 +591,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_write_same(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
return count;
@@ -814,14 +809,14 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
}
if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
scmd->prot_flags |= SCSI_PROT_REF_CHECK;
}
@@ -996,6 +991,7 @@ static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
lim->atomic_write_hw_boundary = 0;
lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
@@ -3803,9 +3799,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_config_write_same(sdkp, &lim);
kfree(buffer);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
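The protection-flag hunks flip the logic from a negative control flag to positive per-field check flags: where sd used to verify the guard and reference tags whenever BIP_CTRL_NOCHECK was absent, it now mirrors exactly the checks the bio asked for. Side by side, roughly:

/* old: verify unless the bio opted out of all checks */
if (!bio_integrity_flagged(bio, BIP_CTRL_NOCHECK))
	scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;

/* new: verify only what the bio explicitly requested */
if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
	scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
	scmd->prot_flags |= SCSI_PROT_REF_CHECK;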
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 870f37b70546..04fb24d77e9b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -19,7 +19,6 @@
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -6547,10 +6546,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
if (!ctrl_info->disable_managed_interrupts)
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ &ctrl_info->pci_dev->dev, 0);
else
- return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 198bec87bb8e..b17796d5ee66 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -797,10 +797,7 @@ static int get_sectorsize(struct scsi_cd *cd)
lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
- blk_mq_freeze_queue(q);
- err = queue_limits_commit_update(q, &lim);
- blk_mq_unfreeze_queue(q);
- return err;
+ return queue_limits_commit_update_frozen(q, &lim);
}
static int get_capabilities(struct scsi_cd *cd)
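queue_limits_commit_update_frozen() folds the freeze/commit/unfreeze triple into one call, so these limit updaters shrink to start-update, mutate, commit. The resulting pattern:

struct queue_limits lim;
int err;

lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
/* freezes q, commits the limits (or discards on error), unfreezes */
err = queue_limits_commit_update_frozen(q, &lim);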
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8471f38b730e..60be1a0c6183 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,7 +29,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>
#include "sd.h"
@@ -746,7 +745,7 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(map);
else
- blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+ blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
}
}
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 2a42b28931c9..298b542dd1c0 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -399,7 +399,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
static int __init atmel_soc_device_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
if (!of_match_node(at91_soc_allowed_list, np))
return 0;
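The __free(device_node) annotation arms of_node_put() as a scope-exit cleanup, so the reference taken by of_find_node_by_path() is dropped on every return path without explicit put calls. Minimal sketch:

#include <linux/cleanup.h>
#include <linux/of.h>

static int __init example_soc_check(void)
{
	/* put automatically when np goes out of scope */
	struct device_node *np __free(device_node) =
		of_find_node_by_path("/");

	if (!of_match_node(at91_soc_allowed_list, np))
		return 0;
	/* ... proceed with a matched root node ... */
	return 1;
}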
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index b7e8e5ec884c..f4d3c2146f4f 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -108,14 +108,12 @@ static int on_all_cpus(int (*fn)(void))
.fn = fn,
.started = ATOMIC_INIT(0)
};
- struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
- "hotpotato%d", cpu);
+ struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
+ cpu, "hotpotato%d");
int ret;
if (IS_ERR(k))
return -ENOMEM;
- kthread_bind(k, cpu);
- wake_up_process(k);
/*
* If we call kthread_stop() before the "wake up" has had an
* effect, then the thread may exit with -EINTR without ever
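kthread_run_on_cpu() fuses create, bind and wake into a single call (the CPU number is fed to the name format for you), removing the window in which the caller handles an unstarted, unbound thread. The two shapes:

/* before: three steps, thread visible in between */
struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
				       "hotpotato%d", cpu);
kthread_bind(k, cpu);
wake_up_process(k);

/* after: created bound to cpu and immediately woken */
struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
					   cpu, "hotpotato%d");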
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 3ad321ca608a..ca6a5fa1618f 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -3,4 +3,4 @@ ifeq ($(CONFIG_ARM),y)
obj-$(CONFIG_ARCH_MXC) += soc-imx.o
endif
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
-obj-$(CONFIG_SOC_IMX9) += imx93-src.o
+obj-$(CONFIG_SOC_IMX9) += imx93-src.o soc-imx9.o
diff --git a/drivers/soc/imx/soc-imx9.c b/drivers/soc/imx/soc-imx9.c
new file mode 100644
index 000000000000..b46d22cf0212
--- /dev/null
+++ b/drivers/soc/imx/soc-imx9.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define IMX_SIP_GET_SOC_INFO 0xc2000006
+#define SOC_ID(x) (((x) & 0xFFFF) >> 8)
+#define SOC_REV_MAJOR(x) ((((x) >> 28) & 0xF) - 0x9)
+#define SOC_REV_MINOR(x) (((x) >> 24) & 0xF)
+
+static int imx9_soc_probe(struct platform_device *pdev)
+{
+ struct soc_device_attribute *attr;
+ struct arm_smccc_res res;
+ struct soc_device *sdev;
+ u32 soc_id, rev_major, rev_minor;
+ u64 uid127_64, uid63_0;
+ int err;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ err = of_property_read_string(of_root, "model", &attr->machine);
+ if (err) {
+ pr_err("%s: missing model property: %d\n", __func__, err);
+ goto attr;
+ }
+
+ attr->family = kasprintf(GFP_KERNEL, "Freescale i.MX");
+
+ /*
+	 * Retrieve the SoC ID, revision and UID:
+ * res.a1[31:16]: soc revision;
+ * res.a1[15:0]: soc id;
+ * res.a2: uid[127:64];
+ * res.a3: uid[63:0];
+ */
+ arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_err("%s: SMC failed: 0x%lx\n", __func__, res.a0);
+ err = -EINVAL;
+ goto family;
+ }
+
+ soc_id = SOC_ID(res.a1);
+ rev_major = SOC_REV_MAJOR(res.a1);
+ rev_minor = SOC_REV_MINOR(res.a1);
+
+ attr->soc_id = kasprintf(GFP_KERNEL, "i.MX%2x", soc_id);
+ attr->revision = kasprintf(GFP_KERNEL, "%d.%d", rev_major, rev_minor);
+
+ uid127_64 = res.a2;
+ uid63_0 = res.a3;
+ attr->serial_number = kasprintf(GFP_KERNEL, "%016llx%016llx", uid127_64, uid63_0);
+
+ sdev = soc_device_register(attr);
+ if (IS_ERR(sdev)) {
+ err = PTR_ERR(sdev);
+		pr_err("%s: failed to register SoC as a device: %d\n", __func__, err);
+ goto serial_number;
+ }
+
+ return 0;
+
+serial_number:
+ kfree(attr->serial_number);
+ kfree(attr->revision);
+ kfree(attr->soc_id);
+family:
+ kfree(attr->family);
+attr:
+ kfree(attr);
+ return err;
+}
+
+static __maybe_unused const struct of_device_id imx9_soc_match[] = {
+ { .compatible = "fsl,imx93", },
+ { .compatible = "fsl,imx95", },
+ { }
+};
+
+#define IMX_SOC_DRIVER "imx9-soc"
+
+static struct platform_driver imx9_soc_driver = {
+ .probe = imx9_soc_probe,
+ .driver = {
+ .name = IMX_SOC_DRIVER,
+ },
+};
+
+static int __init imx9_soc_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ /* No match means it is not an i.MX 9 series SoC, do nothing. */
+ if (!of_match_node(imx9_soc_match, of_root))
+ return 0;
+
+ ret = platform_driver_register(&imx9_soc_driver);
+ if (ret) {
+ pr_err("failed to register imx9_soc platform driver: %d\n", ret);
+ return ret;
+ }
+
+ pdev = platform_device_register_simple(IMX_SOC_DRIVER, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("failed to register imx9_soc platform device: %ld\n", PTR_ERR(pdev));
+ platform_driver_unregister(&imx9_soc_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(imx9_soc_init);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP i.MX9 SoC");
+MODULE_LICENSE("GPL");
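As a worked decode of the SIP response (the value is hypothetical): with res.a1 = 0xA2009310, the macros above give

/*
 * SOC_ID(0xA2009310)        = (0xA2009310 & 0xFFFF) >> 8     = 0x93
 * SOC_REV_MAJOR(0xA2009310) = ((0xA2009310 >> 28) & 0xF) - 9 = 1
 * SOC_REV_MINOR(0xA2009310) = (0xA2009310 >> 24) & 0xF       = 2
 *
 * so attr->soc_id becomes "i.MX93" and attr->revision "1.2".
 */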
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index d08bfc8ef7be..104a5f9bfd26 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -69,14 +69,11 @@ static int litex_check_csr_access(void __iomem *reg_addr)
struct litex_soc_ctrl_device {
void __iomem *base;
- struct notifier_block reset_nb;
};
-static int litex_reset_handler(struct notifier_block *this, unsigned long mode,
- void *cmd)
+static int litex_reset_handler(struct sys_off_data *data)
{
- struct litex_soc_ctrl_device *soc_ctrl_dev =
- container_of(this, struct litex_soc_ctrl_device, reset_nb);
+ struct litex_soc_ctrl_device *soc_ctrl_dev = data->cb_data;
litex_write32(soc_ctrl_dev->base + RESET_REG_OFF, RESET_REG_VALUE);
return NOTIFY_DONE;
@@ -105,11 +102,9 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, soc_ctrl_dev);
-
- soc_ctrl_dev->reset_nb.notifier_call = litex_reset_handler;
- soc_ctrl_dev->reset_nb.priority = 128;
- error = register_restart_handler(&soc_ctrl_dev->reset_nb);
+ error = devm_register_restart_handler(&pdev->dev,
+ litex_reset_handler,
+ soc_ctrl_dev);
if (error) {
dev_warn(&pdev->dev, "cannot register restart handler: %d\n",
error);
@@ -118,20 +113,12 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
return 0;
}
-static void litex_soc_ctrl_remove(struct platform_device *pdev)
-{
- struct litex_soc_ctrl_device *soc_ctrl_dev = platform_get_drvdata(pdev);
-
- unregister_restart_handler(&soc_ctrl_dev->reset_nb);
-}
-
static struct platform_driver litex_soc_ctrl_driver = {
.driver = {
.name = "litex-soc-controller",
.of_match_table = litex_soc_ctrl_of_match,
},
.probe = litex_soc_ctrl_probe,
- .remove = litex_soc_ctrl_remove,
};
module_platform_driver(litex_soc_ctrl_driver);
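devm_register_restart_handler() ties the handler to the device's lifetime, which is what lets the remove() callback and the hand-rolled notifier_block disappear; the callback receives a struct sys_off_data whose cb_data is whatever pointer was registered. Sketch:

static int example_restart(struct sys_off_data *data)
{
	struct litex_soc_ctrl_device *soc = data->cb_data;

	litex_write32(soc->base + RESET_REG_OFF, RESET_REG_VALUE);
	return NOTIFY_DONE;
}

/* in probe(); unregistered automatically when the device unbinds */
error = devm_register_restart_handler(&pdev->dev, example_restart,
				      soc_ctrl_dev);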
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index 2a1adcb87d4e..f54c966138b5 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -273,23 +273,31 @@ static int mtk_devapc_probe(struct platform_device *pdev)
return -EINVAL;
devapc_irq = irq_of_parse_and_map(node, 0);
- if (!devapc_irq)
- return -EINVAL;
+ if (!devapc_irq) {
+ ret = -EINVAL;
+ goto err;
+ }
ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
- if (IS_ERR(ctx->infra_clk))
- return -EINVAL;
+ if (IS_ERR(ctx->infra_clk)) {
+ ret = -EINVAL;
+ goto err;
+ }
ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
IRQF_TRIGGER_NONE, "devapc", ctx);
if (ret)
- return ret;
+ goto err;
platform_set_drvdata(pdev, ctx);
start_devapc(ctx);
return 0;
+
+err:
+ iounmap(ctx->infra_base);
+ return ret;
}
static void mtk_devapc_remove(struct platform_device *pdev)
@@ -297,6 +305,7 @@ static void mtk_devapc_remove(struct platform_device *pdev)
struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
stop_devapc(ctx);
+ iounmap(ctx->infra_base);
}
static struct platform_driver mtk_devapc_driver = {
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 9fdc0ef79202..0bcd85826375 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -2518,8 +2518,8 @@ static int pwrap_probe(struct platform_device *pdev)
}
}
- ret = devm_clk_bulk_get_all_enable(wrp->dev, &clk);
- if (ret)
+ ret = devm_clk_bulk_get_all_enabled(wrp->dev, &clk);
+ if (ret < 0)
return dev_err_probe(wrp->dev, ret,
"failed to get clocks\n");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 74b9121240f8..58e63cf0036b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -139,7 +139,7 @@ config QCOM_RAMP_CTRL
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
select QCOM_SCM
help
The Qualcomm remote filesystem memory driver is used for allocating
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 32c3bc887cef..56823b6a2fac 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -142,6 +142,7 @@ struct qcom_llcc_config {
bool skip_llcc_cfg;
bool no_edac;
bool irq_configured;
+ bool no_broadcast_register;
};
struct qcom_sct_config {
@@ -154,6 +155,38 @@ enum llcc_reg_offset {
LLCC_COMMON_STATUS0,
};
+static const struct llcc_slice_config ipq5424_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xFFFF,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ .alloc_oneway_en = true,
+ .ovcap_en = true,
+ .ovcap_prio = true,
+ .vict_prio = true,
+ },
+ {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 256,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xF000,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ },
+};
+
static const struct llcc_slice_config sa8775p_data[] = {
{
.usecase_id = LLCC_CPUSS,
@@ -3004,6 +3037,7 @@ static const struct llcc_slice_config x1e80100_data[] = {
.fixed_size = true,
.bonus_ways = 0xfff,
.cache_mode = 0,
+ .activate_on_init = true,
}, {
.usecase_id = LLCC_CAMEXP0,
.slice_id = 4,
@@ -3185,6 +3219,16 @@ static const struct qcom_llcc_config qdu1000_cfg[] = {
},
};
+static const struct qcom_llcc_config ipq5424_cfg[] = {
+ {
+ .sct_data = ipq5424_data,
+ .size = ARRAY_SIZE(ipq5424_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .no_broadcast_register = true,
+ },
+};
+
static const struct qcom_llcc_config sa8775p_cfg[] = {
{
.sct_data = sa8775p_data,
@@ -3360,6 +3404,11 @@ static const struct qcom_sct_config qdu1000_cfgs = {
.num_config = ARRAY_SIZE(qdu1000_cfg),
};
+static const struct qcom_sct_config ipq5424_cfgs = {
+ .llcc_config = ipq5424_cfg,
+ .num_config = ARRAY_SIZE(ipq5424_cfg),
+};
+
static const struct qcom_sct_config sa8775p_cfgs = {
.llcc_config = sa8775p_cfg,
.num_config = ARRAY_SIZE(sa8775p_cfg),
@@ -3957,8 +4006,12 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
if (IS_ERR(drv_data->bcast_regmap)) {
- ret = PTR_ERR(drv_data->bcast_regmap);
- goto err;
+ if (cfg->no_broadcast_register) {
+ drv_data->bcast_regmap = regmap;
+ } else {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
}
/* Extract version of the IP */
@@ -4029,6 +4082,7 @@ err:
}
static const struct of_device_id qcom_llcc_of_match[] = {
+ { .compatible = "qcom,ipq5424-llcc", .data = &ipq5424_cfgs},
{ .compatible = "qcom,qcs615-llcc", .data = &qcs615_cfgs},
{ .compatible = "qcom,qcs8300-llcc", .data = &qcs8300_cfgs},
{ .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index caf3f63d940e..052c292eeda6 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -100,15 +101,13 @@ void pmic_glink_client_register(struct pmic_glink_client *client)
struct pmic_glink *pg = client->pg;
unsigned long flags;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
spin_lock_irqsave(&pg->client_lock, flags);
list_add(&client->node, &pg->clients);
client->pdr_notify(client->priv, pg->client_state);
spin_unlock_irqrestore(&pg->client_lock, flags);
- mutex_unlock(&pg->state_lock);
-
}
EXPORT_SYMBOL_GPL(pmic_glink_client_register);
@@ -119,26 +118,25 @@ int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
unsigned long start;
int ret;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
if (!pg->ept) {
- ret = -ECONNRESET;
- } else {
- start = jiffies;
- for (;;) {
- ret = rpmsg_send(pg->ept, data, len);
- if (ret != -EAGAIN)
- break;
-
- if (timeout_reached) {
- ret = -ETIMEDOUT;
- break;
- }
-
- usleep_range(1000, 5000);
- timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
+ return -ECONNRESET;
+ }
+
+ start = jiffies;
+ for (;;) {
+ ret = rpmsg_send(pg->ept, data, len);
+ if (ret != -EAGAIN)
+ break;
+
+ if (timeout_reached) {
+ ret = -ETIMEDOUT;
+ break;
}
+
+ usleep_range(1000, 5000);
+ timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
}
- mutex_unlock(&pg->state_lock);
return ret;
}
@@ -227,51 +225,42 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
{
struct pmic_glink *pg = priv;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->pdr_state = state;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
}
static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct pmic_glink *pg = __pmic_glink;
- int ret = 0;
- mutex_lock(&__pmic_glink_lock);
- if (!pg) {
- ret = dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
- goto out_unlock;
- }
+ guard(mutex)(&__pmic_glink_lock);
+ pg = __pmic_glink;
+ if (!pg)
+ return dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
dev_set_drvdata(&rpdev->dev, pg);
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = rpdev->ept;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
- return ret;
+ return 0;
}
static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct pmic_glink *pg;
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
pg = __pmic_glink;
if (!pg)
- goto out_unlock;
+ return;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = NULL;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
}
static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
@@ -378,9 +367,8 @@ static void pmic_glink_remove(struct platform_device *pdev)
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
__pmic_glink = NULL;
- mutex_unlock(&__pmic_glink_lock);
}
static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
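The pmic_glink conversion leans on <linux/cleanup.h>: guard(mutex)(&lock) takes the mutex and releases it when the enclosing scope exits, so early returns need no unlock labels. Minimal shape of the send path after the change:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int example_send(struct pmic_glink *pg, void *data, size_t len)
{
	guard(mutex)(&pg->state_lock);	/* dropped on every return */

	if (!pg->ept)
		return -ECONNRESET;

	return rpmsg_send(pg->ept, data, len);
}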
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 463b1c528831..bd06ce161804 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -5,6 +5,7 @@
*/
#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -114,7 +115,7 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
* The USBC_CMD_WRITE_REQ ack doesn't identify the request, so wait for
* one ack at a time.
*/
- mutex_lock(&altmode->lock);
+ guard(mutex)(&altmode->lock);
req.hdr.owner = cpu_to_le32(altmode->owner_id);
req.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP);
@@ -125,18 +126,16 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
ret = pmic_glink_send(altmode->client, &req, sizeof(req));
if (ret) {
dev_err(altmode->dev, "failed to send altmode request: %#x (%d)\n", cmd, ret);
- goto out_unlock;
+ return ret;
}
left = wait_for_completion_timeout(&altmode->pan_ack, 5 * HZ);
if (!left) {
dev_err(altmode->dev, "timeout waiting for altmode request ack for: %#x\n", cmd);
- ret = -ETIMEDOUT;
+ return -ETIMEDOUT;
}
-out_unlock:
- mutex_unlock(&altmode->lock);
- return ret;
+ return 0;
}
static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 6e30f08761aa..154ca5beb471 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -553,6 +553,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm4250", .data = sm6115_domains, },
{ .compatible = "qcom,sm6115", .data = sm6115_domains, },
{ .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7225", .data = sm6350_domains, },
{ .compatible = "qcom,sm7325", .data = sc7280_domains, },
{ .compatible = "qcom,sm8150", .data = sm8150_domains, },
{ .compatible = "qcom,sm8250", .data = sm8250_domains, },
@@ -561,6 +562,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm8550", .data = sm8550_domains, },
{ .compatible = "qcom,sm8650", .data = sm8550_domains, },
{ .compatible = "qcom,x1e80100", .data = x1e80100_domains, },
+ { .compatible = "qcom,x1p42100", .data = x1e80100_domains, },
{},
};
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 33603b8fd8f3..1b32469f2789 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -125,7 +125,7 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct class rmtfs_class = {
+static const struct class rmtfs_class = {
.name = "rmtfs",
};
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index d9bfac6c54fb..cc5be8019b6a 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -112,7 +112,8 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
if (args.args_count != 1) {
dev_err(dev, "invalid #qcom,smem-state-cells\n");
- return ERR_PTR(-EINVAL);
+ state = ERR_PTR(-EINVAL);
+ goto put;
}
state = of_node_to_state(args.np);
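This fix matters because of_parse_phandle_with_args() returns with a reference held on args.np, so bailing out directly would leak it; the error value now flows through the function's existing put label instead. A minimal sketch of the idiom, with illustrative names that are not from this driver:

	#include <linux/of.h>

	static int example_parse(struct device_node *np)
	{
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_args(np, "example-prop",
						 "#example-cells", 0, &args);
		if (ret)
			return ret;

		if (args.args_count != 1) {
			ret = -EINVAL;
			goto put;	/* args.np reference must still be dropped */
		}

		/* ... use args ... */
	put:
		of_node_put(args.np);
		return ret;
	}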
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 62fadfe44a09..18d7f1be9093 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -451,6 +451,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(QCS9100) },
{ qcom_board_id(QCS8300) },
{ qcom_board_id(QCS8275) },
+ { qcom_board_id(QCS9075) },
{ qcom_board_id(QCS615) },
};
@@ -796,7 +797,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
if (!qs->attr.soc_id || !qs->attr.revision)
return -ENOMEM;
- if (offsetof(struct socinfo, serial_num) <= item_size) {
+ if (offsetofend(struct socinfo, serial_num) <= item_size) {
qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%u",
le32_to_cpu(info->serial_num));
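The distinction: offsetof() only proves the field *starts* inside the SMEM item, while offsetofend() proves it also ends inside it. A standalone illustration with a made-up layout (the real struct socinfo differs):

	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative layout only; not the real struct socinfo. */
	struct info {
		char pad[0x88];
		unsigned int serial_num;	/* occupies 0x88..0x8b */
	};

	int main(void)
	{
		size_t item_size = 0x88;	/* field starts exactly at the end */

		/* old check passes even though the 4 bytes lie outside the item */
		printf("offsetof ok:    %d\n",
		       offsetof(struct info, serial_num) <= item_size);
		/* offsetofend-style check correctly rejects it */
		printf("offsetofend ok: %d\n",
		       offsetof(struct info, serial_num) +
		       sizeof(((struct info *)0)->serial_num) <= item_size);
		return 0;
	}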
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 9f7fe02310b9..6d2e135eed89 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -345,6 +345,11 @@ config ARCH_R9A09G011
help
This enables support for the Renesas RZ/V2M SoC.
+config ARCH_R9A09G047
+ bool "ARM64 Platform support for RZ/G3E"
+ help
+ This enables support for the Renesas RZ/G3E SoC variants.
+
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index d8c53cec7f37..dd5256e5aae1 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -126,7 +126,7 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
if (ret)
return ret;
}
- return ret;
+ return 0;
}
static bool tensor_is_atomic(unsigned int reg)
diff --git a/drivers/soc/tegra/cbb/tegra-cbb.c b/drivers/soc/tegra/cbb/tegra-cbb.c
index 84ab46c9d9f5..6215c6a84fbe 100644
--- a/drivers/soc/tegra/cbb/tegra-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra-cbb.c
@@ -69,19 +69,12 @@ static int tegra_cbb_err_show(struct seq_file *file, void *data)
}
DEFINE_SHOW_ATTRIBUTE(tegra_cbb_err);
-static int tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
+static void tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
{
static struct dentry *root;
- if (!root) {
+ if (!root)
root = debugfs_create_file("tegra_cbb_err", 0444, NULL, cbb, &tegra_cbb_err_fops);
- if (IS_ERR_OR_NULL(root)) {
- pr_err("%s(): could not create debugfs node\n", __func__);
- return PTR_ERR(root);
- }
- }
-
- return 0;
}
void tegra_cbb_stall_enable(struct tegra_cbb *cbb)
@@ -148,13 +141,8 @@ int tegra_cbb_register(struct tegra_cbb *cbb)
{
int ret;
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- ret = tegra_cbb_err_debugfs_init(cbb);
- if (ret) {
- dev_err(cbb->dev, "failed to create debugfs\n");
- return ret;
- }
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_cbb_err_debugfs_init(cbb);
/* register interrupt handler for errors due to different initiators */
ret = cbb->ops->interrupt_enable(cbb);
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
index 5cf0e8c34164..c74629af9bb5 100644
--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -277,7 +277,7 @@ static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234
* which timed out.
* a) Get block number from the index of set bit in
* <FABRIC>_SN_AXI2APB_<>_BLOCK_TMO_STATUS_0 register.
- * b) Get address of register repective to block number i.e.
+ * b) Get address of register respective to block number i.e.
* <FABRIC>_SN_AXI2APB_<>_BLOCK<index-set-bit>_TMO_0.
* c) Read the register in above step to get client_id which
* timed out as per the set bits.
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index eb14e5ff5a0a..e24ab5f7d2bf 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -647,15 +647,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
};
static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
- { .start = 0x01c, .end = 0x0c8 },
- { .start = 0x12c, .end = 0x184 },
+ { .start = 0x01c, .end = 0x064 },
+ { .start = 0x084, .end = 0x0a0 },
+ { .start = 0x0a4, .end = 0x0c8 },
+ { .start = 0x12c, .end = 0x164 },
+ { .start = 0x16c, .end = 0x184 },
{ .start = 0x190, .end = 0x198 },
{ .start = 0x1a0, .end = 0x204 },
- { .start = 0x21c, .end = 0x250 },
- { .start = 0x25c, .end = 0x2f0 },
+ { .start = 0x21c, .end = 0x2f0 },
{ .start = 0x310, .end = 0x3d8 },
- { .start = 0x400, .end = 0x4f0 },
- { .start = 0x4f8, .end = 0x7e8 },
+ { .start = 0x400, .end = 0x420 },
+ { .start = 0x444, .end = 0x490 },
+ { .start = 0x4bc, .end = 0x4f0 },
+ { .start = 0x4f8, .end = 0x54c },
+ { .start = 0x57c, .end = 0x7e8 },
{ .start = 0x8d0, .end = 0x8d8 },
{ .start = 0xacc, .end = 0xf00 }
};
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f51f9466e518..ea8a31032927 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -542,6 +542,18 @@ config SPI_JCORE
This enables support for the SPI master controller in the J-Core
synthesizable, open source SoC.
+config SPI_KSPI2
+ tristate "Support for KEBA SPI master type 2 hardware"
+ depends on HAS_IOMEM
+ depends on KEBA_CP500 || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+ This driver supports the KEBA SPI master type 2 FPGA implementation,
+ as found on CP500 devices, for example.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-kspi2.
+
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
depends on PARPORT
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aea5e54de195..9db7554c1864 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_KSPI2) += spi-kspi2.o
obj-$(CONFIG_SPI_LJCA) += spi-ljca.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LOONGSON_CORE) += spi-loongson-core.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 316bce577081..abdc49d9d940 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -11,11 +11,15 @@
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -34,6 +38,7 @@
#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
#define QSPI_IMR 0x001c /* Interrupt Mask Register */
#define QSPI_SCR 0x0020 /* Serial Clock Register */
+#define QSPI_SR2 0x0024 /* SAMA7G5 Status Register */
#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
@@ -44,16 +49,32 @@
#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+#define QSPI_REFRESH 0x0050 /* Refresh Register */
+#define QSPI_WRACNT 0x0054 /* Write Access Counter Register */
+#define QSPI_DLLCFG 0x0058 /* DLL Configuration Register */
+#define QSPI_PCALCFG 0x005C /* Pad Calibration Configuration Register */
+#define QSPI_PCALBP 0x0060 /* Pad Calibration Bypass Register */
+#define QSPI_TOUT 0x0064 /* Timeout Register */
+
#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
#define QSPI_VERSION 0x00FC /* Version Register */
+#define SAMA7G5_QSPI0_MAX_SPEED_HZ 200000000
+#define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ 133000000
/* Bitfields in QSPI_CR (Control Register) */
#define QSPI_CR_QSPIEN BIT(0)
#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_DLLON BIT(2)
+#define QSPI_CR_DLLOFF BIT(3)
+#define QSPI_CR_STPCAL BIT(4)
+#define QSPI_CR_SRFRSH BIT(5)
#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_UPDCFG BIT(8)
+#define QSPI_CR_STTFR BIT(9)
+#define QSPI_CR_RTOUT BIT(10)
#define QSPI_CR_LASTXFER BIT(24)
/* Bitfields in QSPI_MR (Mode Register) */
@@ -61,12 +82,14 @@
#define QSPI_MR_LLB BIT(1)
#define QSPI_MR_WDRBT BIT(2)
#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_DQSDLYEN BIT(3)
#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_OENSD BIT(15)
#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
@@ -80,6 +103,13 @@
#define QSPI_SR_CSR BIT(8)
#define QSPI_SR_CSS BIT(9)
#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_LWRA BIT(11)
+#define QSPI_SR_QITF BIT(12)
+#define QSPI_SR_QITR BIT(13)
+#define QSPI_SR_CSFA BIT(14)
+#define QSPI_SR_CSRA BIT(15)
+#define QSPI_SR_RFRSHD BIT(16)
+#define QSPI_SR_TOUT BIT(17)
#define QSPI_SR_QSPIENS BIT(24)
#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
@@ -92,9 +122,22 @@
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+/* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
+#define QSPI_SR2_SYNCBSY BIT(0)
+#define QSPI_SR2_QSPIENS BIT(1)
+#define QSPI_SR2_CSS BIT(2)
+#define QSPI_SR2_RBUSY BIT(3)
+#define QSPI_SR2_HIDLE BIT(4)
+#define QSPI_SR2_DLOCK BIT(5)
+#define QSPI_SR2_CALBSY BIT(6)
+
+/* Bitfields in QSPI_IAR (Instruction Address Register) */
+#define QSPI_IAR_ADDR GENMASK(31, 0)
+
/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_INST_MASK_SAMA7G5 GENMASK(15, 0)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
@@ -107,6 +150,9 @@
#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_WIDTH_OCT_OUTPUT (7 << 0)
+#define QSPI_IFR_WIDTH_OCT_IO (8 << 0)
+#define QSPI_IFR_WIDTH_OCT_CMD (9 << 0)
#define QSPI_IFR_INSTEN BIT(4)
#define QSPI_IFR_ADDREN BIT(5)
#define QSPI_IFR_OPTEN BIT(6)
@@ -117,19 +163,60 @@
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_ADDRL_SAMA7G5 GENMASK(11, 10)
#define QSPI_IFR_TFRTYP_MEM BIT(12)
#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_DDREN BIT(15)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_END BIT(22)
+#define QSPI_IFR_SMRM BIT(23)
#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
+#define QSPI_IFR_DQSEN BIT(25)
+#define QSPI_IFR_DDRCMDEN BIT(26)
+#define QSPI_IFR_HFWBEN BIT(27)
+#define QSPI_IFR_PROTTYP GENMASK(29, 28)
+#define QSPI_IFR_PROTTYP_STD_SPI 0
+#define QSPI_IFR_PROTTYP_TWIN_QUAD 1
+#define QSPI_IFR_PROTTYP_OCTAFLASH 2
+#define QSPI_IFR_PROTTYP_HYPERFLASH 3
/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
#define QSPI_SMR_RVDIS BIT(1)
+#define QSPI_SMR_SCRKL BIT(2)
+
+/* Bitfields in QSPI_REFRESH (Refresh Register) */
+#define QSPI_REFRESH_DELAY_COUNTER GENMASK(31, 0)
+
+/* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
+#define QSPI_WRACNT_NBWRA GENMASK(31, 0)
+
+/* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
+#define QSPI_DLLCFG_RANGE BIT(0)
+
+/* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
+#define QSPI_PCALCFG_AAON BIT(0)
+#define QSPI_PCALCFG_DAPCAL BIT(1)
+#define QSPI_PCALCFG_DIFFPM BIT(2)
+#define QSPI_PCALCFG_CLKDIV GENMASK(6, 4)
+#define QSPI_PCALCFG_CALCNT GENMASK(16, 8)
+#define QSPI_PCALCFG_CALP GENMASK(27, 24)
+#define QSPI_PCALCFG_CALN GENMASK(31, 28)
+
+/* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
+#define QSPI_PCALBP_BPEN BIT(0)
+#define QSPI_PCALBP_CALPBP GENMASK(11, 8)
+#define QSPI_PCALBP_CALNBP GENMASK(19, 16)
+
+/* Bitfields in QSPI_TOUT (Timeout Register) */
+#define QSPI_TOUT_TCNTM GENMASK(15, 0)
/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPITEN BIT(1)
+#define QSPI_WPMR_WPCREN BIT(2)
#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
@@ -138,23 +225,74 @@
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+#define ATMEL_QSPI_TIMEOUT 1000 /* ms */
+#define ATMEL_QSPI_SYNC_TIMEOUT 300 /* ms */
+#define QSPI_DLLCFG_THRESHOLD_FREQ 90000000U
+#define QSPI_CALIB_TIME 2000 /* 2 us */
+
+/* Use PIO for small transfers. */
+#define ATMEL_QSPI_DMA_MIN_BYTES 16
+/**
+ * struct atmel_qspi_pcal - Pad Calibration Clock Division
+ * @pclk_rate: peripheral clock rate.
+ * @pclk_div: calibration clock division. The clock applied to the calibration
+ * cell is divided by pclk_div + 1.
+ */
+struct atmel_qspi_pcal {
+ u32 pclk_rate;
+ u8 pclk_div;
+};
+
+#define ATMEL_QSPI_PCAL_ARRAY_SIZE 8
+static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
+ {25000000, 0},
+ {50000000, 1},
+ {75000000, 2},
+ {100000000, 3},
+ {125000000, 4},
+ {150000000, 5},
+ {175000000, 6},
+ {200000000, 7},
+};
+
struct atmel_qspi_caps {
+ u32 max_speed_hz;
bool has_qspick;
+ bool has_gclk;
bool has_ricr;
+ bool octal;
+ bool has_dma;
};
+struct atmel_qspi_ops;
+
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
struct clk *pclk;
struct clk *qspick;
+ struct clk *gclk;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
+ const struct atmel_qspi_ops *ops;
resource_size_t mmap_size;
u32 pending;
+ u32 irq_mask;
u32 mr;
u32 scr;
+ u32 target_max_speed_hz;
struct completion cmd_completion;
+ struct completion dma_completion;
+ dma_addr_t mmap_phys_base;
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+};
+
+struct atmel_qspi_ops {
+ int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
+ u32 *offset);
+ int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
+ u32 offset);
};
struct atmel_qspi_mode {
@@ -174,6 +312,19 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
+static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
+ { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
+ { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
+ { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
+ { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
+ { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
+ { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
+ { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
+ { 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
+ { 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
+ { 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
+};
+
#ifdef VERBOSE_DEBUG
static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
{
@@ -196,6 +347,8 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
return "IMR";
case QSPI_SCR:
return "SCR";
+ case QSPI_SR2:
+ return "SR2";
case QSPI_IAR:
return "IAR";
case QSPI_ICR:
@@ -208,6 +361,18 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
return "SMR";
case QSPI_SKR:
return "SKR";
+ case QSPI_REFRESH:
+ return "REFRESH";
+ case QSPI_WRACNT:
+ return "WRACNT";
+ case QSPI_DLLCFG:
+ return "DLLCFG";
+ case QSPI_PCALCFG:
+ return "PCALCFG";
+ case QSPI_PCALBP:
+ return "PCALBP";
+ case QSPI_TOUT:
+ return "TOUT";
case QSPI_WPMR:
return "WPMR";
case QSPI_WPSR:
@@ -249,6 +414,28 @@ static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
writel_relaxed(value, aq->regs + offset);
}
+static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_SYNCBSY), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ return ret;
+}
+
+static int atmel_qspi_update_config(struct atmel_qspi *aq)
+{
+ int ret;
+
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
+ return atmel_qspi_reg_sync(aq);
+}
+
static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
const struct atmel_qspi_mode *mode)
{
@@ -275,12 +462,31 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
return -EOPNOTSUPP;
}
+static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
+ if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
+ return i;
+
+ return -EOPNOTSUPP;
+}
+
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
if (!spi_mem_default_supports_op(mem, op))
return false;
+ if (aq->caps->octal)
+ return atmel_qspi_sama7g5_find_mode(op) >= 0;
+
if (atmel_qspi_find_mode(op) < 0)
return false;
@@ -292,6 +498,25 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
return true;
}
+/*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
+{
+ int ret = 0;
+
+ if (!(aq->mr & QSPI_MR_SMM)) {
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
+
+ if (aq->caps->has_gclk)
+ ret = atmel_qspi_update_config(aq);
+ }
+
+ return ret;
+}
+
static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
const struct spi_mem_op *op, u32 *offset)
{
@@ -371,14 +596,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
ifr |= QSPI_IFR_TFRTYP_MEM;
}
- /*
- * If the QSPI controller is set in regular SPI mode, set it in
- * Serial Memory Mode (SMM).
- */
- if (!(aq->mr & QSPI_MR_SMM)) {
- aq->mr |= QSPI_MR_SMM;
- atmel_qspi_write(aq->mr, aq, QSPI_MR);
- }
+ mode = atmel_qspi_set_serial_memory_mode(aq);
+ if (mode < 0)
+ return mode;
/* Clear pending interrupts */
(void)atmel_qspi_read(aq, QSPI_SR);
@@ -404,10 +624,326 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
return 0;
}
+static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
+{
+ int err = 0;
+ u32 sr;
+
+ /* Poll INSTRuction End status */
+ sr = atmel_qspi_read(aq, QSPI_SR);
+ if ((sr & irq_mask) == irq_mask)
+ return 0;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & irq_mask;
+ aq->irq_mask = irq_mask;
+ atmel_qspi_write(irq_mask, aq, QSPI_IER);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
+ err = -ETIMEDOUT;
+ atmel_qspi_write(irq_mask, aq, QSPI_IDR);
+
+ return err;
+}
+
+static int atmel_qspi_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 offset)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+
+ /* Skip to the final steps if there is no data */
+ if (!op->data.nbytes)
+ return atmel_qspi_wait_for_completion(aq,
+ QSPI_SR_CMD_COMPLETED);
+
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)atmel_qspi_read(aq, QSPI_IFR);
+
+ /* Send/Receive data */
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ rmb();
+ } else {
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ wmb();
+ }
+
+ /* Release the chip-select */
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
+}
+
+static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
+ const struct spi_mem_op *op, u32 *offset)
+{
+ u32 iar, icr, ifr;
+ int mode, ret;
+
+ iar = 0;
+ icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
+ ifr = QSPI_IFR_INSTEN;
+
+ mode = atmel_qspi_sama7g5_find_mode(op);
+ if (mode < 0)
+ return mode;
+ ifr |= atmel_qspi_sama7g5_modes[mode].config;
+
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
+ ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+ (2 * op->dummy.buswidth));
+ else
+ ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ }
+
+ if (op->addr.buswidth && op->addr.nbytes) {
+ ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
+ QSPI_IFR_ADDREN;
+ iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
+ }
+
+ if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
+ ifr |= QSPI_IFR_DDREN;
+ if (op->cmd.dtr)
+ ifr |= QSPI_IFR_DDRCMDEN;
+
+ ifr |= QSPI_IFR_DQSEN;
+ }
+
+ if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
+ op->data.buswidth == 8)
+ ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);
+
+ /* offset of the data access in the QSPI memory space */
+ *offset = iar;
+
+ /* Set data enable */
+ if (op->data.nbytes) {
+ ifr |= QSPI_IFR_DATAEN;
+
+ if (op->addr.nbytes)
+ ifr |= QSPI_IFR_TFRTYP_MEM;
+ }
+
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
+
+ /* Clear pending interrupts */
+ (void)atmel_qspi_read(aq, QSPI_SR);
+
+ /* Set QSPI Instruction Frame registers */
+ if (op->addr.nbytes && !op->data.nbytes)
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ atmel_qspi_write(icr, aq, QSPI_RICR);
+ } else {
+ atmel_qspi_write(icr, aq, QSPI_WICR);
+ if (op->data.nbytes)
+ atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
+ op->data.nbytes),
+ aq, QSPI_WRACNT);
+ }
+
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
+
+ return atmel_qspi_update_config(aq);
+}
+
+static void atmel_qspi_dma_callback(void *param)
+{
+ struct atmel_qspi *aq = param;
+
+ complete(&aq->dma_completion);
+}
+
+static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ unsigned int len)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ int ret;
+
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx) {
+ dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
+ return -EIO;
+ }
+
+ reinit_completion(&aq->dma_completion);
+ tx->callback = atmel_qspi_dma_callback;
+ tx->callback_param = aq;
+ cookie = tx->tx_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
+ return ret;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&aq->dma_completion,
+ msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
+ if (ret == 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt, loff_t loff)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ struct scatterlist *sg;
+ dma_addr_t dma_src;
+ unsigned int i, len;
+ int ret;
+
+ dma_src = aq->mmap_phys_base + loff;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ len = sg_dma_len(sg);
+ ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
+ dma_src, len);
+ if (ret)
+ return ret;
+ dma_src += len;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt, loff_t loff)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ struct scatterlist *sg;
+ dma_addr_t dma_dst;
+ unsigned int i, len;
+ int ret;
+
+ dma_dst = aq->mmap_phys_base + loff;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ len = sg_dma_len(sg);
+ ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
+ sg_dma_address(sg), len);
+ if (ret)
+ return ret;
+ dma_dst += len;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, loff_t loff)
+{
+ struct sg_table sgt;
+ int ret;
+
+ ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
+ &sgt);
+ if (ret)
+ return ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff);
+ else
+ ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);
+
+ spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt);
+
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 offset)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ u32 val;
+ int ret;
+
+ if (!op->data.nbytes) {
+ /* Start the transfer. */
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+ }
+
+ /* Send/Receive data. */
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (aq->rx_chan && op->addr.nbytes &&
+ op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+ ret = atmel_qspi_dma_transfer(mem, op, offset);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+ }
+
+ if (op->addr.nbytes) {
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_RBUSY), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (aq->tx_chan && op->addr.nbytes &&
+ op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+ ret = atmel_qspi_dma_transfer(mem, op, offset);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+ }
+
+ ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
+ if (ret)
+ return ret;
+ }
+
+ /* Release the chip-select. */
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+}
+
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
- u32 sr, offset;
+ u32 offset;
int err;
/*
@@ -416,46 +952,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* when the flash memories overrun the controller's memory space.
*/
if (op->addr.val + op->data.nbytes > aq->mmap_size)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+
+ if (op->addr.nbytes > 4)
+ return -EOPNOTSUPP;
err = pm_runtime_resume_and_get(&aq->pdev->dev);
if (err < 0)
return err;
- err = atmel_qspi_set_cfg(aq, op, &offset);
+ err = aq->ops->set_cfg(aq, op, &offset);
if (err)
goto pm_runtime_put;
- /* Skip to the final steps if there is no data */
- if (op->data.nbytes) {
- /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)atmel_qspi_read(aq, QSPI_IFR);
-
- /* Send/Receive data */
- if (op->data.dir == SPI_MEM_DATA_IN)
- memcpy_fromio(op->data.buf.in, aq->mem + offset,
- op->data.nbytes);
- else
- memcpy_toio(aq->mem + offset, op->data.buf.out,
- op->data.nbytes);
-
- /* Release the chip-select */
- atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
- }
-
- /* Poll INSTRuction End status */
- sr = atmel_qspi_read(aq, QSPI_SR);
- if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
- goto pm_runtime_put;
-
- /* Wait for INSTRuction End interrupt */
- reinit_completion(&aq->cmd_completion);
- aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
- if (!wait_for_completion_timeout(&aq->cmd_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
+ err = aq->ops->transfer(mem, op, offset);
pm_runtime_put:
pm_runtime_mark_last_busy(&aq->pdev->dev);
@@ -474,6 +984,159 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
.get_name = atmel_qspi_get_name
};
+static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
+{
+ unsigned long pclk_rate;
+ u32 status, val;
+ int i, ret;
+ u8 pclk_div = 0;
+
+ pclk_rate = clk_get_rate(aq->pclk);
+ if (!pclk_rate)
+ return -EINVAL;
+
+ for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
+ if (pclk_rate <= pcal[i].pclk_rate) {
+ pclk_div = pcal[i].pclk_div;
+ break;
+ }
+ }
+
+ /*
+ * Use the biggest divider in case the peripheral clock exceeds
+ * 200 MHz.
+ */
+ if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
+ pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
+
+ /* Disable QSPI while configuring the pad calibration. */
+ status = atmel_qspi_read(aq, QSPI_SR2);
+ if (status & QSPI_SR2_QSPIENS) {
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ }
+
+ /*
+ * The analog circuitry is not shut down at the end of the calibration
+ * and the start-up time is only required for the first calibration
+ * sequence, thus increasing performance. Set the delay between the Pad
+ * calibration analog circuitry and the calibration request to 2us.
+ */
+ atmel_qspi_write(QSPI_PCALCFG_AAON |
+ FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
+ FIELD_PREP(QSPI_PCALCFG_CALCNT,
+ 2 * (pclk_rate / 1000000)),
+ aq, QSPI_PCALCFG);
+
+ /* DLL On + start calibration. */
+ atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
+
+ /* Check synchronization status before updating configuration. */
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ (val & QSPI_SR2_DLOCK) &&
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
+
+ /* Refresh the analog blocks every 1 ms. */
+ atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
+ aq->target_max_speed_hz / 1000),
+ aq, QSPI_REFRESH);
+
+ return ret;
+}
+
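For reference, CALCNT is the start-up delay expressed in peripheral-clock cycles, so 2 * (pclk_rate / 1000000) is simply 2 us worth of cycles. A standalone check of the arithmetic, with a sample rate chosen only for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pclk_rate = 200000000UL;	/* e.g. 200 MHz pclk */
		unsigned long calcnt = 2 * (pclk_rate / 1000000);

		/* 400 cycles at 200 MHz = 2 us of start-up delay */
		printf("CALCNT = %lu cycles (%lu ns)\n",
		       calcnt, calcnt * 1000000000UL / pclk_rate);
		return 0;
	}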
+static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
+{
+ u32 status, val;
+ int ret;
+
+ /* Disable DLL before setting GCLK */
+ status = atmel_qspi_read(aq, QSPI_SR2);
+ if (status & QSPI_SR2_DLOCK) {
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
+ atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
+ else
+ atmel_qspi_write(0, aq, QSPI_DLLCFG);
+
+ ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
+ if (ret) {
+ dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
+ return ret;
+ }
+
+ /* Enable the QSPI generic clock */
+ ret = clk_prepare_enable(aq->gclk);
+ if (ret)
+ dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");
+
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
+{
+ u32 val;
+ int ret;
+
+ ret = atmel_qspi_set_gclk(aq);
+ if (ret)
+ return ret;
+
+ if (aq->caps->octal) {
+ ret = atmel_qspi_set_pad_calibration(aq);
+ if (ret)
+ return ret;
+ } else {
+ atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ (val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ }
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+ aq->mr |= QSPI_MR_DQSDLYEN;
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the QSPI controller. */
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ val & QSPI_SR2_QSPIENS, 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (aq->caps->octal) {
+ ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
+ val & QSPI_SR_RFRSHD, 40,
+ ATMEL_QSPI_TIMEOUT);
+ }
+
+ atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);
+
+ /* The controller can communicate with a single peripheral device (target). */
+ aq->target_max_speed_hz = spi->max_speed_hz;
+
+ return atmel_qspi_sama7g5_init(aq);
+}
+
static int atmel_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->controller;
@@ -488,6 +1151,9 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
+ if (aq->caps->has_gclk)
+ return atmel_qspi_sama7g5_setup(spi);
+
src_rate = clk_get_rate(aq->pclk);
if (!src_rate)
return -EINVAL;
@@ -573,17 +1239,29 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
return 0;
}
-static void atmel_qspi_init(struct atmel_qspi *aq)
+static int atmel_qspi_init(struct atmel_qspi *aq)
{
+ int ret;
+
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
+ return 0;
+ }
+
/* Reset the QSPI controller */
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- aq->mr |= QSPI_MR_SMM;
- atmel_qspi_write(aq->mr, aq, QSPI_MR);
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+ return 0;
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -599,12 +1277,65 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
return IRQ_NONE;
aq->pending |= pending;
- if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ if ((aq->pending & aq->irq_mask) == aq->irq_mask)
complete(&aq->cmd_completion);
return IRQ_HANDLED;
}
+static int atmel_qspi_dma_init(struct spi_controller *ctrl)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx");
+ if (IS_ERR(aq->rx_chan)) {
+ /* capture the error code before clearing the pointer */
+ ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
+ "RX DMA channel is not available\n");
+ aq->rx_chan = NULL;
+ return ret;
+ }
+
+ aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx");
+ if (IS_ERR(aq->tx_chan)) {
+ ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
+ "TX DMA channel is not available\n");
+ goto release_rx_chan;
+ }
+
+ ctrl->dma_rx = aq->rx_chan;
+ ctrl->dma_tx = aq->tx_chan;
+ init_completion(&aq->dma_completion);
+
+ dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));
+
+ return 0;
+
+release_rx_chan:
+ dma_release_channel(aq->rx_chan);
+ aq->rx_chan = NULL;
+ aq->tx_chan = NULL;
+ return ret;
+}
+
+static void atmel_qspi_dma_release(struct atmel_qspi *aq)
+{
+ if (aq->rx_chan)
+ dma_release_channel(aq->rx_chan);
+ if (aq->tx_chan)
+ dma_release_channel(aq->tx_chan);
+}
+
+static const struct atmel_qspi_ops atmel_qspi_ops = {
+ .set_cfg = atmel_qspi_set_cfg,
+ .transfer = atmel_qspi_transfer,
+};
+
+static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
+ .set_cfg = atmel_qspi_sama7g5_set_cfg,
+ .transfer = atmel_qspi_sama7g5_transfer,
+};
+
static int atmel_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
@@ -616,7 +1347,27 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (!ctrl)
return -ENOMEM;
+ aq = spi_controller_get_devdata(ctrl);
+
+ aq->caps = of_device_get_match_data(&pdev->dev);
+ if (!aq->caps) {
+ dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
+ return -EINVAL;
+ }
+
+ init_completion(&aq->cmd_completion);
+ aq->pdev = pdev;
+
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ if (aq->caps->octal)
+ ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+
+ if (aq->caps->has_gclk)
+ aq->ops = &atmel_qspi_sama7g5_ops;
+ else
+ aq->ops = &atmel_qspi_ops;
+
+ ctrl->max_speed_hz = aq->caps->max_speed_hz;
ctrl->setup = atmel_qspi_setup;
ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
ctrl->bus_num = -1;
@@ -625,11 +1376,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
ctrl->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, ctrl);
- aq = spi_controller_get_devdata(ctrl);
-
- init_completion(&aq->cmd_completion);
- aq->pdev = pdev;
-
/* Map the registers */
aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
if (IS_ERR(aq->regs))
@@ -644,57 +1390,52 @@ static int atmel_qspi_probe(struct platform_device *pdev)
"missing AHB memory\n");
aq->mmap_size = resource_size(res);
+ aq->mmap_phys_base = (dma_addr_t)res->start;
/* Get the peripheral clock */
- aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+ aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
- aq->pclk = devm_clk_get(&pdev->dev, NULL);
+ aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(aq->pclk))
return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
"missing peripheral clock\n");
- /* Enable the peripheral clock */
- err = clk_prepare_enable(aq->pclk);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "failed to enable the peripheral clock\n");
-
- aq->caps = of_device_get_match_data(&pdev->dev);
- if (!aq->caps) {
- dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
- err = -EINVAL;
- goto disable_pclk;
- }
-
if (aq->caps->has_qspick) {
/* Get the QSPI system clock */
- aq->qspick = devm_clk_get(&pdev->dev, "qspick");
+ aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
if (IS_ERR(aq->qspick)) {
dev_err(&pdev->dev, "missing system clock\n");
err = PTR_ERR(aq->qspick);
- goto disable_pclk;
+ return err;
}
- /* Enable the QSPI system clock */
- err = clk_prepare_enable(aq->qspick);
- if (err) {
- dev_err(&pdev->dev,
- "failed to enable the QSPI system clock\n");
- goto disable_pclk;
+ } else if (aq->caps->has_gclk) {
+ /* Get the QSPI generic clock */
+ aq->gclk = devm_clk_get(&pdev->dev, "gclk");
+ if (IS_ERR(aq->gclk)) {
+ dev_err(&pdev->dev, "missing Generic clock\n");
+ err = PTR_ERR(aq->gclk);
+ return err;
}
}
+ if (aq->caps->has_dma) {
+ err = atmel_qspi_dma_init(ctrl);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
- goto disable_qspick;
+ goto dma_release;
}
err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
0, dev_name(&pdev->dev), aq);
if (err)
- goto disable_qspick;
+ goto dma_release;
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -702,7 +1443,9 @@ static int atmel_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
- atmel_qspi_init(aq);
+ err = atmel_qspi_init(aq);
+ if (err)
+ goto dma_release;
err = spi_register_controller(ctrl);
if (err) {
@@ -710,21 +1453,57 @@ static int atmel_qspi_probe(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- goto disable_qspick;
+ goto dma_release;
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
-disable_qspick:
- clk_disable_unprepare(aq->qspick);
-disable_pclk:
- clk_disable_unprepare(aq->pclk);
+dma_release:
+ if (aq->caps->has_dma)
+ atmel_qspi_dma_release(aq);
return err;
}
+static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_RBUSY) &&
+ (val & QSPI_SR2_HIDLE), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_QSPIENS), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(aq->gclk);
+
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void atmel_qspi_remove(struct platform_device *pdev)
{
struct spi_controller *ctrl = platform_get_drvdata(pdev);
@@ -735,9 +1514,17 @@ static void atmel_qspi_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret >= 0) {
+ if (aq->caps->has_dma)
+ atmel_qspi_dma_release(aq);
+
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_sama7g5_suspend(aq);
+ if (ret)
+ dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
+ return;
+ }
+
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
- clk_disable(aq->qspick);
- clk_disable(aq->pclk);
} else {
/*
* atmel_qspi_runtime_{suspend,resume} just disable and enable
@@ -747,9 +1534,6 @@ static void atmel_qspi_remove(struct platform_device *pdev)
dev_warn(&pdev->dev, "Failed to resume device on remove\n");
}
- clk_unprepare(aq->qspick);
- clk_unprepare(aq->pclk);
-
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -765,6 +1549,12 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
if (ret < 0)
return ret;
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_sama7g5_suspend(aq);
+ clk_disable_unprepare(aq->pclk);
+ return ret;
+ }
+
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
pm_runtime_mark_last_busy(dev);
@@ -792,6 +1582,9 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
return ret;
}
+ if (aq->caps->has_gclk)
+ return atmel_qspi_sama7g5_init(aq);
+
ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
@@ -847,6 +1640,19 @@ static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
.has_ricr = true,
};
+static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .octal = true,
+ .has_dma = true,
+};
+
+static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .has_dma = true,
+};
+
static const struct of_device_id atmel_qspi_dt_ids[] = {
{
.compatible = "atmel,sama5d2-qspi",
@@ -856,6 +1662,15 @@ static const struct of_device_id atmel_qspi_dt_ids[] = {
.compatible = "microchip,sam9x60-qspi",
.data = &atmel_sam9x60_qspi_caps,
},
+ {
+ .compatible = "microchip,sama7g5-ospi",
+ .data = &atmel_sama7g5_ospi_caps,
+ },
+ {
+ .compatible = "microchip,sama7g5-qspi",
+ .data = &atmel_sama7g5_qspi_caps,
+ },
+
{ /* sentinel */ }
};
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index d30a21b0b05f..c85997478b81 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -298,19 +298,16 @@ static const struct amd_spi_freq amd_spi_freq[] = {
{ AMD_SPI_MIN_HZ, F_800KHz, 0},
};
-static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
+static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
{
unsigned int i, spd7_val, alt_spd;
- if (speed_hz < AMD_SPI_MIN_HZ)
- return -EINVAL;
-
for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
if (speed_hz >= amd_spi_freq[i].speed_hz)
break;
if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
- return 0;
+ return;
amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
@@ -329,8 +326,6 @@ static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
AMD_SPI_SPD7_MASK);
}
-
- return 0;
}
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
@@ -479,6 +474,9 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
return false;
}
+ if (op->max_freq < mem->spi->controller->min_speed_hz)
+ return false;
+
return spi_mem_default_supports_op(mem, op);
}
@@ -672,13 +670,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct amd_spi *amd_spi;
- int ret;
amd_spi = spi_controller_get_devdata(mem->spi->controller);
- ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz);
- if (ret)
- return ret;
+ amd_set_spi_freq(amd_spi, op->max_freq);
if (amd_spi->version == AMD_SPI_V2)
amd_set_spi_addr_mode(amd_spi, op);
@@ -693,10 +688,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
amd_spi_mem_data_out(amd_spi, op);
break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- return ret;
+ return 0;
}
static const struct spi_controller_mem_ops amd_spi_mem_ops = {
@@ -705,6 +700,10 @@ static const struct spi_controller_mem_ops amd_spi_mem_ops = {
.supports_op = amd_spi_supports_op,
};
+static const struct spi_controller_mem_caps amd_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
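Several drivers in this series follow the same two-step recipe: advertise per_op_freq in spi_controller_mem_caps, then clock each operation from op->max_freq instead of spi->max_speed_hz, letting the spi-mem core cap every operation individually. A minimal sketch of the pattern; the example_* names and the clock helper are illustrative, not a real API:

	#include <linux/spi/spi-mem.h>

	/* hypothetical divider helper, stands in for the driver's clock setup */
	static void example_set_clk(u32 hz)
	{
	}

	static int example_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
	{
		/* op->max_freq is valid once per_op_freq is advertised */
		example_set_clk(op->max_freq);
		/* ... issue the operation ... */
		return 0;
	}

	static const struct spi_controller_mem_ops example_mem_ops = {
		.exec_op = example_exec_op,
	};

	static const struct spi_controller_mem_caps example_mem_caps = {
		.per_op_freq = true,
	};

	/* in probe():
	 *	host->mem_ops  = &example_mem_ops;
	 *	host->mem_caps = &example_mem_caps;
	 */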
static int amd_spi_host_transfer(struct spi_controller *host,
struct spi_message *msg)
{
@@ -782,6 +781,7 @@ static int amd_spi_probe(struct platform_device *pdev)
host->setup = amd_spi_host_setup;
host->transfer_one_message = amd_spi_host_transfer;
host->mem_ops = &amd_spi_mem_ops;
+ host->mem_caps = &amd_spi_mem_caps;
host->max_transfer_size = amd_spi_max_transfer_size;
host->max_message_size = amd_spi_max_transfer_size;
diff --git a/drivers/spi/spi-amlogic-spifc-a1.c b/drivers/spi/spi-amlogic-spifc-a1.c
index fadf6667cd51..18c9aa2cbc29 100644
--- a/drivers/spi/spi-amlogic-spifc-a1.c
+++ b/drivers/spi/spi-amlogic-spifc-a1.c
@@ -259,7 +259,7 @@ static int amlogic_spifc_a1_exec_op(struct spi_mem *mem,
size_t data_size = op->data.nbytes;
int ret;
- ret = amlogic_spifc_a1_set_freq(spifc, mem->spi->max_speed_hz);
+ ret = amlogic_spifc_a1_set_freq(spifc, op->max_freq);
if (ret)
return ret;
@@ -320,6 +320,10 @@ static const struct spi_controller_mem_ops amlogic_spifc_a1_mem_ops = {
.adjust_op_size = amlogic_spifc_a1_adjust_op_size,
};
+static const struct spi_controller_mem_caps amlogic_spifc_a1_mem_caps = {
+ .per_op_freq = true,
+};
+
static int amlogic_spifc_a1_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
@@ -356,6 +360,7 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev)
ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
ctrl->auto_runtime_pm = true;
ctrl->mem_ops = &amlogic_spifc_a1_mem_ops;
+ ctrl->mem_caps = &amlogic_spifc_a1_mem_caps;
ctrl->min_speed_hz = SPIFC_A1_MIN_HZ;
ctrl->max_speed_hz = SPIFC_A1_MAX_HZ;
ctrl->mode_bits = (SPI_RX_DUAL | SPI_TX_DUAL |
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index a031ecb358e0..0cd37a7436d5 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -43,10 +43,13 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
#define CQSPI_RD_NO_IRQ BIT(6)
-#define CQSPI_DISABLE_STIG_MODE BIT(7)
+#define CQSPI_DMA_SET_MASK BIT(7)
+#define CQSPI_SUPPORT_DEVICE_RESET BIT(8)
+#define CQSPI_DISABLE_STIG_MODE BIT(9)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
+#define CQSPI_SUPPORTS_QUAD BIT(1)
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
@@ -111,7 +114,7 @@ struct cqspi_st {
struct cqspi_driver_platdata {
u32 hwcaps_mask;
- u8 quirks;
+ u16 quirks;
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr, size_t n_rx);
u32 (*get_dma_status)(struct cqspi_st *cqspi);
@@ -146,6 +149,8 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5)
+#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6)
#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
@@ -832,6 +837,25 @@ failrd:
return ret;
}
+static void cqspi_device_reset(struct cqspi_st *cqspi)
+{
+ u32 reg;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ /*
+ * NOTE: Delay timing implementation is derived from
+ * spi_nor_hw_reset()
+ */
+ writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(1, 5);
+ writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(100, 150);
+ writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(1000, 1200);
+}
+
static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
void __iomem *reg_base = cqspi->iobase;
@@ -1409,7 +1433,7 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
struct cqspi_flash_pdata *f_pdata;
f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
- cqspi_configure(f_pdata, mem->spi->max_speed_hz);
+ cqspi_configure(f_pdata, op->max_freq);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
/*
@@ -1658,6 +1682,7 @@ static const struct spi_controller_mem_ops cqspi_mem_ops = {
static const struct spi_controller_mem_caps cqspi_mem_caps = {
.dtr = true,
+ .per_op_freq = true,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
@@ -1865,6 +1890,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+ if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD)
+ host->mode_bits |= SPI_TX_QUAD;
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
cqspi->use_direct_mode_wr = true;
@@ -1886,8 +1913,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
cqspi->disable_stig_mode = true;
- if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,versal-ospi-1.0")) {
+ if (ddata->quirks & CQSPI_DMA_SET_MASK) {
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto probe_reset_failed;
@@ -1917,6 +1943,9 @@ static int cqspi_probe(struct platform_device *pdev)
host->num_chipselect = cqspi->num_chipselect;
+ if (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)
+ cqspi_device_reset(cqspi);
+
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
@@ -2037,7 +2066,7 @@ static const struct cqspi_driver_platdata k2g_qspi = {
};
static const struct cqspi_driver_platdata am654_ospi = {
- .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
.quirks = CQSPI_NEEDS_WR_DELAY,
};
@@ -2054,7 +2083,17 @@ static const struct cqspi_driver_platdata socfpga_qspi = {
static const struct cqspi_driver_platdata versal_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
+ | CQSPI_DMA_SET_MASK,
+ .indirect_read_dma = cqspi_versal_indirect_read_dma,
+ .get_dma_status = cqspi_get_versal_dma_status,
+};
+
+static const struct cqspi_driver_platdata versal2_ospi = {
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
+ | CQSPI_DMA_SET_MASK
+ | CQSPI_SUPPORT_DEVICE_RESET,
.indirect_read_dma = cqspi_versal_indirect_read_dma,
.get_dma_status = cqspi_get_versal_dma_status,
};
@@ -2111,6 +2150,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "mobileye,eyeq5-ospi",
.data = &mobileye_eyeq5_ospi,
},
+ {
+ .compatible = "amd,versal2-ospi",
+ .data = &versal2_ospi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index ea517af9435f..941ecc6f59f8 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -677,7 +677,7 @@ static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
* operation. Transmit-only mode is suitable for the rest of them.
*/
cfg.dfs = 8;
- cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
+ cfg.freq = clamp(op->max_freq, 0U, dws->max_mem_freq);
if (op->data.dir == SPI_MEM_DATA_IN) {
cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.ndf = op->data.nbytes;
@@ -894,6 +894,10 @@ static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
+static const struct spi_controller_mem_caps dw_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_controller *host;
@@ -941,8 +945,10 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
host->set_cs = dw_spi_set_cs;
host->transfer_one = dw_spi_transfer_one;
host->handle_err = dw_spi_handle_err;
- if (dws->mem_ops.exec_op)
+ if (dws->mem_ops.exec_op) {
host->mem_ops = &dws->mem_ops;
+ host->mem_caps = &dw_spi_mem_caps;
+ }
host->max_speed_hz = dws->max_freq;
host->flags = SPI_CONTROLLER_GPIO_SS;
host->auto_runtime_pm = true;
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 9ec53bf0dda8..355e6a39fb41 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -522,9 +522,10 @@ static void fsl_qspi_invalidate(struct fsl_qspi *q)
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
-static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
+static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
- unsigned long rate = spi->max_speed_hz;
+ unsigned long rate = op->max_freq;
int ret;
if (q->selected == spi_get_chipselect(spi, 0))
@@ -652,7 +653,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
- fsl_qspi_select_mem(q, mem->spi);
+ fsl_qspi_select_mem(q, mem->spi, op);
if (needs_amba_base_offset(q))
addr_offset = q->memmap_phy;
@@ -839,6 +840,10 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
.get_name = fsl_qspi_get_name,
};
+static const struct spi_controller_mem_caps fsl_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int fsl_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -923,6 +928,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ctlr->bus_num = -1;
ctlr->num_chipselect = 4;
ctlr->mem_ops = &fsl_qspi_mem_ops;
+ ctlr->mem_caps = &fsl_qspi_mem_caps;
fsl_qspi_default_setup(q);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 856a4a9def66..2f2082652a1a 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -618,7 +618,7 @@ static struct spi_controller *fsl_spi_probe(struct device *dev,
if (ret < 0)
goto err_probe;
- dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
+ dev_info(dev, "at MMIO %pa (irq = %d), %s mode\n", &mem->start,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return host;
diff --git a/drivers/spi/spi-kspi2.c b/drivers/spi/spi-kspi2.c
new file mode 100644
index 000000000000..ca73ec52ce63
--- /dev/null
+++ b/drivers/spi/spi-kspi2.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) KEBA Industrial Automation GmbH 2024
+ *
+ * Driver for KEBA SPI host controller type 2 FPGA IP core
+ */
+
+#include <linux/iopoll.h>
+#include <linux/misc/keba.h>
+#include <linux/spi/spi.h>
+
+#define KSPI2 "kspi2"
+
+#define KSPI2_CLK_FREQ_REG 0x03
+#define KSPI2_CLK_FREQ_MASK 0x0f
+#define KSPI2_CLK_FREQ_62_5M 0x0
+#define KSPI2_CLK_FREQ_33_3M 0x1
+#define KSPI2_CLK_FREQ_125M 0x2
+#define KSPI2_CLK_FREQ_50M 0x3
+#define KSPI2_CLK_FREQ_100M 0x4
+
+#define KSPI2_CONTROL_REG 0x04
+#define KSPI2_CONTROL_CLK_DIV_MAX 0x0f
+#define KSPI2_CONTROL_CLK_DIV_MASK 0x0f
+#define KSPI2_CONTROL_CPHA 0x10
+#define KSPI2_CONTROL_CPOL 0x20
+#define KSPI2_CONTROL_CLK_MODE_MASK 0x30
+#define KSPI2_CONTROL_INIT KSPI2_CONTROL_CLK_DIV_MAX
+
+#define KSPI2_STATUS_REG 0x08
+#define KSPI2_STATUS_IN_USE 0x01
+#define KSPI2_STATUS_BUSY 0x02
+
+#define KSPI2_DATA_REG 0x0c
+
+#define KSPI2_CS_NR_REG 0x10
+#define KSPI2_CS_NR_NONE 0xff
+
+#define KSPI2_MODE_BITS (SPI_CPHA | SPI_CPOL)
+#define KSPI2_NUM_CS 255
+
+#define KSPI2_SPEED_HZ_MIN(kspi) ((kspi)->base_speed_hz / 65536)
+#define KSPI2_SPEED_HZ_MAX(kspi) ((kspi)->base_speed_hz / 2)
+
+/* timeout is 10 times the time to transfer one byte at slowest clock */
+#define KSPI2_XFER_TIMEOUT_US(kspi) (USEC_PER_SEC / \
+ KSPI2_SPEED_HZ_MIN(kspi) * 8 * 10)
+
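As a sanity check of the timeout macro: assuming the 62.5 MHz base clock from the table above, the slowest SCLK is 62500000 / 65536 = 953 Hz in integer math, so the macro yields roughly 84 ms, about ten byte-times. A standalone version of the computation:

	#include <stdio.h>

	int main(void)
	{
		unsigned long base_hz = 62500000UL;	/* KSPI2_CLK_FREQ_62_5M */
		unsigned long min_hz = base_hz / 65536;	/* slowest divider */
		unsigned long timeout_us = 1000000UL / min_hz * 8 * 10;

		/* ~953 Hz minimum clock -> ~84 ms per-byte transfer timeout */
		printf("min SCLK %lu Hz, timeout %lu us\n", min_hz, timeout_us);
		return 0;
	}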
+#define KSPI2_INUSE_SLEEP_US (2 * USEC_PER_MSEC)
+#define KSPI2_INUSE_TIMEOUT_US (10 * USEC_PER_SEC)
+
+struct kspi2 {
+ struct keba_spi_auxdev *auxdev;
+ void __iomem *base;
+ struct spi_controller *host;
+
+ u32 base_speed_hz; /* SPI base clock frequency in HZ */
+ u8 control_shadow;
+
+ struct spi_device **device;
+ int device_size;
+};
+
+static int kspi2_inuse_lock(struct kspi2 *kspi)
+{
+ u8 sts;
+ int ret;
+
+ /*
+ * The SPI controller has an IN_USE bit for locking access to the
+ * controller. This enables the use of the SPI controller by other
+ * non-Linux processors.
+ *
+ * If the SPI controller is free, then the first read returns
+ * IN_USE == 0. After that the SPI controller is locked and further
+ * reads of IN_USE return 1.
+ *
+ * The SPI controller is unlocked by writing 1 into IN_USE.
+ *
+ * The IN_USE bit acts as a hardware semaphore for the SPI controller.
+ * Poll for semaphore, but sleep while polling to free the CPU.
+ */
+ ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG,
+ sts, (sts & KSPI2_STATUS_IN_USE) == 0,
+ KSPI2_INUSE_SLEEP_US, KSPI2_INUSE_TIMEOUT_US);
+ if (ret != 0)
+ dev_warn(&kspi->auxdev->auxdev.dev, "%s err!\n", __func__);
+
+ return ret;
+}
+
+static void kspi2_inuse_unlock(struct kspi2 *kspi)
+{
+ /* unlock the controller by writing 1 into IN_USE */
+ iowrite8(KSPI2_STATUS_IN_USE, kspi->base + KSPI2_STATUS_REG);
+}
+
+static int kspi2_prepare_hardware(struct spi_controller *host)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* lock hardware semaphore before actual use of controller */
+ return kspi2_inuse_lock(kspi);
+}
+
+static int kspi2_unprepare_hardware(struct spi_controller *host)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* unlock hardware semaphore after actual use of controller */
+ kspi2_inuse_unlock(kspi);
+
+ return 0;
+}
+
+static u8 kspi2_calc_minimal_divider(struct kspi2 *kspi, u32 max_speed_hz)
+{
+ u8 div;
+
+ /*
+ * Divider values 2, 4, 8, 16, ..., 65536 are possible. They are coded
+ * as 0, 1, 2, 3, ..., 15 in the CONTROL_CLK_DIV bits.
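+ *
+ * E.g. with a 50 MHz base clock and a 1 MHz limit, div = 5 is returned,
+ * since 50 MHz >> (5 + 1) = 781 kHz <= 1 MHz (div = 4 gives 1.56 MHz).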
+ */
+ for (div = 0; div < KSPI2_CONTROL_CLK_DIV_MAX; div++) {
+ if ((kspi->base_speed_hz >> (div + 1)) <= max_speed_hz)
+ return div;
+ }
+
+ /* return the divider for the slowest clock if the loop finds none */
+ return KSPI2_CONTROL_CLK_DIV_MAX;
+}
+
+static void kspi2_write_control_reg(struct kspi2 *kspi, u8 val, u8 mask)
+{
+ /* write control register only when necessary to improve performance */
+ if (val != (kspi->control_shadow & mask)) {
+ kspi->control_shadow = (kspi->control_shadow & ~mask) | val;
+ iowrite8(kspi->control_shadow, kspi->base + KSPI2_CONTROL_REG);
+ }
+}
+
+static int kspi2_txrx_byte(struct kspi2 *kspi, u8 tx, u8 *rx)
+{
+ u8 sts;
+ int ret;
+
+ /* start transfer by writing TX byte */
+ iowrite8(tx, kspi->base + KSPI2_DATA_REG);
+
+ /* wait till finished (BUSY == 0) */
+ ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG,
+ sts, (sts & KSPI2_STATUS_BUSY) == 0,
+ 0, KSPI2_XFER_TIMEOUT_US(kspi));
+ if (ret != 0)
+ return ret;
+
+ /* read RX byte */
+ if (rx)
+ *rx = ioread8(kspi->base + KSPI2_DATA_REG);
+
+ return 0;
+}
+
+static int kspi2_process_transfer(struct kspi2 *kspi, struct spi_transfer *t)
+{
+ u8 tx = 0;
+ u8 rx;
+ int i;
+ int ret;
+
+ for (i = 0; i < t->len; i++) {
+ if (t->tx_buf)
+ tx = ((const u8 *)t->tx_buf)[i];
+
+ ret = kspi2_txrx_byte(kspi, tx, &rx);
+ if (ret)
+ return ret;
+
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[i] = rx;
+ }
+
+ return 0;
+}
+
+static int kspi2_setup_transfer(struct kspi2 *kspi,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ u32 max_speed_hz = spi->max_speed_hz;
+ u8 clk_div;
+
+ /*
+ * spi_device (spi) has default parameters. Some of these can be
+ * overridden by parameters in spi_transfer (t).
+ */
+ if (t->bits_per_word && ((t->bits_per_word % 8) != 0)) {
+ dev_err(&spi->dev, "Word width %d not supported!\n",
+ t->bits_per_word);
+
+ return -EINVAL;
+ }
+
+ if (t->speed_hz && (t->speed_hz < max_speed_hz))
+ max_speed_hz = t->speed_hz;
+
+ clk_div = kspi2_calc_minimal_divider(kspi, max_speed_hz);
+ kspi2_write_control_reg(kspi, clk_div, KSPI2_CONTROL_CLK_DIV_MASK);
+
+ return 0;
+}
+
+static int kspi2_transfer_one(struct spi_controller *host,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+ int ret;
+
+ ret = kspi2_setup_transfer(kspi, spi, t);
+ if (ret != 0)
+ return ret;
+
+ if (t->len) {
+ ret = kspi2_process_transfer(kspi, t);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kspi2_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_controller *host = spi->controller;
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* the controller uses active-low chip select signals by design */
+ if (!enable)
+ iowrite8(spi_get_chipselect(spi, 0), kspi->base + KSPI2_CS_NR_REG);
+ else
+ iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG);
+}
+
+static int kspi2_prepare_message(struct spi_controller *host,
+ struct spi_message *msg)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+ struct spi_device *spi = msg->spi;
+ u8 mode = 0;
+
+ /* setup SPI clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ mode |= KSPI2_CONTROL_CPHA;
+ if (spi->mode & SPI_CPOL)
+ mode |= KSPI2_CONTROL_CPOL;
+ kspi2_write_control_reg(kspi, mode, KSPI2_CONTROL_CLK_MODE_MASK);
+
+ return 0;
+}
+
+static int kspi2_setup(struct spi_device *spi)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(spi->controller);
+
+ /*
+ * Only check parameters here. The actual setup is done in
+ * kspi2_prepare_message() and directly before each SPI transfer starts.
+ */
+
+ if (spi->mode & ~KSPI2_MODE_BITS) {
+ dev_err(&spi->dev, "Mode %d not supported!\n", spi->mode);
+
+ return -EINVAL;
+ }
+
+ if ((spi->bits_per_word % 8) != 0) {
+ dev_err(&spi->dev, "Word width %d not supported!\n",
+ spi->bits_per_word);
+
+ return -EINVAL;
+ }
+
+ if ((spi->max_speed_hz == 0) ||
+ (spi->max_speed_hz > KSPI2_SPEED_HZ_MAX(kspi)))
+ spi->max_speed_hz = KSPI2_SPEED_HZ_MAX(kspi);
+
+ if (spi->max_speed_hz < KSPI2_SPEED_HZ_MIN(kspi)) {
+ dev_err(&spi->dev, "Requested speed of %d Hz is too low!\n",
+ spi->max_speed_hz);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void kspi2_unregister_devices(struct kspi2 *kspi)
+{
+ int i;
+
+ for (i = 0; i < kspi->device_size; i++) {
+ struct spi_device *device = kspi->device[i];
+
+ if (device)
+ spi_unregister_device(device);
+ }
+}
+
+static int kspi2_register_devices(struct kspi2 *kspi)
+{
+ struct spi_board_info *info = kspi->auxdev->info;
+ int i;
+
+ /* register all known SPI devices */
+ for (i = 0; i < kspi->auxdev->info_size; i++) {
+ struct spi_device *device = spi_new_device(kspi->host, &info[i]);
+
+ if (!device) {
+ kspi2_unregister_devices(kspi);
+
+ return -ENODEV;
+ }
+ kspi->device[i] = device;
+ }
+
+ return 0;
+}
+
+static void kspi2_init(struct kspi2 *kspi)
+{
+ iowrite8(KSPI2_CONTROL_INIT, kspi->base + KSPI2_CONTROL_REG);
+ kspi->control_shadow = KSPI2_CONTROL_INIT;
+
+ iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG);
+}
+
+static int kspi2_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &auxdev->dev;
+ struct spi_controller *host;
+ struct kspi2 *kspi;
+ u8 clk_reg;
+ int ret;
+
+ host = devm_spi_alloc_host(dev, sizeof(struct kspi2));
+ if (!host)
+ return -ENOMEM;
+ kspi = spi_controller_get_devdata(host);
+ kspi->auxdev = container_of(auxdev, struct keba_spi_auxdev, auxdev);
+ kspi->host = host;
+ kspi->device = devm_kcalloc(dev, kspi->auxdev->info_size,
+ sizeof(*kspi->device), GFP_KERNEL);
+ if (!kspi->device)
+ return -ENOMEM;
+ kspi->device_size = kspi->auxdev->info_size;
+ auxiliary_set_drvdata(auxdev, kspi);
+
+ kspi->base = devm_ioremap_resource(dev, &kspi->auxdev->io);
+ if (IS_ERR(kspi->base))
+ return PTR_ERR(kspi->base);
+
+ /* read the SPI base clock frequency */
+ clk_reg = ioread8(kspi->base + KSPI2_CLK_FREQ_REG);
+ switch (clk_reg & KSPI2_CLK_FREQ_MASK) {
+ case KSPI2_CLK_FREQ_62_5M:
+ kspi->base_speed_hz = 62500000;
+ break;
+ case KSPI2_CLK_FREQ_33_3M:
+ kspi->base_speed_hz = 33333333;
+ break;
+ case KSPI2_CLK_FREQ_125M:
+ kspi->base_speed_hz = 125000000;
+ break;
+ case KSPI2_CLK_FREQ_50M:
+ kspi->base_speed_hz = 50000000;
+ break;
+ case KSPI2_CLK_FREQ_100M:
+ kspi->base_speed_hz = 100000000;
+ break;
+ default:
+ dev_err(dev, "Undefined SPI base clock frequency!\n");
+ return -ENODEV;
+ }
+
+ kspi2_init(kspi);
+
+ host->bus_num = -1;
+ host->num_chipselect = KSPI2_NUM_CS;
+ host->mode_bits = KSPI2_MODE_BITS;
+ host->setup = kspi2_setup;
+ host->prepare_transfer_hardware = kspi2_prepare_hardware;
+ host->unprepare_transfer_hardware = kspi2_unprepare_hardware;
+ host->prepare_message = kspi2_prepare_message;
+ host->set_cs = kspi2_set_cs;
+ host->transfer_one = kspi2_transfer_one;
+ ret = devm_spi_register_controller(dev, host);
+ if (ret) {
+ dev_err(dev, "Failed to register host (%d)!\n", ret);
+ return ret;
+ }
+
+ ret = kspi2_register_devices(kspi);
+ if (ret) {
+ dev_err(dev, "Failed to register devices (%d)!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kspi2_remove(struct auxiliary_device *auxdev)
+{
+ struct kspi2 *kspi = auxiliary_get_drvdata(auxdev);
+
+ kspi2_unregister_devices(kspi);
+}
+
+static const struct auxiliary_device_id kspi2_devtype_aux[] = {
+ { .name = "keba.spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(auxiliary, kspi2_devtype_aux);
+
+static struct auxiliary_driver kspi2_driver_aux = {
+ .name = KSPI2,
+ .id_table = kspi2_devtype_aux,
+ .probe = kspi2_probe,
+ .remove = kspi2_remove,
+};
+module_auxiliary_driver(kspi2_driver_aux);
+
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA SPI host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index abc6792e738c..a9f0f47f4759 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -187,6 +187,16 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
return false;
}
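+ /* Reject per-op frequencies that the controller cannot honour */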
+ if (op->max_freq && mem->spi->controller->min_speed_hz &&
+ op->max_freq < mem->spi->controller->min_speed_hz)
+ return false;
+
+ if (op->max_freq &&
+ op->max_freq < mem->spi->max_speed_hz) {
+ if (!spi_mem_controller_is_capable(ctlr, per_op_freq))
+ return false;
+ }
+
return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
@@ -364,6 +374,9 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u8 *tmpbuf;
int ret;
+ /* Make sure the operation frequency is correct before going further */
+ spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
+
ret = spi_mem_check_op(op);
if (ret)
return ret;
@@ -410,6 +423,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf;
xfers[xferpos].len = op->cmd.nbytes;
xfers[xferpos].tx_nbits = op->cmd.buswidth;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen++;
@@ -424,6 +438,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf + 1;
xfers[xferpos].len = op->addr.nbytes;
xfers[xferpos].tx_nbits = op->addr.buswidth;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->addr.nbytes;
@@ -435,6 +450,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
xfers[xferpos].dummy_data = 1;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
@@ -450,6 +466,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
xfers[xferpos].len = op->data.nbytes;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->data.nbytes;
@@ -528,6 +545,53 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+/**
+ * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to
+ * match controller, PCB and chip limitations
+ * @mem: the SPI memory
+ * @op: the operation to adjust
+ *
+ * Some chips have per-op frequency limitations, so the maximum speed must be
+ * adapted per operation. This function clamps @op->max_freq to the maximum
+ * value supported by the memory and its controller.
+ */
+void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (!op->max_freq || op->max_freq > mem->spi->max_speed_hz)
+ op->max_freq = mem->spi->max_speed_hz;
+}
+EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq);
+
+/**
+ * spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an
+ * operation. This helps find the best variant
+ * among a list of possible choices.
+ * @op: the operation to benchmark
+ *
+ * Some chips have per-op frequency limitations, PCBs usually have their own
+ * limitations as well, and controllers can support dual, quad or even octal
+ * modes, sometimes in DTR. The number of combinations makes it impossible to
+ * statically list the best choice for all situations. For an accurate result,
+ * each combination should be rated (e.g. with a time estimate) and the best
+ * pick taken based on these calculations.
+ *
+ * Returns a ns estimate for the time this op would take.
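+ *
+ * For example, a single-bit 1-byte command followed by 4096 bytes of quad
+ * data at 100 MHz takes 8 + 8192 cycles of 10 ns each, i.e. about 82 us.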
+ */
+u64 spi_mem_calc_op_duration(struct spi_mem_op *op)
+{
+ u64 ncycles = 0;
+ u32 ns_per_cycles;
+
+ ns_per_cycles = 1000000000 / op->max_freq;
+ ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
+ ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);
+ ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+ ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);
+
+ return ncycles * ns_per_cycles;
+}
+EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration);
+
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
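A minimal sketch of how the two new helpers combine (illustrative only, not
part of the patch; op_single and op_quad are assumed to be pre-filled
struct spi_mem_op candidates for the same access):

	struct spi_mem_op *op;
	u64 d_single, d_quad;

	/* Clamp each candidate's frequency to what device and controller allow */
	spi_mem_adjust_op_freq(mem, &op_single);
	spi_mem_adjust_op_freq(mem, &op_quad);

	/* Rate both variants by their theoretical duration, keep the faster */
	d_single = spi_mem_calc_op_duration(&op_single);
	d_quad = spi_mem_calc_op_duration(&op_quad);
	op = (d_quad < d_single) ? &op_quad : &op_single;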
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index ad2b5ffa6153..fa828fcaaef2 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -265,7 +265,8 @@ static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
return ret;
}
-static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
+static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
unsigned long clk_hz;
u32 control, baud_rate_val = 0;
@@ -274,11 +275,11 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
if (!clk_hz)
return -EINVAL;
- baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
dev_err(&spi->dev,
"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
- spi->max_speed_hz, clk_hz);
+ op->max_freq, clk_hz);
return -EINVAL;
}
@@ -399,7 +400,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
if (err)
goto error;
- err = mchp_coreqspi_setup_clock(qspi, mem->spi);
+ err = mchp_coreqspi_setup_clock(qspi, mem->spi, op);
if (err)
goto error;
@@ -457,6 +458,10 @@ error:
static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
+ unsigned long clk_hz;
+ u32 baud_rate_val;
+
if (!spi_mem_default_supports_op(mem, op))
return false;
@@ -479,6 +484,14 @@ static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_
return false;
}
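+ /*
+ * Reject ops whose frequency would need a divider outside the supported
+ * range; this mirrors the check in mchp_coreqspi_setup_clock().
+ */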
+ clk_hz = clk_get_rate(qspi->clk);
+ if (!clk_hz)
+ return false;
+
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
+ if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER)
+ return false;
+
return true;
}
@@ -498,6 +511,10 @@ static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
.exec_op = mchp_coreqspi_exec_op,
};
+static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int mchp_coreqspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -540,6 +557,7 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &mchp_coreqspi_mem_ops;
+ ctlr->mem_caps = &mchp_coreqspi_mem_caps;
ctlr->setup = mchp_coreqspi_setup_op;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 85f3bafc975d..197bf2dbe5de 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -961,7 +961,7 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,
mtk_spi_reset(mdata);
mtk_spi_hw_init(mem->spi->controller, mem->spi);
- mtk_spi_prepare_transfer(mem->spi->controller, mem->spi->max_speed_hz);
+ mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq);
reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
/* opcode byte len */
@@ -1122,6 +1122,10 @@ static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
.exec_op = mtk_spi_mem_exec_op,
};
+static const struct spi_controller_mem_caps mtk_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int mtk_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1160,6 +1164,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (mdata->dev_comp->ipm_design) {
mdata->dev = dev;
host->mem_ops = &mtk_spi_mem_ops;
+ host->mem_caps = &mtk_spi_mem_caps;
init_completion(&mdata->spimem_done);
}
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 809767d3145c..eeaea6a5e310 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -522,7 +522,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
int i, ret;
u8 addr[8], cmd[2];
- ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
+ ret = mxic_spi_set_freq(mxic, op->max_freq);
if (ret)
return ret;
@@ -582,6 +582,7 @@ static const struct spi_controller_mem_caps mxic_spi_mem_caps = {
.dtr = true,
.ecc = true,
.swap16 = true,
+ .per_op_freq = true,
};
static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index e6d955d964f4..43455305fdf4 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -381,6 +381,8 @@ static int mxs_spi_transfer_one(struct spi_controller *host,
if (status)
break;
+ t->effective_speed_hz = ssp->clk_rate;
+
/* De-assert on last transfer, inverted by cs_change flag */
flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
TXRX_DEASSERT_CS : 0;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 1161b9e5a4dc..bad6b30bab0e 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -705,9 +705,10 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
* Value for rest of the CS FLSHxxCR0 register would be zero.
*
*/
-static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
+static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
- unsigned long rate = spi->max_speed_hz;
+ unsigned long rate = op->max_freq;
int ret;
uint64_t size_kb;
@@ -931,7 +932,7 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
WARN_ON(err);
- nxp_fspi_select_mem(f, mem->spi);
+ nxp_fspi_select_mem(f, mem->spi, op);
nxp_fspi_prepare_lut(f, op);
/*
@@ -1149,6 +1150,10 @@ static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
.get_name = nxp_fspi_get_name,
};
+static const struct spi_controller_mem_caps nxp_fspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int nxp_fspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -1246,6 +1251,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
ctlr->bus_num = -1;
ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
ctlr->mem_ops = &nxp_fspi_mem_ops;
+ ctlr->mem_caps = &nxp_fspi_mem_caps;
nxp_fspi_default_setup(f);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 903d76145272..5f9cac41baff 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -73,8 +73,9 @@ struct chip_data {
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
#define LPSS_PRIV_CLOCK_GATE 0x38
-#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
-#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF 0x0
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
@@ -321,6 +322,20 @@ static void __lpss_ssp_write_priv(struct driver_data *drv_data,
writel(value, drv_data->lpss_base + offset);
}
+static bool __lpss_ssp_update_priv(struct driver_data *drv_data, unsigned int offset,
+ u32 mask, u32 value)
+{
+ u32 new, curr;
+
+ curr = __lpss_ssp_read_priv(drv_data, offset);
+ new = (curr & ~mask) | (value & mask);
+ if (new == curr)
+ return false;
+
+ __lpss_ssp_write_priv(drv_data, offset, new);
+ return true;
+}
+
/*
* lpss_ssp_setup - perform LPSS SSP specific setup
* @drv_data: pointer to the driver private data
@@ -337,21 +352,16 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
/* Enable software chip select control */
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
- value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ value = LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, value, value);
/* Enable multiblock DMA transfers */
if (drv_data->controller_info->enable_dma) {
- __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
+ __lpss_ssp_update_priv(drv_data, config->reg_ssp, BIT(0), BIT(0));
if (config->reg_general >= 0) {
- value = __lpss_ssp_read_priv(drv_data,
- config->reg_general);
- value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
- __lpss_ssp_write_priv(drv_data,
- config->reg_general, value);
+ value = LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_update_priv(drv_data, config->reg_general, value, value);
}
}
}
@@ -361,30 +371,19 @@ static void lpss_ssp_select_cs(struct spi_device *spi,
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
- u32 value, cs;
+ u32 cs;
- if (!config->cs_sel_mask)
+ cs = spi_get_chipselect(spi, 0) << config->cs_sel_shift;
+ if (!__lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, config->cs_sel_mask, cs))
return;
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
-
- cs = spi_get_chipselect(spi, 0);
- cs <<= config->cs_sel_shift;
- if (cs != (value & config->cs_sel_mask)) {
- /*
- * When switching another chip select output active the
- * output must be selected first and wait 2 ssp_clk cycles
- * before changing state to active. Otherwise a short
- * glitch will occur on the previous chip select since
- * output select is latched but state control is not.
- */
- value &= ~config->cs_sel_mask;
- value |= cs;
- __lpss_ssp_write_priv(drv_data,
- config->reg_cs_ctrl, value);
- ndelay(1000000000 /
- (drv_data->controller->max_speed_hz / 2));
- }
+ /*
+ * When switching another chip select output to active, the output must
+ * be selected first, then wait 2 ssp_clk cycles before changing its
+ * state to active. Otherwise a short glitch will occur on the previous
+ * chip select, since output select is latched but state control is not.
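+ *
+ * The ndelay() below is 1000000000 / (max_speed_hz / 2) ns, i.e. exactly
+ * two clock periods at the controller's maximum speed (80 ns at 25 MHz).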
+ */
+ ndelay(1000000000 / (drv_data->controller->max_speed_hz / 2));
}
static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
@@ -392,34 +391,27 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
const struct lpss_config *config;
- u32 value;
+ u32 mask;
config = lpss_get_config(drv_data);
if (enable)
lpss_ssp_select_cs(spi, config);
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- if (enable)
- value &= ~LPSS_CS_CONTROL_CS_HIGH;
- else
- value |= LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ mask = LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? mask : 0);
if (config->cs_clk_stays_gated) {
- u32 clkgate;
-
/*
* Changing CS alone when dynamic clock gating is on won't
* actually flip CS at that time. This ruins SPI transfers
* that specify delays, or have no data. Toggle the clock mode
* to force on briefly to poke the CS pin to move.
*/
- clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
- value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
- LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
-
- __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
- __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
+ mask = LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK;
+ if (__lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask,
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON))
+ __lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask,
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF);
}
}
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 70bbb459caa4..f3fe10eddb6a 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -13,12 +13,14 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
+#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
#include <linux/spi/spi-mem.h>
/* System control */
@@ -110,6 +112,7 @@
#define SFC_VER_3 0x3
#define SFC_VER_4 0x4
#define SFC_VER_5 0x5
+#define SFC_VER_8 0x8
/* Delay line controller register */
#define SFC_DLL_CTRL0 0x3C
@@ -150,16 +153,13 @@
/* Data */
#define SFC_DATA 0x108
-/* The controller and documentation reports that it supports up to 4 CS
- * devices (0-3), however I have only been able to test a single CS (CS 0)
- * due to the configuration of my device.
- */
-#define SFC_MAX_CHIPSELECT_NUM 4
+#define SFC_CS1_REG_OFFSET 0x200
+
+#define SFC_MAX_CHIPSELECT_NUM 2
-/* The SFC can transfer max 16KB - 1 at one time
- * we set it to 15.5KB here for alignment.
- */
#define SFC_MAX_IOSIZE_VER3 (512 * 31)
+/* The controller supports up to 4GB, but 64KB is enough and reserves less memory */
+#define SFC_MAX_IOSIZE_VER4 (0x10000U)
/* DMA is only enabled for large data transmission */
#define SFC_DMA_TRANS_THRETHOLD (0x40)
@@ -169,12 +169,14 @@
*/
#define SFC_MAX_SPEED (150 * 1000 * 1000)
+#define ROCKCHIP_AUTOSUSPEND_DELAY 2000
+
struct rockchip_sfc {
struct device *dev;
void __iomem *regbase;
struct clk *hclk;
struct clk *clk;
- u32 frequency;
+ u32 speed[SFC_MAX_CHIPSELECT_NUM];
/* virtual mapped addr for dma_buffer */
void *buffer;
dma_addr_t dma_buffer;
@@ -216,6 +218,22 @@ static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
return SFC_MAX_IOSIZE_VER3;
}
+static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long speed)
+{
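+ /* On SFC v8 and later the effective SPI clock is clk / 2 */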
+ if (sfc->version >= SFC_VER_8)
+ return clk_set_rate(sfc->clk, speed * 2);
+ else
+ return clk_set_rate(sfc->clk, speed);
+}
+
+static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc)
+{
+ if (sfc->version >= SFC_VER_8)
+ return clk_get_rate(sfc->clk) / 2;
+ else
+ return clk_get_rate(sfc->clk);
+}
+
static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
{
u32 reg;
@@ -302,6 +320,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
u32 len)
{
u32 ctrl = 0, cmd = 0;
+ u8 cs = spi_get_chipselect(mem->spi, 0);
/* set CMD */
cmd = op->cmd.opcode;
@@ -315,7 +334,8 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
} else {
cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
- writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+ writel(op->addr.nbytes * 8 - 1,
+ sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
}
ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
@@ -347,7 +367,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
/* set the Controller */
ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
- cmd |= spi_get_chipselect(mem->spi, 0) << SFC_CMD_CS_SHIFT;
+ cmd |= cs << SFC_CMD_CS_SHIFT;
dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
op->addr.nbytes, op->addr.buswidth,
@@ -355,7 +375,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
ctrl, cmd, op->addr.val, len);
- writel(ctrl, sfc->regbase + SFC_CTRL);
+ writel(ctrl, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
writel(cmd, sfc->regbase + SFC_CMD);
if (op->addr.nbytes)
writel(op->addr.val, sfc->regbase + SFC_ADDR);
@@ -453,8 +473,10 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
- if (op->data.dir == SPI_MEM_DATA_OUT)
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
memcpy(sfc->buffer, op->data.buf.out, len);
+ dma_sync_single_for_device(sfc->dev, sfc->dma_buffer, len, DMA_TO_DEVICE);
+ }
ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
@@ -462,8 +484,11 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
ret = -ETIMEDOUT;
}
rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
- if (op->data.dir == SPI_MEM_DATA_IN)
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_sync_single_for_cpu(sfc->dev, sfc->dma_buffer, len, DMA_FROM_DEVICE);
memcpy(op->data.buf.in, sfc->buffer, len);
+ }
return ret;
}
@@ -473,6 +498,16 @@ static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
int ret = 0;
u32 status;
+ /*
+ * Usually only very little data is left in the FIFO and the controller
+ * finishes the transfer shortly, so poll briefly before falling back to
+ * the slower sleeping poll below.
+ */
+ ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
+ !(status & SFC_SR_IS_BUSY),
+ 0, 10);
+ if (!ret)
+ return 0;
+
ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
!(status & SFC_SR_IS_BUSY),
20, timeout_us);
@@ -491,14 +526,22 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op
struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
u32 len = op->data.nbytes;
int ret;
+ u8 cs = spi_get_chipselect(mem->spi, 0);
- if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
- ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
+ ret = pm_runtime_get_sync(sfc->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sfc->dev);
+ return ret;
+ }
+
+ if (unlikely(op->max_freq != sfc->speed[cs]) &&
+ !has_acpi_companion(sfc->dev)) {
+ ret = rockchip_sfc_clk_set_rate(sfc, op->max_freq);
if (ret)
- return ret;
- sfc->frequency = mem->spi->max_speed_hz;
+ goto out;
+ sfc->speed[cs] = op->max_freq;
dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
- sfc->frequency, clk_get_rate(sfc->clk));
+ sfc->speed[cs], rockchip_sfc_clk_get_rate(sfc));
}
rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
@@ -515,11 +558,17 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op
if (ret != len) {
dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
}
- return rockchip_sfc_xfer_done(sfc, 100000);
+ ret = rockchip_sfc_xfer_done(sfc, 100000);
+out:
+ pm_runtime_mark_last_busy(sfc->dev);
+ pm_runtime_put_autosuspend(sfc->dev);
+
+ return ret;
}
static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
@@ -536,6 +585,10 @@ static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
.adjust_op_size = rockchip_sfc_adjust_op_size,
};
+static const struct spi_controller_mem_caps rockchip_sfc_mem_caps = {
+ .per_op_freq = true,
+};
+
static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
{
struct rockchip_sfc *sfc = dev_id;
@@ -561,6 +614,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
struct spi_controller *host;
struct rockchip_sfc *sfc;
int ret;
+ u32 i, val;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc));
if (!host)
@@ -568,6 +622,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->mem_ops = &rockchip_sfc_mem_ops;
+ host->mem_caps = &rockchip_sfc_mem_caps;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
host->max_speed_hz = SFC_MAX_SPEED;
@@ -581,31 +636,29 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
if (IS_ERR(sfc->regbase))
return PTR_ERR(sfc->regbase);
- sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
+ if (!has_acpi_companion(&pdev->dev))
+ sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
if (IS_ERR(sfc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sfc->clk),
"Failed to get sfc interface clk\n");
- sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
+ if (!has_acpi_companion(&pdev->dev))
+ sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
if (IS_ERR(sfc->hclk))
return dev_err_probe(&pdev->dev, PTR_ERR(sfc->hclk),
"Failed to get sfc ahb clk\n");
- sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma");
-
- if (sfc->use_dma) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_warn(dev, "Unable to set dma mask\n");
- return ret;
- }
-
- sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
- &sfc->dma_buffer, GFP_KERNEL);
- if (!sfc->buffer)
- return -ENOMEM;
+ if (has_acpi_companion(&pdev->dev)) {
+ ret = device_property_read_u32(&pdev->dev, "clock-frequency", &val);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to find clock-frequency in ACPI\n");
+ for (i = 0; i < SFC_MAX_CHIPSELECT_NUM; i++)
+ sfc->speed[i] = val;
}
+ sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma");
+
ret = clk_prepare_enable(sfc->hclk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable ahb clk\n");
@@ -630,19 +683,47 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
goto err_irq;
}
+ platform_set_drvdata(pdev, sfc);
+
ret = rockchip_sfc_init(sfc);
if (ret)
goto err_irq;
- sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
sfc->version = rockchip_sfc_get_version(sfc);
+ sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+
+ pm_runtime_set_autosuspend_delay(dev, ROCKCHIP_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_noresume(dev);
+
+ if (sfc->use_dma) {
+ sfc->buffer = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32,
+ get_order(sfc->max_iosize));
+ if (!sfc->buffer) {
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+ sfc->dma_buffer = virt_to_phys(sfc->buffer);
+ }
- ret = spi_register_controller(host);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
- goto err_irq;
+ goto err_register;
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+err_register:
+ free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
+err_dma:
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
err_irq:
clk_disable_unprepare(sfc->clk);
err_clk:
@@ -657,11 +738,80 @@ static void rockchip_sfc_remove(struct platform_device *pdev)
struct spi_controller *host = sfc->host;
spi_unregister_controller(host);
+ free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
clk_disable_unprepare(sfc->clk);
clk_disable_unprepare(sfc->hclk);
}
+#ifdef CONFIG_PM
+static int rockchip_sfc_runtime_suspend(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sfc->clk);
+ clk_disable_unprepare(sfc->hclk);
+
+ return 0;
+}
+
+static int rockchip_sfc_runtime_resume(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(sfc->hclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(sfc->clk);
+ if (ret < 0)
+ clk_disable_unprepare(sfc->hclk);
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_sfc_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int rockchip_sfc_resume(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ rockchip_sfc_init(sfc);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops rockchip_sfc_pm_ops = {
+ SET_RUNTIME_PM_OPS(rockchip_sfc_runtime_suspend,
+ rockchip_sfc_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_sfc_suspend, rockchip_sfc_resume)
+};
+
static const struct of_device_id rockchip_sfc_dt_ids[] = {
{ .compatible = "rockchip,sfc"},
{ /* sentinel */ }
@@ -672,6 +822,7 @@ static struct platform_driver rockchip_sfc_driver = {
.driver = {
.name = "rockchip-sfc",
.of_match_table = rockchip_sfc_dt_ids,
+ .pm = &rockchip_sfc_pm_ops,
},
.probe = rockchip_sfc_probe,
.remove = rockchip_sfc_remove,
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index eecf9ea95ae3..1627aa66c965 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -7,13 +7,15 @@
#include <linux/kernel.h>
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
#include <linux/platform_data/sc18is602.h>
+#include <linux/property.h>
+
#include <linux/gpio/consumer.h>
enum chips { sc18is602, sc18is602b, sc18is603 };
@@ -236,9 +238,7 @@ static int sc18is602_setup(struct spi_device *spi)
static int sc18is602_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node;
struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
struct sc18is602 *hw;
struct spi_controller *host;
@@ -251,8 +251,9 @@ static int sc18is602_probe(struct i2c_client *client)
if (!host)
return -ENOMEM;
+ device_set_node(&host->dev, dev_fwnode(dev));
+
hw = spi_controller_get_devdata(host);
- i2c_set_clientdata(client, hw);
/* assert reset and then release */
hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
@@ -265,11 +266,7 @@ static int sc18is602_probe(struct i2c_client *client)
hw->dev = dev;
hw->ctrl = 0xff;
- if (client->dev.of_node)
- hw->id = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- hw->id = id->driver_data;
-
+ hw->id = (uintptr_t)i2c_get_match_data(client);
switch (hw->id) {
case sc18is602:
case sc18is602b:
@@ -278,28 +275,21 @@ static int sc18is602_probe(struct i2c_client *client)
break;
case sc18is603:
host->num_chipselect = 2;
- if (pdata) {
+ if (pdata)
hw->freq = pdata->clock_frequency;
- } else {
- const __be32 *val;
- int len;
-
- val = of_get_property(np, "clock-frequency", &len);
- if (val && len >= sizeof(__be32))
- hw->freq = be32_to_cpup(val);
- }
+ else
+ device_property_read_u32(dev, "clock-frequency", &hw->freq);
if (!hw->freq)
hw->freq = SC18IS602_CLOCK;
break;
}
- host->bus_num = np ? -1 : client->adapter->nr;
+ host->bus_num = dev_fwnode(dev) ? -1 : client->adapter->nr;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->setup = sc18is602_setup;
host->transfer_one_message = sc18is602_transfer_one;
host->max_transfer_size = sc18is602_max_transfer_size;
host->max_message_size = sc18is602_max_transfer_size;
- host->dev.of_node = np;
host->min_speed_hz = hw->freq / 128;
host->max_speed_hz = hw->freq / 4;
@@ -314,7 +304,7 @@ static const struct i2c_device_id sc18is602_id[] = {
};
MODULE_DEVICE_TABLE(i2c, sc18is602_id);
-static const struct of_device_id sc18is602_of_match[] __maybe_unused = {
+static const struct of_device_id sc18is602_of_match[] = {
{
.compatible = "nxp,sc18is602",
.data = (void *)sc18is602
@@ -334,7 +324,7 @@ MODULE_DEVICE_TABLE(of, sc18is602_of_match);
static struct i2c_driver sc18is602_driver = {
.driver = {
.name = "sc18is602",
- .of_match_table = of_match_ptr(sc18is602_of_match),
+ .of_match_table = sc18is602_of_match,
},
.probe = sc18is602_probe,
.id_table = sc18is602_id,
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index adac645732fe..6ad4b729897e 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -335,7 +335,6 @@ static void f_ospi_config_indir_protocol(struct f_ospi *ospi,
static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct spi_device *spi = mem->spi;
u32 irq_stat_en;
int ret;
@@ -343,7 +342,7 @@ static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
if (ret)
return ret;
- f_ospi_config_clk(ospi, spi->max_speed_hz);
+ f_ospi_config_clk(ospi, op->max_freq);
f_ospi_config_indir_protocol(ospi, mem, op);
@@ -577,6 +576,10 @@ static const struct spi_controller_mem_ops f_ospi_mem_ops = {
.exec_op = f_ospi_exec_op,
};
+static const struct spi_controller_mem_caps f_ospi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int f_ospi_init(struct f_ospi *ospi)
{
int ret;
@@ -614,6 +617,7 @@ static int f_ospi_probe(struct platform_device *pdev)
| SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL
| SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST;
ctlr->mem_ops = &f_ospi_mem_ops;
+ ctlr->mem_caps = &f_ospi_mem_caps;
ctlr->bus_num = -1;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > OSPI_NUM_CS) {
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 9122350402b5..a284d2794586 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -623,7 +623,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
mutex_lock(&qspi->list_lock);
if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) {
- ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
+ ti_qspi_setup_clk(qspi, op->max_freq);
ti_qspi_enable_memory_map(mem->spi);
}
ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
@@ -658,6 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.adjust_op_size = ti_qspi_adjust_op_size,
};
+static const struct spi_controller_mem_caps ti_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int ti_qspi_start_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
@@ -777,6 +781,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
host->mem_ops = &ti_qspi_mem_ops;
+ host->mem_caps = &ti_qspi_mem_caps;
if (!of_property_read_u32(np, "num-cs", &num_cs))
host->num_chipselect = num_cs;
@@ -826,20 +831,12 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (of_property_present(np, "syscon-chipselects")) {
qspi->ctrl_base =
- syscon_regmap_lookup_by_phandle(np,
- "syscon-chipselects");
+ syscon_regmap_lookup_by_phandle_args(np, "syscon-chipselects",
+ 1, &qspi->ctrl_reg);
if (IS_ERR(qspi->ctrl_base)) {
ret = PTR_ERR(qspi->ctrl_base);
goto free_host;
}
- ret = of_property_read_u32_index(np,
- "syscon-chipselects",
- 1, &qspi->ctrl_reg);
- if (ret) {
- dev_err(&pdev->dev,
- "couldn't get ctrl_mod reg index\n");
- goto free_host;
- }
}
qspi->fclk = devm_clk_get(&pdev->dev, "fck");
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index dee9c339a35e..2bd25c75f881 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -318,6 +318,7 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* zynq_qspi_config_op - Configure QSPI controller for specified transfer
* @xqspi: Pointer to the zynq_qspi structure
* @spi: Pointer to the spi_device structure
+ * @op: The memory operation to execute
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -331,7 +332,8 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* controller the driver will set the highest or lowest frequency supported by
* controller.
*/
-static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
u32 config_reg, baud_rate_val = 0;
@@ -346,7 +348,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
(clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
- spi->max_speed_hz)
+ op->max_freq)
baud_rate_val++;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
@@ -379,12 +381,21 @@ static int zynq_qspi_setup_op(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
+ int ret;
if (ctlr->busy)
return -EBUSY;
- clk_enable(qspi->refclk);
- clk_enable(qspi->pclk);
+ ret = clk_enable(qspi->refclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(qspi->pclk);
+ if (ret) {
+ clk_disable(qspi->refclk);
+ return ret;
+ }
+
zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
@@ -534,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
- zynq_qspi_config_op(xqspi, mem->spi);
+ zynq_qspi_config_op(xqspi, mem->spi, op);
if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
@@ -620,6 +631,10 @@ static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
.exec_op = zynq_qspi_exec_mem_op,
};
+static const struct spi_controller_mem_caps zynq_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
/**
* zynq_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
@@ -706,6 +721,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->mem_ops = &zynq_qspi_mem_ops;
+ ctlr->mem_caps = &zynq_qspi_mem_caps;
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 549a6e0c9654..d800d79f62a7 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -535,7 +535,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
* zynqmp_qspi_config_op - Configure QSPI controller for specified
* transfer
* @xqspi: Pointer to the zynqmp_qspi structure
- * @qspi: Pointer to the spi_device structure
+ * @op: The memory operation to execute
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -553,12 +553,12 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
* frequency supported by controller.
*/
static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
- struct spi_device *qspi)
+ const struct spi_mem_op *op)
{
ulong clk_rate;
u32 config_reg, req_speed_hz, baud_rate_val = 0;
- req_speed_hz = qspi->max_speed_hz;
+ req_speed_hz = op->max_freq;
if (xqspi->speed_hz != req_speed_hz) {
xqspi->speed_hz = req_speed_hz;
@@ -1072,7 +1072,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
op->dummy.buswidth, op->data.buswidth);
mutex_lock(&xqspi->op_lock);
- zynqmp_qspi_config_op(xqspi, mem->spi);
+ zynqmp_qspi_config_op(xqspi, op);
zynqmp_qspi_chipselect(mem->spi, false);
genfifoentry |= xqspi->genfifocs;
genfifoentry |= xqspi->genfifobus;
@@ -1224,6 +1224,10 @@ static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
.exec_op = zynqmp_qspi_exec_op,
};
+static const struct spi_controller_mem_caps zynqmp_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
/**
* zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
@@ -1333,6 +1337,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
+ ctlr->mem_caps = &zynqmp_qspi_mem_caps;
ctlr->setup = zynqmp_qspi_setup_op;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->dev.of_node = np;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ff1add2ecb91..a7a4647717d4 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -410,29 +410,21 @@ static int spi_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
struct spi_device *spi = to_spi_device(dev);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
- if (dev->of_node) {
+ if (is_of_node(fwnode))
spi->irq = of_irq_get(dev->of_node, 0);
- if (spi->irq == -EPROBE_DEFER)
- return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get irq\n");
- if (spi->irq < 0)
- spi->irq = 0;
- }
-
- if (has_acpi_companion(dev) && spi->irq < 0) {
- struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
-
- spi->irq = acpi_dev_gpio_irq_get(adev, 0);
- if (spi->irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (spi->irq < 0)
- spi->irq = 0;
- }
+ else if (is_acpi_device_node(fwnode) && spi->irq < 0)
+ spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
+ if (spi->irq == -EPROBE_DEFER)
+ return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
+ if (spi->irq < 0)
+ spi->irq = 0;
ret = dev_pm_domain_attach(dev, true);
if (ret)
@@ -874,15 +866,18 @@ EXPORT_SYMBOL_GPL(spi_new_device);
*/
void spi_unregister_device(struct spi_device *spi)
{
+ struct fwnode_handle *fwnode;
+
if (!spi)
return;
- if (spi->dev.of_node) {
- of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
- of_node_put(spi->dev.of_node);
+ fwnode = dev_fwnode(&spi->dev);
+ if (is_of_node(fwnode)) {
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ of_node_put(to_of_node(fwnode));
+ } else if (is_acpi_device_node(fwnode)) {
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
}
- if (ACPI_COMPANION(&spi->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_remove_software_node(&spi->dev);
device_del(&spi->dev);
spi_cleanup(spi);
@@ -1059,7 +1054,7 @@ static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool
* ambiguity. That's why we use enable, that takes SPI_CS_HIGH
* into account.
*/
- if (has_acpi_companion(&spi->dev))
+ if (is_acpi_device_node(dev_fwnode(&spi->dev)))
gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
else
/* Polarity handled by GPIO library */
@@ -2060,7 +2055,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
ctlr->busy = false;
ctlr->queue_empty = true;
- ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker)) {
dev_err(&ctlr->dev, "failed to create message pump kworker\n");
return PTR_ERR(ctlr->kworker);
@@ -4841,7 +4836,7 @@ extern struct notifier_block spi_of_notifier;
#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
- return ACPI_COMPANION(dev->parent) == data;
+ return device_match_acpi_dev(dev->parent, data);
}
struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 653f82984216..58ae4304fdab 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -698,19 +698,24 @@ static const struct class spidev_class = {
.name = "spidev",
};
+/*
+ * The SPI device ids are expected to match the device names of the
+ * spidev_dt_ids array below. Both arrays are kept in the same order.
+ */
static const struct spi_device_id spidev_spi_ids[] = {
- { .name = "bh2228fv" },
- { .name = "dh2228fv" },
- { .name = "jg10309-01" },
- { .name = "ltc2488" },
- { .name = "sx1301" },
- { .name = "bk4" },
- { .name = "dhcom-board" },
- { .name = "m53cpld" },
- { .name = "spi-petra" },
- { .name = "spi-authenta" },
- { .name = "em3581" },
- { .name = "si3210" },
+ { .name = /* cisco */ "spi-petra" },
+ { .name = /* dh */ "dhcom-board" },
+ { .name = /* elgin */ "jg10309-01" },
+ { .name = /* lineartechnology */ "ltc2488" },
+ { .name = /* lwn */ "bk4" },
+ { .name = /* lwn */ "bk4-spi" },
+ { .name = /* menlo */ "m53cpld" },
+ { .name = /* micron */ "spi-authenta" },
+ { .name = /* rohm */ "bh2228fv" },
+ { .name = /* rohm */ "dh2228fv" },
+ { .name = /* semtech */ "sx1301" },
+ { .name = /* silabs */ "em3581" },
+ { .name = /* silabs */ "si3210" },
{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
@@ -734,6 +739,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
+ { .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
{ .compatible = "rohm,bh2228fv", .data = &spidev_of_check },
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index c053ee9c1361..7fcc46a0bb48 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1802,7 +1802,8 @@ static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
}
static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev, int *dbm)
+ struct wireless_dev *wdev,
+ unsigned int link_id, int *dbm)
{
*dbm = (12);
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 922b207bc69d..70d76f3dd693 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -2,9 +2,9 @@
config ISCSI_TARGET
tristate "SCSI Target Mode Stack"
depends on INET
+ select CRC32
select CRYPTO
- select CRYPTO_CRC32C
- select CRYPTO_CRC32C_INTEL if X86
+ select CRYPTO_HASH
help
Say M to enable the SCSI target mode stack. A SCSI target mode stack
is software that makes local storage available over a storage network
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6002283cbeba..091c1efccfb7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -8,7 +8,7 @@
*
******************************************************************************/
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/completion.h>
@@ -490,8 +490,8 @@ void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
}
EXPORT_SYMBOL(iscsit_aborted_task);
-static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, const void *, void *);
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes);
static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
static int
@@ -510,9 +510,7 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL,
- header_digest);
+ *header_digest = iscsit_crc_buf(hdr, ISCSI_HDR_LEN, 0, NULL);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -537,11 +535,9 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- data_buf, data_buf_len,
- padding, &cmd->pad_bytes,
- &cmd->data_crc);
-
+ cmd->data_crc = iscsit_crc_buf(data_buf, data_buf_len,
+ padding,
+ &cmd->pad_bytes);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -566,8 +562,8 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
u32 data_offset, u32 data_length);
static void iscsit_unmap_iovec(struct iscsit_cmd *);
-static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
- u32, u32, u32, u8 *);
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes);
static int
iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
const struct iscsi_datain *datain)
@@ -584,10 +580,8 @@ iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL,
- header_digest);
-
+ *header_digest = iscsit_crc_buf(cmd->pdu, ISCSI_HDR_LEN, 0,
+ NULL);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -614,12 +608,8 @@ iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
- cmd, datain->offset,
- datain->length,
- cmd->padding,
- cmd->pad_bytes);
-
+ cmd->data_crc = iscsit_crc_sglist(cmd, datain->length,
+ cmd->padding, cmd->pad_bytes);
iov[iov_count].iov_base = &cmd->data_crc;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -1404,77 +1394,45 @@ iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
return iscsit_get_immediate_data(cmd, hdr, dump_payload);
}
-static u32 iscsit_do_crypto_hash_sg(
- struct ahash_request *hash,
- struct iscsit_cmd *cmd,
- u32 data_offset,
- u32 data_length,
- u32 padding,
- u8 *pad_bytes)
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes)
{
- u32 data_crc;
- struct scatterlist *sg;
- unsigned int page_off;
-
- crypto_ahash_init(hash);
-
- sg = cmd->first_data_sg;
- page_off = cmd->first_data_sg_off;
-
- if (data_length && page_off) {
- struct scatterlist first_sg;
- u32 len = min_t(u32, data_length, sg->length - page_off);
-
- sg_init_table(&first_sg, 1);
- sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
-
- ahash_request_set_crypt(hash, &first_sg, NULL, len);
- crypto_ahash_update(hash);
-
- data_length -= len;
- sg = sg_next(sg);
- }
+ struct scatterlist *sg = cmd->first_data_sg;
+ unsigned int page_off = cmd->first_data_sg_off;
+ u32 crc = ~0;
while (data_length) {
- u32 cur_len = min_t(u32, data_length, sg->length);
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+ const void *virt;
- ahash_request_set_crypt(hash, sg, NULL, cur_len);
- crypto_ahash_update(hash);
+ virt = kmap_local_page(sg_page(sg)) + sg->offset + page_off;
+ crc = crc32c(crc, virt, cur_len);
+ kunmap_local(virt);
- data_length -= cur_len;
/* iscsit_map_iovec has already checked for invalid sg pointers */
sg = sg_next(sg);
- }
- if (padding) {
- struct scatterlist pad_sg;
-
- sg_init_one(&pad_sg, pad_bytes, padding);
- ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
- padding);
- crypto_ahash_finup(hash);
- } else {
- ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
- crypto_ahash_final(hash);
+ page_off = 0;
+ data_length -= cur_len;
}
- return data_crc;
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
+
+ return ~crc;
}
-static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
- const void *buf, u32 payload_length, u32 padding,
- const void *pad_bytes, void *data_crc)
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes)
{
- struct scatterlist sg[2];
+ u32 crc = ~0;
- sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(sg, buf, payload_length);
- if (padding)
- sg_set_buf(sg + 1, pad_bytes, padding);
+ crc = crc32c(crc, buf, payload_length);
- ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
- crypto_ahash_digest(hash);
+ return ~crc;
}
int
@@ -1662,11 +1620,8 @@ iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
- be32_to_cpu(hdr->offset),
- payload_length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, payload_length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
@@ -1925,10 +1880,8 @@ static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cm
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
- payload_length, padding,
- cmd->pad_bytes, &data_crc);
-
+ data_crc = iscsit_crc_buf(ping_data, payload_length,
+ padding, cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
" 0x%08x does not match computed 0x%08x\n",
@@ -2328,10 +2281,7 @@ iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, rx_size, 0, NULL,
- &data_crc);
-
+ data_crc = iscsit_crc_buf(text_in, rx_size, 0, NULL);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
" 0x%08x does not match computed"
@@ -2688,10 +2638,8 @@ static int iscsit_handle_immediate_data(
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
- cmd->write_data_done, length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ImmediateData CRC32C DataDigest 0x%08x"
" does not match computed 0x%08x\n", checksum,
@@ -4116,10 +4064,8 @@ static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
break;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
- ISCSI_HDR_LEN, 0, NULL,
- &checksum);
-
+ checksum = iscsit_crc_buf(buffer, ISCSI_HDR_LEN, 0,
+ NULL);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
" received 0x%08x, computed 0x%08x\n",
@@ -4406,15 +4352,6 @@ int iscsit_close_connection(
*/
iscsit_check_conn_usage_count(conn);
- ahash_request_free(conn->conn_tx_hash);
- if (conn->conn_rx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
- ahash_request_free(conn->conn_rx_hash);
- crypto_free_ahash(tfm);
- }
-
if (conn->sock)
sock_release(conn->sock);
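The hunks above replace the crypto_ahash plumbing with direct calls to the CRC32C library helper. The idiom is the standard one: seed the running value with all-ones, fold in each buffer in order, and invert the result. A minimal sketch of the same pattern (the function name is illustrative, not part of the patch):

#include <linux/crc32c.h>

/* Sketch of the CRC32C digest idiom used by iscsit_crc_buf() above:
 * seed with ~0, fold in the payload and any pad bytes, invert at the
 * end. Name and parameters are illustrative only. */
static u32 example_crc32c_digest(const void *buf, u32 len,
				 u32 padding, const void *pad_bytes)
{
	u32 crc = ~0;

	crc = crc32c(crc, buf, len);
	if (padding)
		crc = crc32c(crc, pad_bytes, padding);

	return ~crc;
}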
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 90b870f234f0..c2ac9a99ebbb 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -8,7 +8,6 @@
*
******************************************************************************/
-#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kthread.h>
@@ -71,46 +70,6 @@ out_login:
return NULL;
}
-/*
- * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
- * per struct iscsit_conn libcrypto contexts for crc32c and crc32-intel
- */
-int iscsi_login_setup_crypto(struct iscsit_conn *conn)
-{
- struct crypto_ahash *tfm;
-
- /*
- * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
- * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
- * to software 1x8 byte slicing from crc32c.ko
- */
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- pr_err("crypto_alloc_ahash() failed\n");
- return -ENOMEM;
- }
-
- conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!conn->conn_rx_hash) {
- pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
- ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
-
- conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!conn->conn_tx_hash) {
- pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
- ahash_request_free(conn->conn_rx_hash);
- conn->conn_rx_hash = NULL;
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
- ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
-
- return 0;
-}
-
static int iscsi_login_check_initiator_version(
struct iscsit_conn *conn,
u8 version_max,
@@ -1165,15 +1124,6 @@ old_sess_out:
iscsit_dec_session_usage_count(conn->sess);
}
- ahash_request_free(conn->conn_tx_hash);
- if (conn->conn_rx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
- ahash_request_free(conn->conn_rx_hash);
- crypto_free_ahash(tfm);
- }
-
if (conn->param_list) {
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index e8760735486b..03c7d695d58f 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -9,7 +9,6 @@ struct iscsi_login;
struct iscsi_np;
struct sockaddr_storage;
-extern int iscsi_login_setup_crypto(struct iscsit_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsit_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsit_conn *, u16, u32);
extern int iscsit_setup_np(struct iscsi_np *,
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index fa3fb5f4e6bc..16e3ded98c32 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1194,14 +1194,7 @@ int iscsi_target_locate_portal(
goto get_target;
sess->sess_ops->SessionType = 1;
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
@@ -1258,17 +1251,7 @@ get_target:
}
conn->tpg_np = tpg_np;
pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
- iscsit_put_tiqn_for_login(tiqn);
- conn->tpg = NULL;
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the struct iscsi_portal_group to
* process login attempt.
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 287ac5b0495f..f991cf759836 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -823,7 +823,6 @@ static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct request *req)
{
- struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL;
struct page *page;
struct scatterlist *sg;
@@ -871,12 +870,11 @@ new_bio:
(rw) ? "rw" : "r", nr_vecs);
}
- pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
+ pr_debug("PSCSI: Calling bio_add_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
- rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
- bio, page, bytes, off);
+ rc = bio_add_page(bio, page, bytes, off);
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio_segments(bio), nr_vecs);
if (rc != bytes) {
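bio_add_page() returns the number of bytes it actually added, so a short return tells the caller the bio is full and a new one must be started; that is the check the hunk above preserves. A hedged sketch of the pattern (helper name is illustrative):

#include <linux/bio.h>

/* Sketch: add one page fragment to a bio. A short return from
 * bio_add_page() means the bio is full; the caller should submit
 * or chain it and allocate a fresh one. Illustrative only. */
static int example_add_fragment(struct bio *bio, struct page *page,
				unsigned int bytes, unsigned int off)
{
	if (bio_add_page(bio, page, bytes, off) != bytes)
		return -EAGAIN;	/* caller starts a new bio */

	return 0;
}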
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 3e33cf2af73b..f0c3ac1103bb 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1272,8 +1272,9 @@ static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
&res.smccc);
if (res.result.build_id)
- pr_info("revision %lu.%lu (%08lx)", res.result.major,
- res.result.minor, res.result.build_id);
+ pr_info("revision %lu.%lu (%0*lx)", res.result.major,
+ res.result.minor, (int)sizeof(res.result.build_id) * 2,
+ res.result.build_id);
else
pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}
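The format change relies on printf's '*' specifier, which takes the field width from the argument list, so the zero-padded build id always matches the width of the type being printed. A small hedged example of the same construct:

#include <linux/printk.h>

/* Sketch: '*' pulls the field width from the argument list, so the
 * zero-padding tracks sizeof(unsigned long) on any build (8 hex
 * digits on 32-bit, 16 on 64-bit). Illustrative only. */
static void example_print_build_id(unsigned long build_id)
{
	pr_info("build id %0*lx\n", (int)(sizeof(build_id) * 2), build_id);
}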
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 97f3d819852b..51951967d67f 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -7,6 +7,27 @@
* Based on step_wise.c with following Copyrights:
* Copyright (C) 2012 Intel Corp
* Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
+ *
+ * Regulation Logic: a two-point regulation that delivers a cooling state
+ * depending on the previous state, as shown in this diagram:
+ *
+ * Fan: OFF ON
+ *
+ * |
+ * |
+ * trip_temp: +---->+
+ * | | ^
+ * | | |
+ * | | Temperature
+ * (trip_temp - hyst): +<----+
+ * |
+ * |
+ * |
+ *
+ * * If the fan is not running and temperature exceeds trip_temp, the fan
+ * gets turned on.
+ * * In case the fan is running, temperature must fall below
+ * (trip_temp - hyst) so that the fan gets turned off again.
*/
#include <linux/thermal.h>
@@ -34,36 +55,14 @@ static void bang_bang_set_instance_target(struct thermal_instance *instance,
}
/**
- * bang_bang_control - controls devices associated with the given zone
+ * bang_bang_trip_crossed - controls devices associated with the given zone
* @tz: thermal_zone_device
* @trip: the trip point
- * @crossed_up: whether or not the trip has been crossed on the way up
- *
- * Regulation Logic: a two point regulation, deliver cooling state depending
- * on the previous state shown in this diagram:
- *
- * Fan: OFF ON
- *
- * |
- * |
- * trip_temp: +---->+
- * | | ^
- * | | |
- * | | Temperature
- * (trip_temp - hyst): +<----+
- * |
- * |
- * |
- *
- * * If the fan is not running and temperature exceeds trip_temp, the fan
- * gets turned on.
- * * In case the fan is running, temperature must fall below
- * (trip_temp - hyst) so that the fan gets turned off again.
- *
+ * @upward: whether or not the trip has been crossed on the way up
*/
-static void bang_bang_control(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- bool crossed_up)
+static void bang_bang_trip_crossed(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool upward)
{
const struct thermal_trip_desc *td = trip_to_trip_desc(trip);
struct thermal_instance *instance;
@@ -75,7 +74,7 @@ static void bang_bang_control(struct thermal_zone_device *tz,
tz->temperature, trip->hysteresis);
list_for_each_entry(instance, &td->thermal_instances, trip_node)
- bang_bang_set_instance_target(instance, crossed_up);
+ bang_bang_set_instance_target(instance, upward);
}
static void bang_bang_manage(struct thermal_zone_device *tz)
@@ -123,7 +122,7 @@ static void bang_bang_update_tz(struct thermal_zone_device *tz,
static struct thermal_governor thermal_gov_bang_bang = {
.name = "bang_bang",
- .trip_crossed = bang_bang_control,
+ .trip_crossed = bang_bang_trip_crossed,
.manage = bang_bang_manage,
.update_tz = bang_bang_update_tz,
};
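The two-point logic documented above reduces to a small state machine: switch on when the temperature crosses trip_temp upward, and switch off only once it falls below trip_temp - hyst. A hedged sketch of that hysteresis decision (names are illustrative, not the governor's API):

/* Sketch of two-point (hysteresis) regulation: 'on' is the previous
 * state, and the device only changes state when a threshold is
 * crossed in the matching direction. Illustrative names only. */
static bool example_bang_bang(bool on, int temp, int trip_temp, int hyst)
{
	if (!on && temp >= trip_temp)
		return true;		/* crossed trip_temp on the way up */
	if (on && temp < trip_temp - hyst)
		return false;		/* fell below trip_temp - hyst */

	return on;			/* inside the hysteresis band */
}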
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index 75137b419eb2..ef95cf7d65ef 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -23,16 +23,16 @@ static int user_space_bind(struct thermal_zone_device *tz)
}
/**
- * notify_user_space - Notifies user space about thermal events
+ * user_space_trip_crossed - Notify user space about trip crossing events
* @tz: thermal_zone_device
* @trip: trip point
- * @crossed_up: whether or not the trip has been crossed on the way up
+ * @upward: whether or not the trip has been crossed on the way up
*
* This function notifies the user space through UEvents.
*/
-static void notify_user_space(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- bool crossed_up)
+static void user_space_trip_crossed(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool upward)
{
char *thermal_prop[5];
int i;
@@ -52,7 +52,7 @@ static void notify_user_space(struct thermal_zone_device *tz,
static struct thermal_governor thermal_gov_user_space = {
.name = "user_space",
- .trip_crossed = notify_user_space,
+ .trip_crossed = user_space_trip_crossed,
.bind_to_tz = user_space_bind,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_user_space);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index a31f2f32996a..e0268fac7093 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -21,8 +21,8 @@ config INTEL_TCC
config X86_PKG_TEMP_THERMAL
tristate "X86 package temperature thermal driver"
- depends on X86_THERMAL_VECTOR
- select THERMAL_GOV_USER_SPACE
+ depends on X86_THERMAL_VECTOR && NET
+ select THERMAL_NETLINK
select INTEL_TCC
default m
help
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index e76b13e44d03..4c699f0896b5 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -5,8 +5,8 @@
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
- depends on X86_64 && ACPI && PCI
- select THERMAL_GOV_USER_SPACE
+ depends on X86_64 && ACPI && PCI && NET
+ select THERMAL_NETLINK
select ACPI_THERMAL_REL
select ACPI_FAN
select ACPI_THERMAL_LIB
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 8660ef2175be..0e07693ecf59 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -521,7 +521,6 @@ static struct thermal_zone_device_ops int3400_thermal_ops = {
};
static struct thermal_zone_params int3400_thermal_params = {
- .governor_name = "user_space",
.no_hwmon = true,
};
@@ -690,6 +689,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INTC1042", 0},
{"INTC1068", 0},
{"INTC10A0", 0},
+ {"INTC10D4", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 04aa0afb3b1d..5a925a8df7b3 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -275,6 +275,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INTC1062", 0},
{"INTC1069", 0},
{"INTC10A1", 0},
+ {"INTC10D5", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 31ed338eb83c..8dca6a6aceca 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -105,7 +105,6 @@ static int int340x_thermal_read_trips(struct acpi_device *zone_adev,
}
static struct thermal_zone_params int340x_thermal_params = {
- .governor_name = "user_space",
.no_hwmon = true,
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index d5eca6db2c00..ba2d89d3024c 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -30,6 +30,7 @@
#define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
#define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03
+#define PCI_DEVICE_ID_INTEL_PTL_THERMAL 0xB01D
struct power_config {
u32 index;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 145d471546d5..a55aaa8cef42 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -272,7 +272,6 @@ static const struct thermal_zone_device_ops tzone_ops = {
};
static struct thermal_zone_params tzone_params = {
- .governor_name = "user_space",
.no_hwmon = true,
};
@@ -495,6 +494,9 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_WT_HINT) },
{ PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
+ { PCI_DEVICE_DATA(INTEL, PTL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
+ PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_MSI_SUPPORT |
+ PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
{ },
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 19a3894ad752..2328ac0d8561 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -453,23 +453,23 @@ static void move_to_trips_invalid(struct thermal_zone_device *tz,
static void thermal_governor_trip_crossed(struct thermal_governor *governor,
struct thermal_zone_device *tz,
const struct thermal_trip *trip,
- bool crossed_up)
+ bool upward)
{
if (trip->type == THERMAL_TRIP_HOT || trip->type == THERMAL_TRIP_CRITICAL)
return;
if (governor->trip_crossed)
- governor->trip_crossed(tz, trip, crossed_up);
+ governor->trip_crossed(tz, trip, upward);
}
static void thermal_trip_crossed(struct thermal_zone_device *tz,
struct thermal_trip_desc *td,
struct thermal_governor *governor,
- bool crossed_up)
+ bool upward)
{
const struct thermal_trip *trip = &td->trip;
- if (crossed_up) {
+ if (upward) {
if (trip->type == THERMAL_TRIP_PASSIVE)
tz->passive++;
else if (trip->type == THERMAL_TRIP_CRITICAL ||
@@ -486,7 +486,7 @@ static void thermal_trip_crossed(struct thermal_zone_device *tz,
thermal_notify_tz_trip_down(tz, trip);
thermal_debug_tz_trip_down(tz, trip);
}
- thermal_governor_trip_crossed(governor, tz, trip, crossed_up);
+ thermal_governor_trip_crossed(governor, tz, trip, upward);
}
void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index be271e7c8f41..09866f0ce765 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -56,7 +56,7 @@ struct thermal_governor {
void (*unbind_from_tz)(struct thermal_zone_device *tz);
void (*trip_crossed)(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
- bool crossed_up);
+ bool upward);
void (*manage)(struct thermal_zone_device *tz);
void (*update_tz)(struct thermal_zone_device *tz,
enum thermal_notify_event reason);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 9c26e8767515..3094f3c89e82 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -10411,7 +10411,6 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
.ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 03c22114214b..935c0efea0b6 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -213,8 +213,7 @@ usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
* not connectable, the port will not be used.
*/
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status) && pld)
+ if (acpi_get_physical_device_location(handle, &pld) && pld)
port_dev->location = USB_ACPI_LOCATION_VALID |
pld->group_token << 8 | pld->group_position;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 8c8b5e6041cc..dc98ceecb724 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -592,12 +592,9 @@ static ssize_t max_sectors_store(struct device *dev, struct device_attribute *at
if (sscanf(buf, "%hu", &ms) <= 0)
return -EINVAL;
- blk_mq_freeze_queue(sdev->request_queue);
lim = queue_limits_start_update(sdev->request_queue);
lim.max_hw_sectors = ms;
- ret = queue_limits_commit_update(sdev->request_queue, &lim);
- blk_mq_unfreeze_queue(sdev->request_queue);
-
+ ret = queue_limits_commit_update_frozen(sdev->request_queue, &lim);
if (ret)
return ret;
return count;
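queue_limits_commit_update_frozen() folds the explicit freeze/commit/unfreeze sequence the old code spelled out into a single call. A hedged sketch of the resulting update pattern (assuming a valid request_queue):

/* Sketch: update one queue limit. The _frozen variant freezes the
 * queue, commits the new limits, and unfreezes it in one step. */
static int example_set_max_sectors(struct request_queue *q,
				   unsigned short ms)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(q);
	lim.max_hw_sectors = ms;
	return queue_limits_commit_update_frozen(q, &lim);
}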
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 6021eeb903fe..95c0c63119ac 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7635,7 +7635,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
mutex_init(&port->lock);
mutex_init(&port->swap_lock);
- port->wq = kthread_create_worker(0, dev_name(dev));
+ port->wq = kthread_run_worker(0, dev_name(dev));
if (IS_ERR(port->wq))
return ERR_CAST(port->wq);
sched_set_fifo(port->wq->task);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 5f581e71e201..36099047560d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1952,7 +1952,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
goto out_free;
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter);
+ dests[1].counter = node->ucast_counter.counter;
#endif
node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
if (IS_ERR(node->ucast_rule)) {
@@ -1961,7 +1961,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
}
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter);
+ dests[1].counter = node->mcast_counter.counter;
#endif
memset(dmac_c, 0, ETH_ALEN);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 8ffea8430f95..c204fc8e471a 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -229,7 +229,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
dev = &vdpasim->vdpa.dev;
kthread_init_work(&vdpasim->work, vdpasim_work_fn);
- vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
+ vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s",
dev_attr->name);
if (IS_ERR(vdpasim->worker))
goto err_iommu;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 36bfb6deb8ab..d866608da8d1 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2199,7 +2199,7 @@ static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u
static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2211,7 +2211,7 @@ static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2227,7 +2227,7 @@ static const struct bin_attribute edid1_attr = {
.mode = 0444,
},
.size = EDID_LENGTH,
- .read = radeon_show_edid1,
+ .read_new = radeon_show_edid1,
};
static const struct bin_attribute edid2_attr = {
@@ -2236,7 +2236,7 @@ static const struct bin_attribute edid2_attr = {
.mode = 0444,
},
.size = EDID_LENGTH,
- .read = radeon_show_edid2,
+ .read_new = radeon_show_edid2,
};
static int radeonfb_pci_register(struct pci_dev *pdev,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 20517448487e..0e1bd3dba255 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -275,7 +275,7 @@ static const struct fb_ops efifb_ops = {
.fb_setcolreg = efifb_setcolreg,
};
-static int efifb_setup(struct screen_info *si, char *options)
+static void efifb_setup(struct screen_info *si, char *options)
{
char *this_opt;
@@ -299,8 +299,6 @@ static int efifb_setup(struct screen_info *si, char *options)
use_bgrt = false;
}
}
-
- return 0;
}
static inline bool fb_base_is_valid(struct screen_info *si)
diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
index f85817635a8c..0da23c57e475 100644
--- a/drivers/video/fbdev/omap/lcd_dma.c
+++ b/drivers/video/fbdev/omap/lcd_dma.c
@@ -432,8 +432,8 @@ static int __init omap_init_lcd_dma(void)
spin_lock_init(&lcd_dma.lock);
- r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
- "LCD DMA", NULL);
+ r = request_threaded_irq(INT_DMA_LCD, NULL, lcd_dma_irq_handler,
+ IRQF_ONESHOT, "LCD DMA", NULL);
if (r != 0)
pr_err("unable to request IRQ for LCD DMA (error %d)\n", r);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index c3329c8b4c16..ccb96a5be07e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -3933,18 +3933,13 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- if (np && of_property_read_bool(np, "syscon-pol")) {
- dispc.syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
+ if (np && of_property_present(np, "syscon-pol")) {
+ dispc.syscon_pol = syscon_regmap_lookup_by_phandle_args(np, "syscon-pol",
+ 1, &dispc.syscon_pol_offset);
if (IS_ERR(dispc.syscon_pol)) {
dev_err(&pdev->dev, "failed to get syscon-pol regmap\n");
return PTR_ERR(dispc.syscon_pol);
}
-
- if (of_property_read_u32_index(np, "syscon-pol", 1,
- &dispc.syscon_pol_offset)) {
- dev_err(&pdev->dev, "failed to get syscon-pol offset\n");
- return -EINVAL;
- }
}
pm_runtime_enable(&pdev->dev);
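syscon_regmap_lookup_by_phandle_args() resolves the phandle and reads its argument cells in one call, replacing the separate of_property_read_u32_index() lookup removed above. A hedged sketch (property name and argument count mirror the hunk):

#include <linux/mfd/syscon.h>

/* Sketch: look up a syscon regmap plus its one phandle argument,
 * e.g. "syscon-pol = <&scm offset>", in a single call. */
static int example_lookup_pol(struct device_node *np,
			      struct regmap **map, u32 *offset)
{
	*map = syscon_regmap_lookup_by_phandle_args(np, "syscon-pol",
						    1, offset);
	return PTR_ERR_OR_ZERO(*map);
}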
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index c04cbe0ef173..7c636db79882 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -36,6 +36,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
np = of_get_next_parent(np);
}
+ of_node_put(np);
return NULL;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index b33f62c5cb22..bb7fe54dd019 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -567,23 +567,6 @@ static void hdmi_core_enable_interrupts(struct hdmi_core_data *core)
REG_FLD_MOD(core->base, HDMI_CORE_IH_MUTE, 0x0, 1, 0);
}
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
-{
- void __iomem *base = core->base;
-
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CMPHY_STAT0, 0xff, 7, 0);
-
- return 0;
-}
-
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
index 192c9b6e2f7b..493857374a15 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
@@ -283,7 +283,6 @@ struct csc_table {
int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 935cd8413ed5..4715dcb59811 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2123,11 +2123,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
{
struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
- int brightness = bdev->props.brightness;
-
- if (bdev->props.power != BACKLIGHT_POWER_ON ||
- bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int brightness = backlight_get_brightness(bdev);
ch->bl_brightness = brightness;
return ch->cfg->bl_info.set_brightness(brightness);
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 86ecbb2d86db..7734377b2d87 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -27,6 +27,7 @@
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
@@ -1712,8 +1713,8 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
BUG();
}
- dev_info(info->dev, "fb %s %sabled at start\n",
- fbname, enable ? "en" : "dis");
+ dev_info(info->dev, "fb %s %s at start\n",
+ fbname, str_enabled_disabled(enable));
/* check to see if our routing allows this */
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 71ac9e36f67c..acadf0eb450c 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1416,7 +1416,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
static ssize_t edid_show(
struct file *filp,
- struct kobject *kobj, struct bin_attribute *a,
+ struct kobject *kobj, const struct bin_attribute *a,
char *buf, loff_t off, size_t count) {
struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1438,7 +1438,7 @@ static ssize_t edid_show(
static ssize_t edid_store(
struct file *filp,
- struct kobject *kobj, struct bin_attribute *a,
+ struct kobject *kobj, const struct bin_attribute *a,
char *src, loff_t src_off, size_t src_size) {
struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1482,8 +1482,8 @@ static const struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0666,
.size = EDID_LENGTH,
- .read = edid_show,
- .write = edid_store
+ .read_new = edid_show,
+ .write_new = edid_store
};
static const struct device_attribute fb_device_attrs[] = {
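These fbdev hunks are part of the tree-wide move to const struct bin_attribute: callbacks now take a const attribute pointer and are wired up through the transitional .read_new/.write_new fields. A hedged sketch of the converted shape (names and backing buffer are illustrative):

/* Sketch: sysfs binary attribute in the new const form. The
 * callback takes 'const struct bin_attribute *' and is hooked up
 * via .read_new. Names and the backing buffer are illustrative. */
static char example_data[16] = "example data";

static ssize_t example_show(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *attr,
			    char *buf, loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off,
				       example_data, sizeof(example_data));
}

static const struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = 0444 },
	.size = sizeof(example_data),
	.read_new = example_show,
};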
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index fce0f5db7ba3..eedab14c7d51 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -185,9 +185,10 @@ static inline void setindex(int index)
/* Check if the video mode is supported by the driver */
static inline int check_mode_supported(const struct screen_info *si)
{
+ unsigned int type = screen_info_video_type(si);
+
/* only EGA and VGA in 16 color graphic mode are supported */
- if (si->orig_video_isVGA != VIDEO_TYPE_EGAC &&
- si->orig_video_isVGA != VIDEO_TYPE_VGAC)
+ if (type != VIDEO_TYPE_EGAC && type != VIDEO_TYPE_VGAC)
return -ENODEV;
if (si->orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
@@ -1338,7 +1339,7 @@ static int vga16fb_probe(struct platform_device *dev)
printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
par = info->par;
- par->isVGA = si->orig_video_isVGA == VIDEO_TYPE_VGAC;
+ par->isVGA = screen_info_video_type(si) == VIDEO_TYPE_VGAC;
par->palette_blanked = 0;
par->vesa_blanked = 0;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index ba301f3f4951..45b42f14a750 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -895,34 +895,6 @@ hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
}
/**
- * hdmi_infoframe_check() - check a HDMI infoframe
- * @frame: HDMI infoframe
- *
- * Validates that the infoframe is consistent and updates derived fields
- * (eg. length) based on other fields.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int
-hdmi_infoframe_check(union hdmi_infoframe *frame)
-{
- switch (frame->any.type) {
- case HDMI_INFOFRAME_TYPE_AVI:
- return hdmi_avi_infoframe_check(&frame->avi);
- case HDMI_INFOFRAME_TYPE_SPD:
- return hdmi_spd_infoframe_check(&frame->spd);
- case HDMI_INFOFRAME_TYPE_AUDIO:
- return hdmi_audio_infoframe_check(&frame->audio);
- case HDMI_INFOFRAME_TYPE_VENDOR:
- return hdmi_vendor_any_infoframe_check(&frame->vendor);
- default:
- WARN(1, "Bad infoframe type %d\n", frame->any.type);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(hdmi_infoframe_check);
-
-/**
* hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
diff --git a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
index 488153879ec9..87f162736b2e 100644
--- a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
+++ b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
@@ -6,6 +6,7 @@
#include <linux/arm-smccc.h>
#include <linux/cc_platform.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/tsm.h>
@@ -219,6 +220,13 @@ static void __exit arm_cca_guest_exit(void)
}
module_exit(arm_cca_guest_exit);
+/* modalias, so userspace can autoload this module when RSI is available */
+static const struct platform_device_id arm_cca_match[] __maybe_unused = {
+ { RSI_PDEV_NAME, 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, arm_cca_match);
MODULE_AUTHOR("Sami Mujawar <sami.mujawar@arm.com>");
MODULE_DESCRIPTION("Arm CCA Guest TSM Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig
index 0b772bd921d8..a6405ab6c2c3 100644
--- a/drivers/virt/coco/sev-guest/Kconfig
+++ b/drivers/virt/coco/sev-guest/Kconfig
@@ -2,7 +2,6 @@ config SEV_GUEST
tristate "AMD SEV Guest driver"
default m
depends on AMD_MEM_ENCRYPT
- select CRYPTO_LIB_AESGCM
select TSM_REPORTS
help
SEV-SNP firmware provides the guest a mechanism to communicate with
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index b699771be029..264b6523fe52 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -31,9 +31,6 @@
#define DEVICE_NAME "sev-guest"
-#define SNP_REQ_MAX_RETRY_DURATION (60*HZ)
-#define SNP_REQ_RETRY_DELAY (2*HZ)
-
#define SVSM_MAX_RETRIES 3
struct snp_guest_dev {
@@ -60,86 +57,6 @@ static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
-/* Mutex to serialize the shared buffer access and command handling. */
-static DEFINE_MUTEX(snp_cmd_mutex);
-
-static bool is_vmpck_empty(struct snp_msg_desc *mdesc)
-{
- char zero_key[VMPCK_KEY_LEN] = {0};
-
- if (mdesc->vmpck)
- return !memcmp(mdesc->vmpck, zero_key, VMPCK_KEY_LEN);
-
- return true;
-}
-
-/*
- * If an error is received from the host or AMD Secure Processor (ASP) there
- * are two options. Either retry the exact same encrypted request or discontinue
- * using the VMPCK.
- *
- * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
- * encrypt the requests. The IV for this scheme is the sequence number. GCM
- * cannot tolerate IV reuse.
- *
- * The ASP FW v1.51 only increments the sequence numbers on a successful
- * guest<->ASP back and forth and only accepts messages at its exact sequence
- * number.
- *
- * So if the sequence number were to be reused the encryption scheme is
- * vulnerable. If the sequence number were incremented for a fresh IV the ASP
- * will reject the request.
- */
-static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
-{
- pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
- vmpck_id);
- memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
- mdesc->vmpck = NULL;
-}
-
-static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
-{
- u64 count;
-
- lockdep_assert_held(&snp_cmd_mutex);
-
- /* Read the current message sequence counter from secrets pages */
- count = *mdesc->os_area_msg_seqno;
-
- return count + 1;
-}
-
-/* Return a non-zero on success */
-static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
-{
- u64 count = __snp_get_msg_seqno(mdesc);
-
- /*
- * The message sequence counter for the SNP guest request is a 64-bit
- * value but the version 2 of GHCB specification defines a 32-bit storage
- * for it. If the counter exceeds the 32-bit value then return zero.
- * The caller should check the return value, but if the caller happens to
- * not check the value and use it, then the firmware treats zero as an
- * invalid number and will fail the message request.
- */
- if (count >= UINT_MAX) {
- pr_err("request message sequence counter overflow\n");
- return 0;
- }
-
- return count;
-}
-
-static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
-{
- /*
- * The counter is also incremented by the PSP, so increment it by 2
- * and save in secrets page.
- */
- *mdesc->os_area_msg_seqno += 2;
-}
-
static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
struct miscdevice *dev = file->private_data;
@@ -147,242 +64,6 @@ static inline struct snp_guest_dev *to_snp_dev(struct file *file)
return container_of(dev, struct snp_guest_dev, misc);
}
-static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen)
-{
- struct aesgcm_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
- if (!ctx)
- return NULL;
-
- if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) {
- pr_err("Crypto context initialization failed\n");
- kfree(ctx);
- return NULL;
- }
-
- return ctx;
-}
-
-static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
-{
- struct snp_guest_msg *resp_msg = &mdesc->secret_response;
- struct snp_guest_msg *req_msg = &mdesc->secret_request;
- struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
- struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
- struct aesgcm_ctx *ctx = mdesc->ctx;
- u8 iv[GCM_AES_IV_SIZE] = {};
-
- pr_debug("response [seqno %lld type %d version %d sz %d]\n",
- resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
- resp_msg_hdr->msg_sz);
-
- /* Copy response from shared memory to encrypted memory. */
- memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));
-
- /* Verify that the sequence counter is incremented by 1 */
- if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
- return -EBADMSG;
-
- /* Verify response message type and version number. */
- if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
- resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
- return -EBADMSG;
-
- /*
- * If the message size is greater than our buffer length then return
- * an error.
- */
- if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz))
- return -EBADMSG;
-
- /* Decrypt the payload */
- memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno)));
- if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz,
- &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag))
- return -EBADMSG;
-
- return 0;
-}
-
-static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req)
-{
- struct snp_guest_msg *msg = &mdesc->secret_request;
- struct snp_guest_msg_hdr *hdr = &msg->hdr;
- struct aesgcm_ctx *ctx = mdesc->ctx;
- u8 iv[GCM_AES_IV_SIZE] = {};
-
- memset(msg, 0, sizeof(*msg));
-
- hdr->algo = SNP_AEAD_AES_256_GCM;
- hdr->hdr_version = MSG_HDR_VER;
- hdr->hdr_sz = sizeof(*hdr);
- hdr->msg_type = req->msg_type;
- hdr->msg_version = req->msg_version;
- hdr->msg_seqno = seqno;
- hdr->msg_vmpck = req->vmpck_id;
- hdr->msg_sz = req->req_sz;
-
- /* Verify the sequence number is non-zero */
- if (!hdr->msg_seqno)
- return -ENOSR;
-
- pr_debug("request [seqno %lld type %d version %d sz %d]\n",
- hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
-
- if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload)))
- return -EBADMSG;
-
- memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno)));
- aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo,
- AAD_LEN, iv, hdr->authtag);
-
- return 0;
-}
-
-static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
- struct snp_guest_request_ioctl *rio)
-{
- unsigned long req_start = jiffies;
- unsigned int override_npages = 0;
- u64 override_err = 0;
- int rc;
-
-retry_request:
- /*
- * Call firmware to process the request. In this function the encrypted
- * message enters shared memory with the host. So after this call the
- * sequence number must be incremented or the VMPCK must be deleted to
- * prevent reuse of the IV.
- */
- rc = snp_issue_guest_request(req, &mdesc->input, rio);
- switch (rc) {
- case -ENOSPC:
- /*
- * If the extended guest request fails due to having too
- * small of a certificate data buffer, retry the same
- * guest request without the extended data request in
- * order to increment the sequence number and thus avoid
- * IV reuse.
- */
- override_npages = mdesc->input.data_npages;
- req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;
-
- /*
- * Override the error to inform callers the given extended
- * request buffer size was too small and give the caller the
- * required buffer size.
- */
- override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
-
- /*
- * If this call to the firmware succeeds, the sequence number can
- * be incremented allowing for continued use of the VMPCK. If
- * there is an error reflected in the return value, this value
- * is checked further down and the result will be the deletion
- * of the VMPCK and the error code being propagated back to the
- * user as an ioctl() return code.
- */
- goto retry_request;
-
- /*
- * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
- * throttled. Retry in the driver to avoid returning and reusing the
- * message sequence number on a different message.
- */
- case -EAGAIN:
- if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
- rc = -ETIMEDOUT;
- break;
- }
- schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
- goto retry_request;
- }
-
- /*
- * Increment the message sequence number. There is no harm in doing
- * this now because decryption uses the value stored in the response
- * structure and any failure will wipe the VMPCK, preventing further
- * use anyway.
- */
- snp_inc_msg_seqno(mdesc);
-
- if (override_err) {
- rio->exitinfo2 = override_err;
-
- /*
- * If an extended guest request was issued and the supplied certificate
- * buffer was not large enough, a standard guest request was issued to
- * prevent IV reuse. If the standard request was successful, return -EIO
- * back to the caller as would have originally been returned.
- */
- if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
- rc = -EIO;
- }
-
- if (override_npages)
- mdesc->input.data_npages = override_npages;
-
- return rc;
-}
-
-static int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
- struct snp_guest_request_ioctl *rio)
-{
- u64 seqno;
- int rc;
-
- guard(mutex)(&snp_cmd_mutex);
-
- /* Check if the VMPCK is not empty */
- if (is_vmpck_empty(mdesc)) {
- pr_err_ratelimited("VMPCK is disabled\n");
- return -ENOTTY;
- }
-
- /* Get message sequence and verify that its a non-zero */
- seqno = snp_get_msg_seqno(mdesc);
- if (!seqno)
- return -EIO;
-
- /* Clear shared memory's response for the host to populate. */
- memset(mdesc->response, 0, sizeof(struct snp_guest_msg));
-
- /* Encrypt the userspace provided payload in mdesc->secret_request. */
- rc = enc_payload(mdesc, seqno, req);
- if (rc)
- return rc;
-
- /*
- * Write the fully encrypted request to the shared unencrypted
- * request page.
- */
- memcpy(mdesc->request, &mdesc->secret_request,
- sizeof(mdesc->secret_request));
-
- rc = __handle_guest_request(mdesc, req, rio);
- if (rc) {
- if (rc == -EIO &&
- rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
- return rc;
-
- pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
- rc, rio->exitinfo2);
-
- snp_disable_vmpck(mdesc);
- return rc;
- }
-
- rc = verify_and_dec_payload(mdesc, req);
- if (rc) {
- pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
- snp_disable_vmpck(mdesc);
- return rc;
- }
-
- return 0;
-}
-
struct snp_req_resp {
sockptr_t req_data;
sockptr_t resp_data;
@@ -414,7 +95,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_REPORT_REQ;
- req.vmpck_id = vmpck_id;
+ req.vmpck_id = mdesc->vmpck_id;
req.req_buf = report_req;
req.req_sz = sizeof(*report_req);
req.resp_buf = report_resp->data;
@@ -461,7 +142,7 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_KEY_REQ;
- req.vmpck_id = vmpck_id;
+ req.vmpck_id = mdesc->vmpck_id;
req.req_buf = derived_key_req;
req.req_sz = sizeof(*derived_key_req);
req.resp_buf = buf;
@@ -539,7 +220,7 @@ cmd:
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_REPORT_REQ;
- req.vmpck_id = vmpck_id;
+ req.vmpck_id = mdesc->vmpck_id;
req.req_buf = &report_req->data;
req.req_sz = sizeof(report_req->data);
req.resp_buf = report_resp->data;
@@ -616,76 +297,11 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
return ret;
}
-static void free_shared_pages(void *buf, size_t sz)
-{
- unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
- int ret;
-
- if (!buf)
- return;
-
- ret = set_memory_encrypted((unsigned long)buf, npages);
- if (ret) {
- WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
- return;
- }
-
- __free_pages(virt_to_page(buf), get_order(sz));
-}
-
-static void *alloc_shared_pages(struct device *dev, size_t sz)
-{
- unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
- struct page *page;
- int ret;
-
- page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
- if (!page)
- return NULL;
-
- ret = set_memory_decrypted((unsigned long)page_address(page), npages);
- if (ret) {
- dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
- __free_pages(page, get_order(sz));
- return NULL;
- }
-
- return page_address(page);
-}
-
static const struct file_operations snp_guest_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = snp_guest_ioctl,
};
-static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
-{
- u8 *key = NULL;
-
- switch (id) {
- case 0:
- *seqno = &secrets->os_area.msg_seqno_0;
- key = secrets->vmpck0;
- break;
- case 1:
- *seqno = &secrets->os_area.msg_seqno_1;
- key = secrets->vmpck1;
- break;
- case 2:
- *seqno = &secrets->os_area.msg_seqno_2;
- key = secrets->vmpck2;
- break;
- case 3:
- *seqno = &secrets->os_area.msg_seqno_3;
- key = secrets->vmpck3;
- break;
- default:
- break;
- }
-
- return key;
-}
-
struct snp_msg_report_resp_hdr {
u32 status;
u32 report_size;
@@ -979,13 +595,10 @@ static void unregister_sev_tsm(void *data)
static int __init sev_guest_probe(struct platform_device *pdev)
{
- struct sev_guest_platform_data *data;
- struct snp_secrets_page *secrets;
struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev;
struct snp_msg_desc *mdesc;
struct miscdevice *misc;
- void __iomem *mapping;
int ret;
BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
@@ -993,115 +606,57 @@ static int __init sev_guest_probe(struct platform_device *pdev)
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return -ENODEV;
- if (!dev->platform_data)
- return -ENODEV;
-
- data = (struct sev_guest_platform_data *)dev->platform_data;
- mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
- if (!mapping)
- return -ENODEV;
-
- secrets = (__force void *)mapping;
-
- ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
if (!snp_dev)
- goto e_unmap;
-
- mdesc = devm_kzalloc(&pdev->dev, sizeof(struct snp_msg_desc), GFP_KERNEL);
- if (!mdesc)
- goto e_unmap;
-
- /* Adjust the default VMPCK key based on the executing VMPL level */
- if (vmpck_id == -1)
- vmpck_id = snp_vmpl;
+ return -ENOMEM;
- ret = -EINVAL;
- mdesc->vmpck = get_vmpck(vmpck_id, secrets, &mdesc->os_area_msg_seqno);
- if (!mdesc->vmpck) {
- dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id);
- goto e_unmap;
- }
+ mdesc = snp_msg_alloc();
+ if (IS_ERR_OR_NULL(mdesc))
+ return -ENOMEM;
- /* Verify that VMPCK is not zero. */
- if (is_vmpck_empty(mdesc)) {
- dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id);
- goto e_unmap;
- }
+ ret = snp_msg_init(mdesc, vmpck_id);
+ if (ret)
+ goto e_msg_init;
platform_set_drvdata(pdev, snp_dev);
snp_dev->dev = dev;
- mdesc->secrets = secrets;
-
- /* Allocate the shared page used for the request and response message. */
- mdesc->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
- if (!mdesc->request)
- goto e_unmap;
-
- mdesc->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
- if (!mdesc->response)
- goto e_free_request;
-
- mdesc->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
- if (!mdesc->certs_data)
- goto e_free_response;
-
- ret = -EIO;
- mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN);
- if (!mdesc->ctx)
- goto e_free_cert_data;
misc = &snp_dev->misc;
misc->minor = MISC_DYNAMIC_MINOR;
misc->name = DEVICE_NAME;
misc->fops = &snp_guest_fops;
- /* Initialize the input addresses for guest request */
- mdesc->input.req_gpa = __pa(mdesc->request);
- mdesc->input.resp_gpa = __pa(mdesc->response);
- mdesc->input.data_gpa = __pa(mdesc->certs_data);
-
/* Set the privlevel_floor attribute based on the vmpck_id */
- sev_tsm_ops.privlevel_floor = vmpck_id;
+ sev_tsm_ops.privlevel_floor = mdesc->vmpck_id;
ret = tsm_register(&sev_tsm_ops, snp_dev);
if (ret)
- goto e_free_cert_data;
+ goto e_msg_init;
ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
if (ret)
- goto e_free_cert_data;
+ goto e_msg_init;
ret = misc_register(misc);
if (ret)
- goto e_free_ctx;
+ goto e_msg_init;
snp_dev->msg_desc = mdesc;
- dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id);
+ dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n",
+ mdesc->vmpck_id);
return 0;
-e_free_ctx:
- kfree(mdesc->ctx);
-e_free_cert_data:
- free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
-e_free_response:
- free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
-e_free_request:
- free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
-e_unmap:
- iounmap(mapping);
+e_msg_init:
+ snp_msg_free(mdesc);
+
return ret;
}
static void __exit sev_guest_remove(struct platform_device *pdev)
{
struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
- struct snp_msg_desc *mdesc = snp_dev->msg_desc;
- free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
- free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
- free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
- kfree(mdesc->ctx);
+ snp_msg_free(snp_dev->msg_desc);
misc_deregister(&snp_dev->misc);
}
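After this refactor the AES-GCM context, shared pages, and sequence-number handling live behind snp_msg_* helpers provided by the core SEV code; the driver only allocates a descriptor, binds it to a VMPCK, and frees it on the error path and at remove time. A hedged sketch of that lifecycle, using only the calls visible in the hunks above:

/* Sketch of the message-descriptor lifecycle introduced above.
 * snp_msg_alloc/init/free signatures are inferred from the hunks;
 * illustrative only. */
static int example_msg_setup(int vmpck_id, struct snp_msg_desc **out)
{
	struct snp_msg_desc *mdesc;
	int ret;

	mdesc = snp_msg_alloc();
	if (IS_ERR_OR_NULL(mdesc))
		return -ENOMEM;

	ret = snp_msg_init(mdesc, vmpck_id);	/* bind VMPCK, set up crypto */
	if (ret) {
		snp_msg_free(mdesc);
		return ret;
	}

	*out = mdesc;
	return 0;
}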
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
index cc329887bfae..11b153e7454e 100644
--- a/drivers/virt/vboxguest/Kconfig
+++ b/drivers/virt/vboxguest/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VBOXGUEST
tristate "Virtual Box Guest integration support"
- depends on X86 && PCI && INPUT
+ depends on (ARM64 || X86) && PCI && INPUT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b9095751e43b..b10ed9f5b543 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -377,6 +377,24 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}
+/*
+ * virtio_irq_get_affinity - get IRQ affinity mask for device
+ * @_d: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @_d and @irq_vec.
+ */
+static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
+ unsigned int irq_vec)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+
+ if (!dev->config->get_vq_affinity)
+ return NULL;
+
+ return dev->config->get_vq_affinity(dev, irq_vec);
+}
+
static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
@@ -384,6 +402,7 @@ static const struct bus_type virtio_bus = {
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
+ .irq_get_affinity = virtio_irq_get_affinity,
};
int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
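The new bus-level callback lets generic device code ask for a vector's CPU affinity without knowing about virtio; transports that implement get_vq_affinity have it forwarded, others return NULL. A hedged sketch of a consumer that tolerates buses without the callback:

/* Sketch: query a device's IRQ-vector affinity through its bus,
 * returning NULL when the bus does not implement the callback. */
static const struct cpumask *example_vec_affinity(struct device *dev,
						  unsigned int vec)
{
	if (!dev->bus || !dev->bus->irq_get_affinity)
		return NULL;

	return dev->bus->irq_get_affinity(dev, vec);
}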
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 4190cb800cc4..19698d87dc57 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1229,7 +1229,7 @@ int __init watchdog_dev_init(void)
{
int err;
- watchdog_kworker = kthread_create_worker(0, "watchdogd");
+ watchdog_kworker = kthread_run_worker(0, "watchdogd");
if (IS_ERR(watchdog_kworker)) {
pr_err("Failed to create watchdog kworker\n");
return PTR_ERR(watchdog_kworker);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 985e155ebe4b..41309d38f78c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -722,12 +722,6 @@ static struct irq_info *xen_irq_init(unsigned int irq)
INIT_RCU_WORK(&info->rwork, delayed_free_irq);
set_info_for_irq(irq, info);
- /*
- * Interrupt affinity setting can be immediate. No point
- * in delaying it until an interrupt is handled.
- */
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-
INIT_LIST_HEAD(&info->eoi_list);
list_add_tail(&info->list, &xen_irq_list_head);
}
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 3d34dba9bb2d..10aedcd21363 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -81,7 +81,7 @@ static struct attribute *zorro_device_attrs[] = {
};
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
@@ -98,23 +98,23 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}
-static struct bin_attribute zorro_config_attr = {
+static const struct bin_attribute zorro_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO,
},
.size = sizeof(struct ConfigDev),
- .read = zorro_read_config,
+ .read_new = zorro_read_config,
};
-static struct bin_attribute *zorro_device_bin_attrs[] = {
+static const struct bin_attribute *const zorro_device_bin_attrs[] = {
&zorro_config_attr,
NULL
};
static const struct attribute_group zorro_device_attr_group = {
.attrs = zorro_device_attrs,
- .bin_attrs = zorro_device_bin_attrs,
+ .bin_attrs_new = zorro_device_bin_attrs,
};
const struct attribute_group *zorro_device_attribute_groups[] = {